blob: d982cb09149b937361e0640e9d8a9272ae76bb5f [file] [log] [blame]
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001/*
Ajit Khaparded2145cd2011-03-16 08:20:46 +00002 * Copyright (C) 2005 - 2011 Emulex
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003 * All rights reserved.
4 *
5 * This program is free software; you can redistribute it and/or
6 * modify it under the terms of the GNU General Public License version 2
7 * as published by the Free Software Foundation. The full GNU General
8 * Public License is included in this distribution in the file called COPYING.
9 *
10 * Contact Information:
Ajit Khaparded2145cd2011-03-16 08:20:46 +000011 * linux-drivers@emulex.com
Sathya Perla6b7c5b92009-03-11 23:32:03 -070012 *
Ajit Khaparded2145cd2011-03-16 08:20:46 +000013 * Emulex
14 * 3333 Susan Street
15 * Costa Mesa, CA 92626
Sathya Perla6b7c5b92009-03-11 23:32:03 -070016 */
17
Paul Gortmaker70c71602011-05-22 16:47:17 -040018#include <linux/prefetch.h>
Paul Gortmaker9d9779e2011-07-03 15:21:01 -040019#include <linux/module.h>
Sathya Perla6b7c5b92009-03-11 23:32:03 -070020#include "be.h"
Sathya Perla8788fdc2009-07-27 22:52:03 +000021#include "be_cmds.h"
Stephen Hemminger65f71b82009-03-27 00:25:24 -070022#include <asm/div64.h>
Sathya Perlad6b6d982012-09-05 01:56:48 +000023#include <linux/aer.h>
Sathya Perla6b7c5b92009-03-11 23:32:03 -070024
MODULE_VERSION(DRV_VER);
MODULE_DEVICE_TABLE(pci, be_dev_ids);
MODULE_DESCRIPTION(DRV_DESC " " DRV_VER);
MODULE_AUTHOR("ServerEngines Corporation");
MODULE_LICENSE("GPL");

/* Number of PCI virtual functions to enable at probe time (read-only param) */
static unsigned int num_vfs;
module_param(num_vfs, uint, S_IRUGO);
MODULE_PARM_DESC(num_vfs, "Number of PCI VFs to initialize");

/* Size of each RX buffer fragment posted to the HW (read-only param) */
static ushort rx_frag_size = 2048;
module_param(rx_frag_size, ushort, S_IRUGO);
MODULE_PARM_DESC(rx_frag_size, "Size of a fragment that holds rcvd data.");

/* PCI IDs of the BE/OC devices claimed by this driver */
static DEFINE_PCI_DEVICE_TABLE(be_dev_ids) = {
	{ PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID1) },
	{ PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID2) },
	{ PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID1) },
	{ PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID2) },
	{ PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID3)},
	{ PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID4)},
	{ PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID5)},
	{ PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID6)},
	{ 0 }
};
MODULE_DEVICE_TABLE(pci, be_dev_ids);
/* UE Status Low CSR */
/* Human-readable names for each bit of the UE (Unrecoverable Error)
 * status-low register; indexed by bit position when decoding errors.
 * NOTE(review): trailing spaces in some entries are preserved as-is.
 */
static const char * const ue_status_low_desc[] = {
	"CEV",
	"CTX",
	"DBUF",
	"ERX",
	"Host",
	"MPU",
	"NDMA",
	"PTC ",
	"RDMA ",
	"RXF ",
	"RXIPS ",
	"RXULP0 ",
	"RXULP1 ",
	"RXULP2 ",
	"TIM ",
	"TPOST ",
	"TPRE ",
	"TXIPS ",
	"TXULP0 ",
	"TXULP1 ",
	"UC ",
	"WDMA ",
	"TXULP2 ",
	"HOST1 ",
	"P0_OB_LINK ",
	"P1_OB_LINK ",
	"HOST_GPIO ",
	"MBOX ",
	"AXGMAC0",
	"AXGMAC1",
	"JTAG",
	"MPU_INTPEND"
};
/* UE Status High CSR */
/* Human-readable names for each bit of the UE status-high register;
 * indexed by bit position when decoding errors.
 */
static const char * const ue_status_hi_desc[] = {
	"LPCMEMHOST",
	"MGMT_MAC",
	"PCS0ONLINE",
	"MPU_IRAM",
	"PCS1ONLINE",
	"PCTL0",
	"PCTL1",
	"PMEM",
	"RR",
	"TXPB",
	"RXPP",
	"XAUI",
	"TXP",
	"ARM",
	"IPC",
	"HOST2",
	"HOST3",
	"HOST4",
	"HOST5",
	"HOST6",
	"HOST7",
	"HOST8",
	"HOST9",
	"NETC",
	"Unknown",
	"Unknown",
	"Unknown",
	"Unknown",
	"Unknown",
	"Unknown",
	"Unknown",
	"Unknown"
};
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700121
Sathya Perla752961a2011-10-24 02:45:03 +0000122/* Is BE in a multi-channel mode */
123static inline bool be_is_mc(struct be_adapter *adapter) {
124 return (adapter->function_mode & FLEX10_MODE ||
125 adapter->function_mode & VNIC_MODE ||
126 adapter->function_mode & UMC_ENABLED);
127}
128
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700129static void be_queue_free(struct be_adapter *adapter, struct be_queue_info *q)
130{
131 struct be_dma_mem *mem = &q->dma_mem;
Sathya Perla1cfafab2012-02-23 18:50:15 +0000132 if (mem->va) {
Ivan Vecera2b7bceb2011-02-02 08:05:12 +0000133 dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
134 mem->dma);
Sathya Perla1cfafab2012-02-23 18:50:15 +0000135 mem->va = NULL;
136 }
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700137}
138
139static int be_queue_alloc(struct be_adapter *adapter, struct be_queue_info *q,
140 u16 len, u16 entry_size)
141{
142 struct be_dma_mem *mem = &q->dma_mem;
143
144 memset(q, 0, sizeof(*q));
145 q->len = len;
146 q->entry_size = entry_size;
147 mem->size = len * entry_size;
Ivan Vecera2b7bceb2011-02-02 08:05:12 +0000148 mem->va = dma_alloc_coherent(&adapter->pdev->dev, mem->size, &mem->dma,
149 GFP_KERNEL);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700150 if (!mem->va)
Sathya Perla10ef9ab2012-02-09 18:05:27 +0000151 return -ENOMEM;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700152 memset(mem->va, 0, mem->size);
153 return 0;
154}
155
Sathya Perla8788fdc2009-07-27 22:52:03 +0000156static void be_intr_set(struct be_adapter *adapter, bool enable)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700157{
Sathya Perladb3ea782011-08-22 19:41:52 +0000158 u32 reg, enabled;
Sathya Perla5f0b8492009-07-27 22:52:56 +0000159
Padmanabh Ratnakarf67ef7b2012-07-12 03:57:09 +0000160 if (adapter->eeh_error)
Sathya Perlacf588472010-02-14 21:22:01 +0000161 return;
162
Sathya Perladb3ea782011-08-22 19:41:52 +0000163 pci_read_config_dword(adapter->pdev, PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET,
164 &reg);
165 enabled = reg & MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
166
Sathya Perla5f0b8492009-07-27 22:52:56 +0000167 if (!enabled && enable)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700168 reg |= MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
Sathya Perla5f0b8492009-07-27 22:52:56 +0000169 else if (enabled && !enable)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700170 reg &= ~MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
Sathya Perla5f0b8492009-07-27 22:52:56 +0000171 else
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700172 return;
Sathya Perla5f0b8492009-07-27 22:52:56 +0000173
Sathya Perladb3ea782011-08-22 19:41:52 +0000174 pci_write_config_dword(adapter->pdev,
175 PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET, reg);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700176}
177
Sathya Perla8788fdc2009-07-27 22:52:03 +0000178static void be_rxq_notify(struct be_adapter *adapter, u16 qid, u16 posted)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700179{
180 u32 val = 0;
181 val |= qid & DB_RQ_RING_ID_MASK;
182 val |= posted << DB_RQ_NUM_POSTED_SHIFT;
Sathya Perlaf3eb62d2010-06-29 00:11:17 +0000183
184 wmb();
Sathya Perla8788fdc2009-07-27 22:52:03 +0000185 iowrite32(val, adapter->db + DB_RQ_OFFSET);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700186}
187
Sathya Perla8788fdc2009-07-27 22:52:03 +0000188static void be_txq_notify(struct be_adapter *adapter, u16 qid, u16 posted)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700189{
190 u32 val = 0;
191 val |= qid & DB_TXULP_RING_ID_MASK;
192 val |= (posted & DB_TXULP_NUM_POSTED_MASK) << DB_TXULP_NUM_POSTED_SHIFT;
Sathya Perlaf3eb62d2010-06-29 00:11:17 +0000193
194 wmb();
Sathya Perla8788fdc2009-07-27 22:52:03 +0000195 iowrite32(val, adapter->db + DB_TXULP1_OFFSET);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700196}
197
Sathya Perla8788fdc2009-07-27 22:52:03 +0000198static void be_eq_notify(struct be_adapter *adapter, u16 qid,
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700199 bool arm, bool clear_int, u16 num_popped)
200{
201 u32 val = 0;
202 val |= qid & DB_EQ_RING_ID_MASK;
Sathya Perlafe6d2a32010-11-21 23:25:50 +0000203 val |= ((qid & DB_EQ_RING_ID_EXT_MASK) <<
204 DB_EQ_RING_ID_EXT_MASK_SHIFT);
Sathya Perlacf588472010-02-14 21:22:01 +0000205
Padmanabh Ratnakarf67ef7b2012-07-12 03:57:09 +0000206 if (adapter->eeh_error)
Sathya Perlacf588472010-02-14 21:22:01 +0000207 return;
208
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700209 if (arm)
210 val |= 1 << DB_EQ_REARM_SHIFT;
211 if (clear_int)
212 val |= 1 << DB_EQ_CLR_SHIFT;
213 val |= 1 << DB_EQ_EVNT_SHIFT;
214 val |= num_popped << DB_EQ_NUM_POPPED_SHIFT;
Sathya Perla8788fdc2009-07-27 22:52:03 +0000215 iowrite32(val, adapter->db + DB_EQ_OFFSET);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700216}
217
Sathya Perla8788fdc2009-07-27 22:52:03 +0000218void be_cq_notify(struct be_adapter *adapter, u16 qid, bool arm, u16 num_popped)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700219{
220 u32 val = 0;
221 val |= qid & DB_CQ_RING_ID_MASK;
Sathya Perlafe6d2a32010-11-21 23:25:50 +0000222 val |= ((qid & DB_CQ_RING_ID_EXT_MASK) <<
223 DB_CQ_RING_ID_EXT_MASK_SHIFT);
Sathya Perlacf588472010-02-14 21:22:01 +0000224
Padmanabh Ratnakarf67ef7b2012-07-12 03:57:09 +0000225 if (adapter->eeh_error)
Sathya Perlacf588472010-02-14 21:22:01 +0000226 return;
227
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700228 if (arm)
229 val |= 1 << DB_CQ_REARM_SHIFT;
230 val |= num_popped << DB_CQ_NUM_POPPED_SHIFT;
Sathya Perla8788fdc2009-07-27 22:52:03 +0000231 iowrite32(val, adapter->db + DB_CQ_OFFSET);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700232}
233
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700234static int be_mac_addr_set(struct net_device *netdev, void *p)
235{
236 struct be_adapter *adapter = netdev_priv(netdev);
237 struct sockaddr *addr = p;
238 int status = 0;
Somnath Koture3a7ae22011-10-27 07:14:05 +0000239 u8 current_mac[ETH_ALEN];
Ajit Khapardefbc13f02012-03-18 06:23:21 +0000240 u32 pmac_id = adapter->pmac_id[0];
Padmanabh Ratnakar704e4c82012-10-20 06:02:13 +0000241 bool active_mac = true;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700242
Ajit Khapardeca9e4982009-11-29 17:56:26 +0000243 if (!is_valid_ether_addr(addr->sa_data))
244 return -EADDRNOTAVAIL;
245
Padmanabh Ratnakar704e4c82012-10-20 06:02:13 +0000246 /* For BE VF, MAC address is already activated by PF.
247 * Hence only operation left is updating netdev->devaddr.
248 * Update it if user is passing the same MAC which was used
249 * during configuring VF MAC from PF(Hypervisor).
250 */
251 if (!lancer_chip(adapter) && !be_physfn(adapter)) {
252 status = be_cmd_mac_addr_query(adapter, current_mac,
253 false, adapter->if_handle, 0);
254 if (!status && !memcmp(current_mac, addr->sa_data, ETH_ALEN))
255 goto done;
256 else
257 goto err;
258 }
259
260 if (!memcmp(addr->sa_data, netdev->dev_addr, ETH_ALEN))
261 goto done;
262
263 /* For Lancer check if any MAC is active.
264 * If active, get its mac id.
265 */
266 if (lancer_chip(adapter) && !be_physfn(adapter))
267 be_cmd_get_mac_from_list(adapter, current_mac, &active_mac,
268 &pmac_id, 0);
269
270 status = be_cmd_pmac_add(adapter, (u8 *)addr->sa_data,
271 adapter->if_handle,
272 &adapter->pmac_id[0], 0);
273
Sathya Perlaa65027e2009-08-17 00:58:04 +0000274 if (status)
Somnath Koture3a7ae22011-10-27 07:14:05 +0000275 goto err;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700276
Padmanabh Ratnakar704e4c82012-10-20 06:02:13 +0000277 if (active_mac)
278 be_cmd_pmac_del(adapter, adapter->if_handle,
279 pmac_id, 0);
280done:
Somnath Koture3a7ae22011-10-27 07:14:05 +0000281 memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
282 return 0;
283err:
284 dev_err(&adapter->pdev->dev, "MAC %pM set Failed\n", addr->sa_data);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700285 return status;
286}
287
/* Copy BE2 (v0) HW stats from the stats-command DMA buffer into
 * adapter->drv_stats, after converting the buffer from LE to CPU order.
 * Per-port counters come from the port slot matching adapter->port_num.
 */
static void populate_be2_stats(struct be_adapter *adapter)
{
	struct be_hw_stats_v0 *hw_stats = hw_stats_from_cmd(adapter);
	struct be_pmem_stats *pmem_sts = &hw_stats->pmem;
	struct be_rxf_stats_v0 *rxf_stats = &hw_stats->rxf;
	struct be_port_rxf_stats_v0 *port_stats =
					&rxf_stats->port[adapter->port_num];
	struct be_drv_stats *drvs = &adapter->drv_stats;

	be_dws_le_to_cpu(hw_stats, sizeof(*hw_stats));
	drvs->rx_pause_frames = port_stats->rx_pause_frames;
	drvs->rx_crc_errors = port_stats->rx_crc_errors;
	drvs->rx_control_frames = port_stats->rx_control_frames;
	drvs->rx_in_range_errors = port_stats->rx_in_range_errors;
	drvs->rx_frame_too_long = port_stats->rx_frame_too_long;
	drvs->rx_dropped_runt = port_stats->rx_dropped_runt;
	drvs->rx_ip_checksum_errs = port_stats->rx_ip_checksum_errs;
	drvs->rx_tcp_checksum_errs = port_stats->rx_tcp_checksum_errs;
	drvs->rx_udp_checksum_errs = port_stats->rx_udp_checksum_errs;
	drvs->rxpp_fifo_overflow_drop = port_stats->rx_fifo_overflow;
	drvs->rx_dropped_tcp_length = port_stats->rx_dropped_tcp_length;
	drvs->rx_dropped_too_small = port_stats->rx_dropped_too_small;
	drvs->rx_dropped_too_short = port_stats->rx_dropped_too_short;
	drvs->rx_out_range_errors = port_stats->rx_out_range_errors;
	drvs->rx_input_fifo_overflow_drop = port_stats->rx_input_fifo_overflow;
	drvs->rx_dropped_header_too_small =
		port_stats->rx_dropped_header_too_small;
	/* v0 HW splits address/vlan mismatch drops; driver reports the sum */
	drvs->rx_address_mismatch_drops =
					port_stats->rx_address_mismatch_drops +
					port_stats->rx_vlan_mismatch_drops;
	drvs->rx_alignment_symbol_errors =
		port_stats->rx_alignment_symbol_errors;

	drvs->tx_pauseframes = port_stats->tx_pauseframes;
	drvs->tx_controlframes = port_stats->tx_controlframes;

	/* jabber counters are per-port at the rxf level in v0 stats */
	if (adapter->port_num)
		drvs->jabber_events = rxf_stats->port1_jabber_events;
	else
		drvs->jabber_events = rxf_stats->port0_jabber_events;
	drvs->rx_drops_no_pbuf = rxf_stats->rx_drops_no_pbuf;
	drvs->rx_drops_no_erx_descr = rxf_stats->rx_drops_no_erx_descr;
	drvs->forwarded_packets = rxf_stats->forwarded_packets;
	drvs->rx_drops_mtu = rxf_stats->rx_drops_mtu;
	drvs->rx_drops_no_tpre_descr = rxf_stats->rx_drops_no_tpre_descr;
	drvs->rx_drops_too_many_frags = rxf_stats->rx_drops_too_many_frags;
	adapter->drv_stats.eth_red_drops = pmem_sts->eth_red_drops;
}
336
/* Copy BE3 (v1) HW stats from the stats-command DMA buffer into
 * adapter->drv_stats, after converting the buffer from LE to CPU order.
 * Per-port counters come from the port slot matching adapter->port_num.
 */
static void populate_be3_stats(struct be_adapter *adapter)
{
	struct be_hw_stats_v1 *hw_stats = hw_stats_from_cmd(adapter);
	struct be_pmem_stats *pmem_sts = &hw_stats->pmem;
	struct be_rxf_stats_v1 *rxf_stats = &hw_stats->rxf;
	struct be_port_rxf_stats_v1 *port_stats =
					&rxf_stats->port[adapter->port_num];
	struct be_drv_stats *drvs = &adapter->drv_stats;

	be_dws_le_to_cpu(hw_stats, sizeof(*hw_stats));
	drvs->pmem_fifo_overflow_drop = port_stats->pmem_fifo_overflow_drop;
	drvs->rx_priority_pause_frames = port_stats->rx_priority_pause_frames;
	drvs->rx_pause_frames = port_stats->rx_pause_frames;
	drvs->rx_crc_errors = port_stats->rx_crc_errors;
	drvs->rx_control_frames = port_stats->rx_control_frames;
	drvs->rx_in_range_errors = port_stats->rx_in_range_errors;
	drvs->rx_frame_too_long = port_stats->rx_frame_too_long;
	drvs->rx_dropped_runt = port_stats->rx_dropped_runt;
	drvs->rx_ip_checksum_errs = port_stats->rx_ip_checksum_errs;
	drvs->rx_tcp_checksum_errs = port_stats->rx_tcp_checksum_errs;
	drvs->rx_udp_checksum_errs = port_stats->rx_udp_checksum_errs;
	drvs->rx_dropped_tcp_length = port_stats->rx_dropped_tcp_length;
	drvs->rx_dropped_too_small = port_stats->rx_dropped_too_small;
	drvs->rx_dropped_too_short = port_stats->rx_dropped_too_short;
	drvs->rx_out_range_errors = port_stats->rx_out_range_errors;
	drvs->rx_dropped_header_too_small =
		port_stats->rx_dropped_header_too_small;
	drvs->rx_input_fifo_overflow_drop =
		port_stats->rx_input_fifo_overflow_drop;
	/* unlike v0, v1 HW reports a single combined mismatch counter */
	drvs->rx_address_mismatch_drops = port_stats->rx_address_mismatch_drops;
	drvs->rx_alignment_symbol_errors =
		port_stats->rx_alignment_symbol_errors;
	drvs->rxpp_fifo_overflow_drop = port_stats->rxpp_fifo_overflow_drop;
	drvs->tx_pauseframes = port_stats->tx_pauseframes;
	drvs->tx_controlframes = port_stats->tx_controlframes;
	drvs->jabber_events = port_stats->jabber_events;
	drvs->rx_drops_no_pbuf = rxf_stats->rx_drops_no_pbuf;
	drvs->rx_drops_no_erx_descr = rxf_stats->rx_drops_no_erx_descr;
	drvs->forwarded_packets = rxf_stats->forwarded_packets;
	drvs->rx_drops_mtu = rxf_stats->rx_drops_mtu;
	drvs->rx_drops_no_tpre_descr = rxf_stats->rx_drops_no_tpre_descr;
	drvs->rx_drops_too_many_frags = rxf_stats->rx_drops_too_many_frags;
	adapter->drv_stats.eth_red_drops = pmem_sts->eth_red_drops;
}
381
/* Copy Lancer per-physical-port (pport) stats from the stats-command
 * buffer into adapter->drv_stats, after LE-to-CPU conversion.
 * Only the low 32 bits (_lo fields) of 64-bit HW counters are kept.
 */
static void populate_lancer_stats(struct be_adapter *adapter)
{
	struct be_drv_stats *drvs = &adapter->drv_stats;
	struct lancer_pport_stats *pport_stats =
					pport_stats_from_cmd(adapter);

	be_dws_le_to_cpu(pport_stats, sizeof(*pport_stats));
	drvs->rx_pause_frames = pport_stats->rx_pause_frames_lo;
	drvs->rx_crc_errors = pport_stats->rx_crc_errors_lo;
	drvs->rx_control_frames = pport_stats->rx_control_frames_lo;
	drvs->rx_in_range_errors = pport_stats->rx_in_range_errors;
	drvs->rx_frame_too_long = pport_stats->rx_frames_too_long_lo;
	drvs->rx_dropped_runt = pport_stats->rx_dropped_runt;
	drvs->rx_ip_checksum_errs = pport_stats->rx_ip_checksum_errors;
	drvs->rx_tcp_checksum_errs = pport_stats->rx_tcp_checksum_errors;
	drvs->rx_udp_checksum_errs = pport_stats->rx_udp_checksum_errors;
	drvs->rx_dropped_tcp_length =
				pport_stats->rx_dropped_invalid_tcp_length;
	drvs->rx_dropped_too_small = pport_stats->rx_dropped_too_small;
	drvs->rx_dropped_too_short = pport_stats->rx_dropped_too_short;
	drvs->rx_out_range_errors = pport_stats->rx_out_of_range_errors;
	drvs->rx_dropped_header_too_small =
				pport_stats->rx_dropped_header_too_small;
	drvs->rx_input_fifo_overflow_drop = pport_stats->rx_fifo_overflow;
	/* Lancer splits address/vlan mismatch drops; report the sum */
	drvs->rx_address_mismatch_drops =
					pport_stats->rx_address_mismatch_drops +
					pport_stats->rx_vlan_mismatch_drops;
	drvs->rx_alignment_symbol_errors = pport_stats->rx_symbol_errors_lo;
	drvs->rxpp_fifo_overflow_drop = pport_stats->rx_fifo_overflow;
	drvs->tx_pauseframes = pport_stats->tx_pause_frames_lo;
	drvs->tx_controlframes = pport_stats->tx_control_frames_lo;
	drvs->jabber_events = pport_stats->rx_jabbers;
	drvs->forwarded_packets = pport_stats->num_forwards_lo;
	drvs->rx_drops_mtu = pport_stats->rx_drops_mtu_lo;
	drvs->rx_drops_too_many_frags =
				pport_stats->rx_drops_too_many_frags_lo;
}
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000420
Sathya Perla09c1c682011-08-22 19:41:53 +0000421static void accumulate_16bit_val(u32 *acc, u16 val)
422{
423#define lo(x) (x & 0xFFFF)
424#define hi(x) (x & 0xFFFF0000)
425 bool wrapped = val < lo(*acc);
426 u32 newacc = hi(*acc) + val;
427
428 if (wrapped)
429 newacc += 65536;
430 ACCESS_ONCE(*acc) = newacc;
431}
432
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000433void be_parse_stats(struct be_adapter *adapter)
434{
Sathya Perlaac124ff2011-07-25 19:10:14 +0000435 struct be_erx_stats_v1 *erx = be_erx_stats_from_cmd(adapter);
436 struct be_rx_obj *rxo;
437 int i;
438
Selvin Xavier005d5692011-05-16 07:36:35 +0000439 if (adapter->generation == BE_GEN3) {
440 if (lancer_chip(adapter))
441 populate_lancer_stats(adapter);
442 else
443 populate_be3_stats(adapter);
444 } else {
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000445 populate_be2_stats(adapter);
Selvin Xavier005d5692011-05-16 07:36:35 +0000446 }
Sathya Perlaac124ff2011-07-25 19:10:14 +0000447
Padmanabh Ratnakard51ebd32012-04-25 01:46:52 +0000448 if (lancer_chip(adapter))
449 goto done;
450
Sathya Perlaac124ff2011-07-25 19:10:14 +0000451 /* as erx_v1 is longer than v0, ok to use v1 defn for v0 access */
Sathya Perla09c1c682011-08-22 19:41:53 +0000452 for_all_rx_queues(adapter, rxo, i) {
453 /* below erx HW counter can actually wrap around after
454 * 65535. Driver accumulates a 32-bit value
455 */
456 accumulate_16bit_val(&rx_stats(rxo)->rx_drops_no_frags,
457 (u16)erx->rx_drops_no_fragments[rxo->q.id]);
458 }
Padmanabh Ratnakard51ebd32012-04-25 01:46:52 +0000459done:
460 return;
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000461}
462
/* ndo_get_stats64 handler: aggregate per-queue SW packet/byte counters
 * (read under u64_stats seqcount protection) plus the HW-derived error
 * counters cached in adapter->drv_stats into @stats. Returns @stats.
 */
static struct rtnl_link_stats64 *be_get_stats64(struct net_device *netdev,
					struct rtnl_link_stats64 *stats)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_drv_stats *drvs = &adapter->drv_stats;
	struct be_rx_obj *rxo;
	struct be_tx_obj *txo;
	u64 pkts, bytes;
	unsigned int start;
	int i;

	for_all_rx_queues(adapter, rxo, i) {
		const struct be_rx_stats *rx_stats = rx_stats(rxo);
		/* retry until a consistent pkts/bytes snapshot is read */
		do {
			start = u64_stats_fetch_begin_bh(&rx_stats->sync);
			pkts = rx_stats(rxo)->rx_pkts;
			bytes = rx_stats(rxo)->rx_bytes;
		} while (u64_stats_fetch_retry_bh(&rx_stats->sync, start));
		stats->rx_packets += pkts;
		stats->rx_bytes += bytes;
		stats->multicast += rx_stats(rxo)->rx_mcast_pkts;
		stats->rx_dropped += rx_stats(rxo)->rx_drops_no_skbs +
					rx_stats(rxo)->rx_drops_no_frags;
	}

	for_all_tx_queues(adapter, txo, i) {
		const struct be_tx_stats *tx_stats = tx_stats(txo);
		/* retry until a consistent pkts/bytes snapshot is read */
		do {
			start = u64_stats_fetch_begin_bh(&tx_stats->sync);
			pkts = tx_stats(txo)->tx_pkts;
			bytes = tx_stats(txo)->tx_bytes;
		} while (u64_stats_fetch_retry_bh(&tx_stats->sync, start));
		stats->tx_packets += pkts;
		stats->tx_bytes += bytes;
	}

	/* bad pkts received */
	stats->rx_errors = drvs->rx_crc_errors +
		drvs->rx_alignment_symbol_errors +
		drvs->rx_in_range_errors +
		drvs->rx_out_range_errors +
		drvs->rx_frame_too_long +
		drvs->rx_dropped_too_small +
		drvs->rx_dropped_too_short +
		drvs->rx_dropped_header_too_small +
		drvs->rx_dropped_tcp_length +
		drvs->rx_dropped_runt;

	/* detailed rx errors */
	stats->rx_length_errors = drvs->rx_in_range_errors +
		drvs->rx_out_range_errors +
		drvs->rx_frame_too_long;

	stats->rx_crc_errors = drvs->rx_crc_errors;

	/* frame alignment errors */
	stats->rx_frame_errors = drvs->rx_alignment_symbol_errors;

	/* receiver fifo overrun */
	/* drops_no_pbuf is no per i/f, it's per BE card */
	stats->rx_fifo_errors = drvs->rxpp_fifo_overflow_drop +
				drvs->rx_input_fifo_overflow_drop +
				drvs->rx_drops_no_pbuf;
	return stats;
}
528
Ajit Khapardeb236916a2011-12-30 12:15:40 +0000529void be_link_status_update(struct be_adapter *adapter, u8 link_status)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700530{
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700531 struct net_device *netdev = adapter->netdev;
532
Ajit Khapardeb236916a2011-12-30 12:15:40 +0000533 if (!(adapter->flags & BE_FLAGS_LINK_STATUS_INIT)) {
Sathya Perlaea172a02011-08-02 19:57:42 +0000534 netif_carrier_off(netdev);
Ajit Khapardeb236916a2011-12-30 12:15:40 +0000535 adapter->flags |= BE_FLAGS_LINK_STATUS_INIT;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700536 }
Ajit Khapardeb236916a2011-12-30 12:15:40 +0000537
538 if ((link_status & LINK_STATUS_MASK) == LINK_UP)
539 netif_carrier_on(netdev);
540 else
541 netif_carrier_off(netdev);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700542}
543
Sathya Perla3c8def92011-06-12 20:01:58 +0000544static void be_tx_stats_update(struct be_tx_obj *txo,
Ajit Khaparde91992e42010-02-19 13:57:12 +0000545 u32 wrb_cnt, u32 copied, u32 gso_segs, bool stopped)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700546{
Sathya Perla3c8def92011-06-12 20:01:58 +0000547 struct be_tx_stats *stats = tx_stats(txo);
548
Sathya Perlaab1594e2011-07-25 19:10:15 +0000549 u64_stats_update_begin(&stats->sync);
Sathya Perlaac124ff2011-07-25 19:10:14 +0000550 stats->tx_reqs++;
551 stats->tx_wrbs += wrb_cnt;
552 stats->tx_bytes += copied;
553 stats->tx_pkts += (gso_segs ? gso_segs : 1);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700554 if (stopped)
Sathya Perlaac124ff2011-07-25 19:10:14 +0000555 stats->tx_stops++;
Sathya Perlaab1594e2011-07-25 19:10:15 +0000556 u64_stats_update_end(&stats->sync);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700557}
558
559/* Determine number of WRB entries needed to xmit data in an skb */
Sathya Perlafe6d2a32010-11-21 23:25:50 +0000560static u32 wrb_cnt_for_skb(struct be_adapter *adapter, struct sk_buff *skb,
561 bool *dummy)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700562{
David S. Millerebc8d2a2009-06-09 01:01:31 -0700563 int cnt = (skb->len > skb->data_len);
564
565 cnt += skb_shinfo(skb)->nr_frags;
566
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700567 /* to account for hdr wrb */
568 cnt++;
Sathya Perlafe6d2a32010-11-21 23:25:50 +0000569 if (lancer_chip(adapter) || !(cnt & 1)) {
570 *dummy = false;
571 } else {
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700572 /* add a dummy to make it an even num */
573 cnt++;
574 *dummy = true;
Sathya Perlafe6d2a32010-11-21 23:25:50 +0000575 }
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700576 BUG_ON(cnt > BE_MAX_TX_FRAG_COUNT);
577 return cnt;
578}
579
580static inline void wrb_fill(struct be_eth_wrb *wrb, u64 addr, int len)
581{
582 wrb->frag_pa_hi = upper_32_bits(addr);
583 wrb->frag_pa_lo = addr & 0xFFFFFFFF;
584 wrb->frag_len = len & ETH_WRB_FRAG_LEN_MASK;
Somnath Kotur89b1f492012-06-24 19:40:55 +0000585 wrb->rsvd0 = 0;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700586}
587
Ajit Khaparde1ded1322011-12-09 13:53:17 +0000588static inline u16 be_get_tx_vlan_tag(struct be_adapter *adapter,
589 struct sk_buff *skb)
590{
591 u8 vlan_prio;
592 u16 vlan_tag;
593
594 vlan_tag = vlan_tx_tag_get(skb);
595 vlan_prio = (vlan_tag & VLAN_PRIO_MASK) >> VLAN_PRIO_SHIFT;
596 /* If vlan priority provided by OS is NOT in available bmap */
597 if (!(adapter->vlan_prio_bmap & (1 << vlan_prio)))
598 vlan_tag = (vlan_tag & ~VLAN_PRIO_MASK) |
599 adapter->recommended_prio;
600
601 return vlan_tag;
602}
603
Somnath Kotur93040ae2012-06-26 22:32:10 +0000604static int be_vlan_tag_chk(struct be_adapter *adapter, struct sk_buff *skb)
605{
606 return vlan_tx_tag_present(skb) || adapter->pvid;
607}
608
/* Fill the per-packet Ethernet header WRB that precedes the data WRBs:
 * sets checksum-offload, LSO, VLAN-insertion and event/completion bits
 * based on the skb's offload state. @wrb_cnt is the total WRB count of
 * the request and @len the total data length.
 */
static void wrb_fill_hdr(struct be_adapter *adapter, struct be_eth_hdr_wrb *hdr,
		struct sk_buff *skb, u32 wrb_cnt, u32 len)
{
	u16 vlan_tag;

	memset(hdr, 0, sizeof(*hdr));

	AMAP_SET_BITS(struct amap_eth_hdr_wrb, crc, hdr, 1);

	if (skb_is_gso(skb)) {
		AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso, hdr, 1);
		AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso_mss,
			hdr, skb_shinfo(skb)->gso_size);
		/* Lancer handles IPv6 LSO without the lso6 bit */
		if (skb_is_gso_v6(skb) && !lancer_chip(adapter))
			AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso6, hdr, 1);
	} else if (skb->ip_summed == CHECKSUM_PARTIAL) {
		/* request HW checksum offload for TCP or UDP payloads */
		if (is_tcp_pkt(skb))
			AMAP_SET_BITS(struct amap_eth_hdr_wrb, tcpcs, hdr, 1);
		else if (is_udp_pkt(skb))
			AMAP_SET_BITS(struct amap_eth_hdr_wrb, udpcs, hdr, 1);
	}

	if (vlan_tx_tag_present(skb)) {
		AMAP_SET_BITS(struct amap_eth_hdr_wrb, vlan, hdr, 1);
		vlan_tag = be_get_tx_vlan_tag(adapter, skb);
		AMAP_SET_BITS(struct amap_eth_hdr_wrb, vlan_tag, hdr, vlan_tag);
	}

	AMAP_SET_BITS(struct amap_eth_hdr_wrb, event, hdr, 1);
	AMAP_SET_BITS(struct amap_eth_hdr_wrb, complete, hdr, 1);
	AMAP_SET_BITS(struct amap_eth_hdr_wrb, num_wrb, hdr, wrb_cnt);
	AMAP_SET_BITS(struct amap_eth_hdr_wrb, len, hdr, len);
}
642
Ivan Vecera2b7bceb2011-02-02 08:05:12 +0000643static void unmap_tx_frag(struct device *dev, struct be_eth_wrb *wrb,
Sathya Perla7101e112010-03-22 20:41:12 +0000644 bool unmap_single)
645{
646 dma_addr_t dma;
647
648 be_dws_le_to_cpu(wrb, sizeof(*wrb));
649
650 dma = (u64)wrb->frag_pa_hi << 32 | (u64)wrb->frag_pa_lo;
FUJITA Tomonorib681ee72010-04-04 21:40:18 +0000651 if (wrb->frag_len) {
Sathya Perla7101e112010-03-22 20:41:12 +0000652 if (unmap_single)
Ivan Vecera2b7bceb2011-02-02 08:05:12 +0000653 dma_unmap_single(dev, dma, wrb->frag_len,
654 DMA_TO_DEVICE);
Sathya Perla7101e112010-03-22 20:41:12 +0000655 else
Ivan Vecera2b7bceb2011-02-02 08:05:12 +0000656 dma_unmap_page(dev, dma, wrb->frag_len, DMA_TO_DEVICE);
Sathya Perla7101e112010-03-22 20:41:12 +0000657 }
658}
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700659
/* Map the skb (linear head plus page frags) for DMA and fill one TX
 * WRB per mapped segment, after reserving the header WRB.
 * Returns the number of data bytes queued, or 0 if a DMA mapping
 * failed (in which case all mappings made so far are undone and the
 * queue head is rewound to its original position).
 */
static int make_tx_wrbs(struct be_adapter *adapter, struct be_queue_info *txq,
		struct sk_buff *skb, u32 wrb_cnt, bool dummy_wrb)
{
	dma_addr_t busaddr;
	int i, copied = 0;
	struct device *dev = &adapter->pdev->dev;
	struct sk_buff *first_skb = skb;
	struct be_eth_wrb *wrb;
	struct be_eth_hdr_wrb *hdr;
	bool map_single = false;
	u16 map_head;

	/* Reserve the first slot for the header WRB; it is filled last,
	 * once the total copied length is known.
	 */
	hdr = queue_head_node(txq);
	queue_head_inc(txq);
	map_head = txq->head;	/* first data WRB; rollback point on error */

	if (skb->len > skb->data_len) {
		int len = skb_headlen(skb);
		busaddr = dma_map_single(dev, skb->data, len, DMA_TO_DEVICE);
		if (dma_mapping_error(dev, busaddr))
			goto dma_err;
		map_single = true;	/* head frag used dma_map_single() */
		wrb = queue_head_node(txq);
		wrb_fill(wrb, busaddr, len);
		be_dws_cpu_to_le(wrb, sizeof(*wrb));
		queue_head_inc(txq);
		copied += len;
	}

	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		const struct skb_frag_struct *frag =
			&skb_shinfo(skb)->frags[i];
		busaddr = skb_frag_dma_map(dev, frag, 0,
					skb_frag_size(frag), DMA_TO_DEVICE);
		if (dma_mapping_error(dev, busaddr))
			goto dma_err;
		wrb = queue_head_node(txq);
		wrb_fill(wrb, busaddr, skb_frag_size(frag));
		be_dws_cpu_to_le(wrb, sizeof(*wrb));
		queue_head_inc(txq);
		copied += skb_frag_size(frag);
	}

	if (dummy_wrb) {
		/* zero-length pad WRB so the request has an even WRB count */
		wrb = queue_head_node(txq);
		wrb_fill(wrb, 0, 0);
		be_dws_cpu_to_le(wrb, sizeof(*wrb));
		queue_head_inc(txq);
	}

	wrb_fill_hdr(adapter, hdr, first_skb, wrb_cnt, copied);
	be_dws_cpu_to_le(hdr, sizeof(*hdr));

	return copied;
dma_err:
	/* Rewind and unmap everything mapped so far.  Only the first
	 * data WRB (if any) was a single mapping; the rest are page
	 * mappings, hence map_single is cleared after the first pass.
	 */
	txq->head = map_head;
	while (copied) {
		wrb = queue_head_node(txq);
		unmap_tx_frag(dev, wrb, map_single);
		map_single = false;
		copied -= wrb->frag_len;
		queue_head_inc(txq);
	}
	return 0;
}
725
Somnath Kotur93040ae2012-06-26 22:32:10 +0000726static struct sk_buff *be_insert_vlan_in_pkt(struct be_adapter *adapter,
727 struct sk_buff *skb)
728{
729 u16 vlan_tag = 0;
730
731 skb = skb_share_check(skb, GFP_ATOMIC);
732 if (unlikely(!skb))
733 return skb;
734
735 if (vlan_tx_tag_present(skb)) {
736 vlan_tag = be_get_tx_vlan_tag(adapter, skb);
737 __vlan_put_tag(skb, vlan_tag);
738 skb->vlan_tci = 0;
739 }
740
741 return skb;
742}
743
/* ndo_start_xmit handler: apply HW-bug workarounds, build the WRBs for
 * this skb and ring the TX doorbell.  Always returns NETDEV_TX_OK; the
 * skb is either queued, or freed/dropped on failure.
 */
static netdev_tx_t be_xmit(struct sk_buff *skb,
			struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_tx_obj *txo = &adapter->tx_obj[skb_get_queue_mapping(skb)];
	struct be_queue_info *txq = &txo->q;
	struct iphdr *ip = NULL;
	u32 wrb_cnt = 0, copied = 0;
	u32 start = txq->head, eth_hdr_len;	/* start: slot of the hdr WRB */
	bool dummy_wrb, stopped = false;

	eth_hdr_len = ntohs(skb->protocol) == ETH_P_8021Q ?
		VLAN_ETH_HLEN : ETH_HLEN;

	/* HW has a bug which considers padding bytes as legal
	 * and modifies the IPv4 hdr's 'tot_len' field
	 */
	if (skb->len <= 60 && be_vlan_tag_chk(adapter, skb) &&
			is_ipv4_pkt(skb)) {
		/* trim the frame to exactly eth hdr + IP tot_len */
		ip = (struct iphdr *)ip_hdr(skb);
		pskb_trim(skb, eth_hdr_len + ntohs(ip->tot_len));
	}

	/* HW has a bug wherein it will calculate CSUM for VLAN
	 * pkts even though it is disabled.
	 * Manually insert VLAN in pkt.
	 */
	if (skb->ip_summed != CHECKSUM_PARTIAL &&
			be_vlan_tag_chk(adapter, skb)) {
		skb = be_insert_vlan_in_pkt(adapter, skb);
		if (unlikely(!skb))
			goto tx_drop;
	}

	wrb_cnt = wrb_cnt_for_skb(adapter, skb, &dummy_wrb);

	copied = make_tx_wrbs(adapter, txq, skb, wrb_cnt, dummy_wrb);
	if (copied) {
		int gso_segs = skb_shinfo(skb)->gso_segs;

		/* record the sent skb in the sent_skb table */
		BUG_ON(txo->sent_skb_list[start]);
		txo->sent_skb_list[start] = skb;

		/* Ensure txq has space for the next skb; Else stop the queue
		 * *BEFORE* ringing the tx doorbell, so that we serialze the
		 * tx compls of the current transmit which'll wake up the queue
		 */
		atomic_add(wrb_cnt, &txq->used);
		if ((BE_MAX_TX_FRAG_COUNT + atomic_read(&txq->used)) >=
								txq->len) {
			netif_stop_subqueue(netdev, skb_get_queue_mapping(skb));
			stopped = true;
		}

		be_txq_notify(adapter, txq->id, wrb_cnt);

		be_tx_stats_update(txo, wrb_cnt, copied, gso_segs, stopped);
	} else {
		/* DMA mapping failed: rewind the queue head and drop */
		txq->head = start;
		dev_kfree_skb_any(skb);
	}
tx_drop:
	return NETDEV_TX_OK;
}
809
810static int be_change_mtu(struct net_device *netdev, int new_mtu)
811{
812 struct be_adapter *adapter = netdev_priv(netdev);
813 if (new_mtu < BE_MIN_MTU ||
Ajit Khaparde34a89b82010-02-09 01:32:43 +0000814 new_mtu > (BE_MAX_JUMBO_FRAME_SIZE -
815 (ETH_HLEN + ETH_FCS_LEN))) {
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700816 dev_info(&adapter->pdev->dev,
817 "MTU must be between %d and %d bytes\n",
Ajit Khaparde34a89b82010-02-09 01:32:43 +0000818 BE_MIN_MTU,
819 (BE_MAX_JUMBO_FRAME_SIZE - (ETH_HLEN + ETH_FCS_LEN)));
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700820 return -EINVAL;
821 }
822 dev_info(&adapter->pdev->dev, "MTU changed from %d to %d bytes\n",
823 netdev->mtu, new_mtu);
824 netdev->mtu = new_mtu;
825 return 0;
826}
827
828/*
Ajit Khaparde82903e42010-02-09 01:34:57 +0000829 * A max of 64 (BE_NUM_VLANS_SUPPORTED) vlans can be configured in BE.
830 * If the user configures more, place BE in vlan promiscuous mode.
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700831 */
Sathya Perla10329df2012-06-05 19:37:18 +0000832static int be_vid_config(struct be_adapter *adapter)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700833{
Sathya Perla10329df2012-06-05 19:37:18 +0000834 u16 vids[BE_NUM_VLANS_SUPPORTED];
835 u16 num = 0, i;
Ajit Khaparde82903e42010-02-09 01:34:57 +0000836 int status = 0;
Ajit Khaparde1da87b72010-07-23 01:51:22 +0000837
Sathya Perlac0e64ef2011-08-02 19:57:43 +0000838 /* No need to further configure vids if in promiscuous mode */
839 if (adapter->promiscuous)
840 return 0;
841
Padmanabh Ratnakar0fc16eb2012-04-25 01:46:06 +0000842 if (adapter->vlans_added > adapter->max_vlans)
843 goto set_vlan_promisc;
844
845 /* Construct VLAN Table to give to HW */
846 for (i = 0; i < VLAN_N_VID; i++)
847 if (adapter->vlan_tag[i])
Sathya Perla10329df2012-06-05 19:37:18 +0000848 vids[num++] = cpu_to_le16(i);
Padmanabh Ratnakar0fc16eb2012-04-25 01:46:06 +0000849
850 status = be_cmd_vlan_config(adapter, adapter->if_handle,
Sathya Perla10329df2012-06-05 19:37:18 +0000851 vids, num, 1, 0);
Padmanabh Ratnakar0fc16eb2012-04-25 01:46:06 +0000852
853 /* Set to VLAN promisc mode as setting VLAN filter failed */
854 if (status) {
855 dev_info(&adapter->pdev->dev, "Exhausted VLAN HW filters.\n");
856 dev_info(&adapter->pdev->dev, "Disabling HW VLAN filtering.\n");
857 goto set_vlan_promisc;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700858 }
Ajit Khaparde1da87b72010-07-23 01:51:22 +0000859
Sathya Perlab31c50a2009-09-17 10:30:13 -0700860 return status;
Padmanabh Ratnakar0fc16eb2012-04-25 01:46:06 +0000861
862set_vlan_promisc:
863 status = be_cmd_vlan_config(adapter, adapter->if_handle,
864 NULL, 0, 1, 1);
865 return status;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700866}
867
/* ndo_vlan_rx_add_vid handler: record the new VID and re-program the
 * HW VLAN filter table.  The recorded VID is rolled back if the HW
 * update fails.  Returns 0 on success or a negative errno.
 */
static int be_vlan_add_vid(struct net_device *netdev, u16 vid)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	int status = 0;

	/* On BE-family chips only the PF may manage VLAN filters */
	if (!lancer_chip(adapter) && !be_physfn(adapter)) {
		status = -EINVAL;
		goto ret;
	}

	/* Packets with VID 0 are always received by Lancer by default */
	if (lancer_chip(adapter) && vid == 0)
		goto ret;

	adapter->vlan_tag[vid] = 1;
	/* NOTE(review): the "+ 1" bound is asymmetric with
	 * be_vlan_rem_vid(); presumably it accounts for the VID just
	 * added above — confirm against max_vlans semantics.
	 */
	if (adapter->vlans_added <= (adapter->max_vlans + 1))
		status = be_vid_config(adapter);

	if (!status)
		adapter->vlans_added++;
	else
		adapter->vlan_tag[vid] = 0;	/* roll back on failure */
ret:
	return status;
}
893
Jiri Pirko8e586132011-12-08 19:52:37 -0500894static int be_vlan_rem_vid(struct net_device *netdev, u16 vid)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700895{
896 struct be_adapter *adapter = netdev_priv(netdev);
Ajit Khaparde80817cb2011-12-30 12:15:12 +0000897 int status = 0;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700898
Padmanabh Ratnakara85e9982012-10-20 06:02:40 +0000899 if (!lancer_chip(adapter) && !be_physfn(adapter)) {
Ajit Khaparde80817cb2011-12-30 12:15:12 +0000900 status = -EINVAL;
901 goto ret;
902 }
Sarveshwar Bandiba343c72010-03-31 02:56:12 +0000903
Padmanabh Ratnakara85e9982012-10-20 06:02:40 +0000904 /* Packets with VID 0 are always received by Lancer by default */
905 if (lancer_chip(adapter) && vid == 0)
906 goto ret;
907
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700908 adapter->vlan_tag[vid] = 0;
Ajit Khaparde82903e42010-02-09 01:34:57 +0000909 if (adapter->vlans_added <= adapter->max_vlans)
Sathya Perla10329df2012-06-05 19:37:18 +0000910 status = be_vid_config(adapter);
Jiri Pirko8e586132011-12-08 19:52:37 -0500911
Ajit Khaparde80817cb2011-12-30 12:15:12 +0000912 if (!status)
913 adapter->vlans_added--;
914 else
915 adapter->vlan_tag[vid] = 1;
916ret:
917 return status;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700918}
919
/* ndo_set_rx_mode handler: program the RX filter (promiscuous,
 * multicast, unicast MAC list) to match the netdev's current flags
 * and address lists, falling back to promiscuous modes when the HW
 * filter capacity is exceeded.
 */
static void be_set_rx_mode(struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	int status;

	if (netdev->flags & IFF_PROMISC) {
		be_cmd_rx_filter(adapter, IFF_PROMISC, ON);
		adapter->promiscuous = true;
		goto done;
	}

	/* BE was previously in promiscuous mode; disable it */
	if (adapter->promiscuous) {
		adapter->promiscuous = false;
		be_cmd_rx_filter(adapter, IFF_PROMISC, OFF);

		/* re-program the VLAN filters that promisc mode skipped */
		if (adapter->vlans_added)
			be_vid_config(adapter);
	}

	/* Enable multicast promisc if num configured exceeds what we support */
	if (netdev->flags & IFF_ALLMULTI ||
	    netdev_mc_count(netdev) > adapter->max_mcast_mac) {
		be_cmd_rx_filter(adapter, IFF_ALLMULTI, ON);
		goto done;
	}

	if (netdev_uc_count(netdev) != adapter->uc_macs) {
		struct netdev_hw_addr *ha;
		int i = 1; /* First slot is claimed by the Primary MAC */

		/* Flush the old unicast MAC list before re-adding */
		for (; adapter->uc_macs > 0; adapter->uc_macs--, i++) {
			be_cmd_pmac_del(adapter, adapter->if_handle,
					adapter->pmac_id[i], 0);
		}

		/* Too many unicast MACs for the HW: go fully promiscuous */
		if (netdev_uc_count(netdev) > adapter->max_pmac_cnt) {
			be_cmd_rx_filter(adapter, IFF_PROMISC, ON);
			adapter->promiscuous = true;
			goto done;
		}

		netdev_for_each_uc_addr(ha, adapter->netdev) {
			adapter->uc_macs++; /* First slot is for Primary MAC */
			be_cmd_pmac_add(adapter, (u8 *)ha->addr,
					adapter->if_handle,
					&adapter->pmac_id[adapter->uc_macs], 0);
		}
	}

	status = be_cmd_rx_filter(adapter, IFF_MULTICAST, ON);

	/* Set to MCAST promisc mode if setting MULTICAST address fails */
	if (status) {
		dev_info(&adapter->pdev->dev, "Exhausted multicast HW filters.\n");
		dev_info(&adapter->pdev->dev, "Disabling HW multicast filtering.\n");
		be_cmd_rx_filter(adapter, IFF_ALLMULTI, ON);
	}
done:
	return;
}
981
/* ndo_set_vf_mac handler: replace a VF's MAC address.  On Lancer the
 * MAC is managed via the MAC list; on BE3 the old pmac is deleted and
 * the new one added.  Returns 0 on success or a negative errno.
 */
static int be_set_vf_mac(struct net_device *netdev, int vf, u8 *mac)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf];
	int status;
	bool active_mac = false;
	u32 pmac_id;
	u8 old_mac[ETH_ALEN];

	if (!sriov_enabled(adapter))
		return -EPERM;

	if (!is_valid_ether_addr(mac) || vf >= adapter->num_vfs)
		return -EINVAL;

	if (lancer_chip(adapter)) {
		/* delete the currently-active MAC (if any) before
		 * programming the new one via the MAC list
		 */
		status = be_cmd_get_mac_from_list(adapter, old_mac, &active_mac,
						  &pmac_id, vf + 1);
		if (!status && active_mac)
			be_cmd_pmac_del(adapter, vf_cfg->if_handle,
					pmac_id, vf + 1);

		status = be_cmd_set_mac_list(adapter, mac, 1, vf + 1);
	} else {
		/* NOTE(review): the status of pmac_del is immediately
		 * overwritten by pmac_add, so a failed delete is
		 * silently ignored — confirm this is intentional.
		 */
		status = be_cmd_pmac_del(adapter, vf_cfg->if_handle,
					 vf_cfg->pmac_id, vf + 1);

		status = be_cmd_pmac_add(adapter, mac, vf_cfg->if_handle,
					 &vf_cfg->pmac_id, vf + 1);
	}

	if (status)
		dev_err(&adapter->pdev->dev, "MAC %pM set on VF %d Failed\n",
			mac, vf);
	else
		memcpy(vf_cfg->mac_addr, mac, ETH_ALEN);

	return status;
}
1021
Ajit Khaparde64600ea2010-07-23 01:50:34 +00001022static int be_get_vf_config(struct net_device *netdev, int vf,
1023 struct ifla_vf_info *vi)
1024{
1025 struct be_adapter *adapter = netdev_priv(netdev);
Sathya Perla11ac75e2011-12-13 00:58:50 +00001026 struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf];
Ajit Khaparde64600ea2010-07-23 01:50:34 +00001027
Sathya Perla11ac75e2011-12-13 00:58:50 +00001028 if (!sriov_enabled(adapter))
Ajit Khaparde64600ea2010-07-23 01:50:34 +00001029 return -EPERM;
1030
Sathya Perla11ac75e2011-12-13 00:58:50 +00001031 if (vf >= adapter->num_vfs)
Ajit Khaparde64600ea2010-07-23 01:50:34 +00001032 return -EINVAL;
1033
1034 vi->vf = vf;
Sathya Perla11ac75e2011-12-13 00:58:50 +00001035 vi->tx_rate = vf_cfg->tx_rate;
1036 vi->vlan = vf_cfg->vlan_tag;
Ajit Khaparde64600ea2010-07-23 01:50:34 +00001037 vi->qos = 0;
Sathya Perla11ac75e2011-12-13 00:58:50 +00001038 memcpy(&vi->mac, vf_cfg->mac_addr, ETH_ALEN);
Ajit Khaparde64600ea2010-07-23 01:50:34 +00001039
1040 return 0;
1041}
1042
/* ndo_set_vf_vlan handler: program (or reset) transparent VLAN
 * tagging for a VF via the hidden-switch config.
 * NOTE(review): the 'qos' parameter is accepted but never used —
 * confirm whether QoS should be validated/programmed here.
 */
static int be_set_vf_vlan(struct net_device *netdev,
			int vf, u16 vlan, u8 qos)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	int status = 0;

	if (!sriov_enabled(adapter))
		return -EPERM;

	if (vf >= adapter->num_vfs || vlan > 4095)
		return -EINVAL;

	if (vlan) {
		if (adapter->vf_cfg[vf].vlan_tag != vlan) {
			/* If this is new value, program it. Else skip. */
			adapter->vf_cfg[vf].vlan_tag = vlan;

			status = be_cmd_set_hsw_config(adapter, vlan,
				vf + 1, adapter->vf_cfg[vf].if_handle);
		}
	} else {
		/* Reset Transparent Vlan Tagging. */
		adapter->vf_cfg[vf].vlan_tag = 0;
		/* fall back to the default VID captured at setup time */
		vlan = adapter->vf_cfg[vf].def_vid;
		status = be_cmd_set_hsw_config(adapter, vlan, vf + 1,
			adapter->vf_cfg[vf].if_handle);
	}


	if (status)
		dev_info(&adapter->pdev->dev,
				"VLAN %d config on VF %d failed\n", vlan, vf);
	return status;
}
1077
Ajit Khapardee1d18732010-07-23 01:52:13 +00001078static int be_set_vf_tx_rate(struct net_device *netdev,
1079 int vf, int rate)
1080{
1081 struct be_adapter *adapter = netdev_priv(netdev);
1082 int status = 0;
1083
Sathya Perla11ac75e2011-12-13 00:58:50 +00001084 if (!sriov_enabled(adapter))
Ajit Khapardee1d18732010-07-23 01:52:13 +00001085 return -EPERM;
1086
Ajit Khaparde94f434c2011-12-30 12:15:30 +00001087 if (vf >= adapter->num_vfs)
Ajit Khapardee1d18732010-07-23 01:52:13 +00001088 return -EINVAL;
1089
Ajit Khaparde94f434c2011-12-30 12:15:30 +00001090 if (rate < 100 || rate > 10000) {
1091 dev_err(&adapter->pdev->dev,
1092 "tx rate must be between 100 and 10000 Mbps\n");
1093 return -EINVAL;
1094 }
Ajit Khapardee1d18732010-07-23 01:52:13 +00001095
Padmanabh Ratnakard5c18472012-10-20 06:01:53 +00001096 if (lancer_chip(adapter))
1097 status = be_cmd_set_profile_config(adapter, rate / 10, vf + 1);
1098 else
1099 status = be_cmd_set_qos(adapter, rate / 10, vf + 1);
Ajit Khapardee1d18732010-07-23 01:52:13 +00001100
1101 if (status)
Ajit Khaparde94f434c2011-12-30 12:15:30 +00001102 dev_err(&adapter->pdev->dev,
Ajit Khapardee1d18732010-07-23 01:52:13 +00001103 "tx rate %d on VF %d failed\n", rate, vf);
Ajit Khaparde94f434c2011-12-30 12:15:30 +00001104 else
1105 adapter->vf_cfg[vf].tx_rate = rate;
Ajit Khapardee1d18732010-07-23 01:52:13 +00001106 return status;
1107}
1108
/* Walk the PCI bus counting this adapter's virtual functions.
 * Returns the number of VFs assigned to guests when vf_state ==
 * ASSIGNED, otherwise the total number of VFs found.
 */
static int be_find_vfs(struct be_adapter *adapter, int vf_state)
{
	struct pci_dev *dev, *pdev = adapter->pdev;
	int vfs = 0, assigned_vfs = 0, pos;
	u16 offset, stride;

	pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_SRIOV);
	if (!pos)
		return 0;	/* device has no SR-IOV capability */
	/* NOTE(review): offset/stride are read here but never used in
	 * this function — confirm whether they can be dropped.
	 */
	pci_read_config_word(pdev, pos + PCI_SRIOV_VF_OFFSET, &offset);
	pci_read_config_word(pdev, pos + PCI_SRIOV_VF_STRIDE, &stride);

	/* pci_get_device() drops the reference on the device passed in,
	 * so this loop does not leak device references.
	 */
	dev = pci_get_device(pdev->vendor, PCI_ANY_ID, NULL);
	while (dev) {
		if (dev->is_virtfn && pci_physfn(dev) == pdev) {
			vfs++;
			if (dev->dev_flags & PCI_DEV_FLAGS_ASSIGNED)
				assigned_vfs++;
		}
		dev = pci_get_device(pdev->vendor, PCI_ANY_ID, dev);
	}
	return (vf_state == ASSIGNED) ? assigned_vfs : vfs;
}
1132
/* Adaptive interrupt coalescing: recompute the EQ delay from the RX
 * packet rate (once per second) and program it into the HW if it
 * changed.  With AIC disabled the statically configured delay is used.
 */
static void be_eqd_update(struct be_adapter *adapter, struct be_eq_obj *eqo)
{
	struct be_rx_stats *stats = rx_stats(&adapter->rx_obj[eqo->idx]);
	ulong now = jiffies;
	ulong delta = now - stats->rx_jiffies;
	u64 pkts;
	unsigned int start, eqd;

	if (!eqo->enable_aic) {
		eqd = eqo->eqd;	/* fixed, user-configured delay */
		goto modify_eqd;
	}

	/* EQs beyond the RX queues have no RX stats to adapt from */
	if (eqo->idx >= adapter->num_rx_qs)
		return;

	stats = rx_stats(&adapter->rx_obj[eqo->idx]);

	/* Wrapped around */
	if (time_before(now, stats->rx_jiffies)) {
		stats->rx_jiffies = now;
		return;
	}

	/* Update once a second */
	if (delta < HZ)
		return;

	/* read the 64-bit packet counter consistently on 32-bit hosts */
	do {
		start = u64_stats_fetch_begin_bh(&stats->sync);
		pkts = stats->rx_pkts;
	} while (u64_stats_fetch_retry_bh(&stats->sync, start));

	stats->rx_pps = (unsigned long)(pkts - stats->rx_pkts_prev) / (delta / HZ);
	stats->rx_pkts_prev = pkts;
	stats->rx_jiffies = now;
	/* scale pkts/sec into an EQ delay, clamped to the EQ's range */
	eqd = (stats->rx_pps / 110000) << 3;
	eqd = min(eqd, eqo->max_eqd);
	eqd = max(eqd, eqo->min_eqd);
	if (eqd < 10)
		eqd = 0;	/* very low rate: no coalescing delay */

modify_eqd:
	/* only issue the FW command when the delay actually changed */
	if (eqd != eqo->cur_eqd) {
		be_cmd_modify_eqd(adapter, eqo->q.id, eqd);
		eqo->cur_eqd = eqd;
	}
}
1181
Sathya Perla3abcded2010-10-03 22:12:27 -07001182static void be_rx_stats_update(struct be_rx_obj *rxo,
Sathya Perla2e588f82011-03-11 02:49:26 +00001183 struct be_rx_compl_info *rxcp)
Sathya Perla4097f662009-03-24 16:40:13 -07001184{
Sathya Perlaac124ff2011-07-25 19:10:14 +00001185 struct be_rx_stats *stats = rx_stats(rxo);
Sathya Perla4097f662009-03-24 16:40:13 -07001186
Sathya Perlaab1594e2011-07-25 19:10:15 +00001187 u64_stats_update_begin(&stats->sync);
Sathya Perla3abcded2010-10-03 22:12:27 -07001188 stats->rx_compl++;
Sathya Perla2e588f82011-03-11 02:49:26 +00001189 stats->rx_bytes += rxcp->pkt_size;
Sathya Perla3abcded2010-10-03 22:12:27 -07001190 stats->rx_pkts++;
Sathya Perla2e588f82011-03-11 02:49:26 +00001191 if (rxcp->pkt_type == BE_MULTICAST_PACKET)
Sathya Perla3abcded2010-10-03 22:12:27 -07001192 stats->rx_mcast_pkts++;
Sathya Perla2e588f82011-03-11 02:49:26 +00001193 if (rxcp->err)
Sathya Perlaac124ff2011-07-25 19:10:14 +00001194 stats->rx_compl_err++;
Sathya Perlaab1594e2011-07-25 19:10:15 +00001195 u64_stats_update_end(&stats->sync);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001196}
1197
Sathya Perla2e588f82011-03-11 02:49:26 +00001198static inline bool csum_passed(struct be_rx_compl_info *rxcp)
Ajit Khaparde728a9972009-04-13 15:41:22 -07001199{
Padmanabh Ratnakar19fad862011-03-07 03:08:16 +00001200 /* L4 checksum is not reliable for non TCP/UDP packets.
1201 * Also ignore ipcksm for ipv6 pkts */
Sathya Perla2e588f82011-03-11 02:49:26 +00001202 return (rxcp->tcpf || rxcp->udpf) && rxcp->l4_csum &&
1203 (rxcp->ip_csum || rxcp->ipv6);
Ajit Khaparde728a9972009-04-13 15:41:22 -07001204}
1205
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001206static struct be_rx_page_info *get_rx_page_info(struct be_rx_obj *rxo,
1207 u16 frag_idx)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001208{
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001209 struct be_adapter *adapter = rxo->adapter;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001210 struct be_rx_page_info *rx_page_info;
Sathya Perla3abcded2010-10-03 22:12:27 -07001211 struct be_queue_info *rxq = &rxo->q;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001212
Sathya Perla3abcded2010-10-03 22:12:27 -07001213 rx_page_info = &rxo->page_info_tbl[frag_idx];
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001214 BUG_ON(!rx_page_info->page);
1215
Ajit Khaparde205859a2010-02-09 01:34:21 +00001216 if (rx_page_info->last_page_user) {
Ivan Vecera2b7bceb2011-02-02 08:05:12 +00001217 dma_unmap_page(&adapter->pdev->dev,
1218 dma_unmap_addr(rx_page_info, bus),
1219 adapter->big_page_size, DMA_FROM_DEVICE);
Ajit Khaparde205859a2010-02-09 01:34:21 +00001220 rx_page_info->last_page_user = false;
1221 }
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001222
1223 atomic_dec(&rxq->used);
1224 return rx_page_info;
1225}
1226
1227/* Throwaway the data in the Rx completion */
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001228static void be_rx_compl_discard(struct be_rx_obj *rxo,
1229 struct be_rx_compl_info *rxcp)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001230{
Sathya Perla3abcded2010-10-03 22:12:27 -07001231 struct be_queue_info *rxq = &rxo->q;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001232 struct be_rx_page_info *page_info;
Sathya Perla2e588f82011-03-11 02:49:26 +00001233 u16 i, num_rcvd = rxcp->num_rcvd;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001234
Padmanabh Ratnakare80d9da2011-03-07 03:07:58 +00001235 for (i = 0; i < num_rcvd; i++) {
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001236 page_info = get_rx_page_info(rxo, rxcp->rxq_idx);
Padmanabh Ratnakare80d9da2011-03-07 03:07:58 +00001237 put_page(page_info->page);
1238 memset(page_info, 0, sizeof(*page_info));
Sathya Perla2e588f82011-03-11 02:49:26 +00001239 index_inc(&rxcp->rxq_idx, rxq->len);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001240 }
1241}
1242
/*
 * skb_fill_rx_data forms a complete skb for an ether frame
 * indicated by rxcp.  The first fragment's header bytes are copied
 * into the skb's linear area; the rest of the data is attached as
 * page frags, coalescing consecutive fragments that share a page.
 */
static void skb_fill_rx_data(struct be_rx_obj *rxo, struct sk_buff *skb,
			struct be_rx_compl_info *rxcp)
{
	struct be_queue_info *rxq = &rxo->q;
	struct be_rx_page_info *page_info;
	u16 i, j;
	u16 hdr_len, curr_frag_len, remaining;
	u8 *start;

	page_info = get_rx_page_info(rxo, rxcp->rxq_idx);
	start = page_address(page_info->page) + page_info->page_offset;
	prefetch(start);

	/* Copy data in the first descriptor of this completion */
	curr_frag_len = min(rxcp->pkt_size, rx_frag_size);

	skb->len = curr_frag_len;
	if (curr_frag_len <= BE_HDR_LEN) { /* tiny packet */
		memcpy(skb->data, start, curr_frag_len);
		/* Complete packet has now been moved to data */
		put_page(page_info->page);
		skb->data_len = 0;
		skb->tail += curr_frag_len;
	} else {
		/* copy only the Ethernet header; leave the payload as a
		 * page frag to avoid copying it
		 */
		hdr_len = ETH_HLEN;
		memcpy(skb->data, start, hdr_len);
		skb_shinfo(skb)->nr_frags = 1;
		skb_frag_set_page(skb, 0, page_info->page);
		skb_shinfo(skb)->frags[0].page_offset =
					page_info->page_offset + hdr_len;
		skb_frag_size_set(&skb_shinfo(skb)->frags[0], curr_frag_len - hdr_len);
		skb->data_len = curr_frag_len - hdr_len;
		skb->truesize += rx_frag_size;
		skb->tail += hdr_len;
	}
	page_info->page = NULL;	/* ownership transferred to the skb */

	if (rxcp->pkt_size <= rx_frag_size) {
		BUG_ON(rxcp->num_rcvd != 1);
		return;
	}

	/* More frags present for this completion */
	index_inc(&rxcp->rxq_idx, rxq->len);
	remaining = rxcp->pkt_size - curr_frag_len;
	for (i = 1, j = 0; i < rxcp->num_rcvd; i++) {
		page_info = get_rx_page_info(rxo, rxcp->rxq_idx);
		curr_frag_len = min(remaining, rx_frag_size);

		/* Coalesce all frags from the same physical page in one slot */
		if (page_info->page_offset == 0) {
			/* Fresh page */
			j++;
			skb_frag_set_page(skb, j, page_info->page);
			skb_shinfo(skb)->frags[j].page_offset =
							page_info->page_offset;
			skb_frag_size_set(&skb_shinfo(skb)->frags[j], 0);
			skb_shinfo(skb)->nr_frags++;
		} else {
			/* same page as previous frag: drop the extra ref */
			put_page(page_info->page);
		}

		skb_frag_size_add(&skb_shinfo(skb)->frags[j], curr_frag_len);
		skb->len += curr_frag_len;
		skb->data_len += curr_frag_len;
		skb->truesize += rx_frag_size;
		remaining -= curr_frag_len;
		index_inc(&rxcp->rxq_idx, rxq->len);
		page_info->page = NULL;
	}
	BUG_ON(j > MAX_SKB_FRAGS);
}
1319
/* Process the RX completion indicated by rxcp when GRO is disabled.
 * Allocates a fresh skb, copies/attaches the received fragments into it,
 * sets checksum/VLAN/RSS metadata and hands it to the stack.
 * Runs in NAPI (softirq) context.
 */
static void be_rx_compl_process(struct be_rx_obj *rxo,
			struct be_rx_compl_info *rxcp)
{
	struct be_adapter *adapter = rxo->adapter;
	struct net_device *netdev = adapter->netdev;
	struct sk_buff *skb;

	skb = netdev_alloc_skb_ip_align(netdev, BE_RX_SKB_ALLOC_SIZE);
	if (unlikely(!skb)) {
		/* No skb memory: count the drop and release the posted
		 * RX buffers for this completion back to the free pool */
		rx_stats(rxo)->rx_drops_no_skbs++;
		be_rx_compl_discard(rxo, rxcp);
		return;
	}

	skb_fill_rx_data(rxo, skb, rxcp);

	/* Trust HW checksum only if the netdev offload is on and the
	 * completion flags say the csum verification passed */
	if (likely((netdev->features & NETIF_F_RXCSUM) && csum_passed(rxcp)))
		skb->ip_summed = CHECKSUM_UNNECESSARY;
	else
		skb_checksum_none_assert(skb);

	skb->protocol = eth_type_trans(skb, netdev);
	/* Record which RX ring this packet arrived on (index of rxo) */
	skb_record_rx_queue(skb, rxo - &adapter->rx_obj[0]);
	if (netdev->features & NETIF_F_RXHASH)
		skb->rxhash = rxcp->rss_hash;


	if (rxcp->vlanf)
		__vlan_hwaccel_put_tag(skb, rxcp->vlan_tag);

	netif_receive_skb(skb);
}
1353
/* Process the RX completion indicated by rxcp when GRO is enabled.
 * Attaches the received page fragments directly to a napi-provided skb
 * (zero-copy) and feeds it into the GRO engine.
 */
void be_rx_compl_process_gro(struct be_rx_obj *rxo, struct napi_struct *napi,
			     struct be_rx_compl_info *rxcp)
{
	struct be_adapter *adapter = rxo->adapter;
	struct be_rx_page_info *page_info;
	struct sk_buff *skb = NULL;
	struct be_queue_info *rxq = &rxo->q;
	u16 remaining, curr_frag_len;
	u16 i, j;

	skb = napi_get_frags(napi);
	if (!skb) {
		/* GRO skb unavailable: drop and recycle the RX buffers */
		be_rx_compl_discard(rxo, rxcp);
		return;
	}

	remaining = rxcp->pkt_size;
	/* j starts at -1 (wraps as u16) so the first iteration's j++
	 * lands on frag slot 0 */
	for (i = 0, j = -1; i < rxcp->num_rcvd; i++) {
		page_info = get_rx_page_info(rxo, rxcp->rxq_idx);

		curr_frag_len = min(remaining, rx_frag_size);

		/* Coalesce all frags from the same physical page in one slot */
		if (i == 0 || page_info->page_offset == 0) {
			/* First frag or Fresh page */
			j++;
			skb_frag_set_page(skb, j, page_info->page);
			skb_shinfo(skb)->frags[j].page_offset =
							page_info->page_offset;
			skb_frag_size_set(&skb_shinfo(skb)->frags[j], 0);
		} else {
			/* Same page as previous frag: the slot already holds
			 * a reference, so drop this buffer's extra ref */
			put_page(page_info->page);
		}
		skb_frag_size_add(&skb_shinfo(skb)->frags[j], curr_frag_len);
		skb->truesize += rx_frag_size;
		remaining -= curr_frag_len;
		index_inc(&rxcp->rxq_idx, rxq->len);
		memset(page_info, 0, sizeof(*page_info));
	}
	BUG_ON(j > MAX_SKB_FRAGS);

	skb_shinfo(skb)->nr_frags = j + 1;
	skb->len = rxcp->pkt_size;
	skb->data_len = rxcp->pkt_size;
	/* GRO path is only taken for packets whose csum HW verified */
	skb->ip_summed = CHECKSUM_UNNECESSARY;
	skb_record_rx_queue(skb, rxo - &adapter->rx_obj[0]);
	if (adapter->netdev->features & NETIF_F_RXHASH)
		skb->rxhash = rxcp->rss_hash;

	if (rxcp->vlanf)
		__vlan_hwaccel_put_tag(skb, rxcp->vlan_tag);

	napi_gro_frags(napi);
}
1409
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001410static void be_parse_rx_compl_v1(struct be_eth_rx_compl *compl,
1411 struct be_rx_compl_info *rxcp)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001412{
Sathya Perla2e588f82011-03-11 02:49:26 +00001413 rxcp->pkt_size =
1414 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, pktsize, compl);
1415 rxcp->vlanf = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, vtp, compl);
1416 rxcp->err = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, err, compl);
1417 rxcp->tcpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, tcpf, compl);
Padmanabh Ratnakar9ecb42f2011-03-15 14:57:09 -07001418 rxcp->udpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, udpf, compl);
Sathya Perla2e588f82011-03-11 02:49:26 +00001419 rxcp->ip_csum =
1420 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, ipcksm, compl);
1421 rxcp->l4_csum =
1422 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, l4_cksm, compl);
1423 rxcp->ipv6 =
1424 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, ip_version, compl);
1425 rxcp->rxq_idx =
1426 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, fragndx, compl);
1427 rxcp->num_rcvd =
1428 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, numfrags, compl);
1429 rxcp->pkt_type =
1430 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, cast_enc, compl);
Ajit Khaparde4b972912011-04-06 18:07:43 +00001431 rxcp->rss_hash =
Sarveshwar Bandic2979772012-07-25 21:29:50 +00001432 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, rsshash, compl);
Sathya Perla15d72182011-03-21 20:49:26 +00001433 if (rxcp->vlanf) {
1434 rxcp->vtm = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, vtm,
David S. Miller3c709f82011-05-11 14:26:15 -04001435 compl);
1436 rxcp->vlan_tag = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, vlan_tag,
1437 compl);
Sathya Perla15d72182011-03-21 20:49:26 +00001438 }
Sathya Perla12004ae2011-08-02 19:57:46 +00001439 rxcp->port = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, port, compl);
Sathya Perla2e588f82011-03-11 02:49:26 +00001440}
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001441
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001442static void be_parse_rx_compl_v0(struct be_eth_rx_compl *compl,
1443 struct be_rx_compl_info *rxcp)
Sathya Perla2e588f82011-03-11 02:49:26 +00001444{
1445 rxcp->pkt_size =
1446 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, pktsize, compl);
1447 rxcp->vlanf = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, vtp, compl);
1448 rxcp->err = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, err, compl);
1449 rxcp->tcpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, tcpf, compl);
Padmanabh Ratnakar9ecb42f2011-03-15 14:57:09 -07001450 rxcp->udpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, udpf, compl);
Sathya Perla2e588f82011-03-11 02:49:26 +00001451 rxcp->ip_csum =
1452 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, ipcksm, compl);
1453 rxcp->l4_csum =
1454 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, l4_cksm, compl);
1455 rxcp->ipv6 =
1456 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, ip_version, compl);
1457 rxcp->rxq_idx =
1458 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, fragndx, compl);
1459 rxcp->num_rcvd =
1460 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, numfrags, compl);
1461 rxcp->pkt_type =
1462 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, cast_enc, compl);
Ajit Khaparde4b972912011-04-06 18:07:43 +00001463 rxcp->rss_hash =
Sarveshwar Bandic2979772012-07-25 21:29:50 +00001464 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, rsshash, compl);
Sathya Perla15d72182011-03-21 20:49:26 +00001465 if (rxcp->vlanf) {
1466 rxcp->vtm = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, vtm,
David S. Miller3c709f82011-05-11 14:26:15 -04001467 compl);
1468 rxcp->vlan_tag = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, vlan_tag,
1469 compl);
Sathya Perla15d72182011-03-21 20:49:26 +00001470 }
Sathya Perla12004ae2011-08-02 19:57:46 +00001471 rxcp->port = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, port, compl);
Sathya Perla2e588f82011-03-11 02:49:26 +00001472}
1473
/* Fetch the next valid RX completion from rxo's CQ, or NULL if none.
 * Parses it into rxo->rxcp (v0 or v1 layout), sanitizes the VLAN flag,
 * clears the descriptor's valid bit and advances the CQ tail.
 * NOTE: returns a pointer to the per-rxo scratch rxcp, valid until the
 * next call on the same rxo.
 */
static struct be_rx_compl_info *be_rx_compl_get(struct be_rx_obj *rxo)
{
	struct be_eth_rx_compl *compl = queue_tail_node(&rxo->cq);
	struct be_rx_compl_info *rxcp = &rxo->rxcp;
	struct be_adapter *adapter = rxo->adapter;

	/* For checking the valid bit it is Ok to use either definition as the
	 * valid bit is at the same position in both v0 and v1 Rx compl */
	if (compl->dw[offsetof(struct amap_eth_rx_compl_v1, valid) / 32] == 0)
		return NULL;

	/* Read barrier: the valid-bit load above must complete before the
	 * rest of the DMA'ed completion is read below */
	rmb();
	be_dws_le_to_cpu(compl, sizeof(*compl));

	if (adapter->be3_native)
		be_parse_rx_compl_v1(compl, rxcp);
	else
		be_parse_rx_compl_v0(compl, rxcp);

	if (rxcp->vlanf) {
		/* vlanf could be wrongly set in some cards.
		 * ignore if vtm is not set */
		if ((adapter->function_mode & FLEX10_MODE) && !rxcp->vtm)
			rxcp->vlanf = 0;

		/* BE chips report the tag byte-swapped; Lancer does not */
		if (!lancer_chip(adapter))
			rxcp->vlan_tag = swab16(rxcp->vlan_tag);

		/* Strip the tag silently if it is the port's pvid and the
		 * host has not configured that vlan */
		if (adapter->pvid == (rxcp->vlan_tag & VLAN_VID_MASK) &&
		    !adapter->vlan_tag[rxcp->vlan_tag])
			rxcp->vlanf = 0;
	}

	/* As the compl has been parsed, reset it; we wont touch it again */
	compl->dw[offsetof(struct amap_eth_rx_compl_v1, valid) / 32] = 0;

	queue_tail_inc(&rxo->cq);
	return rxcp;
}
1513
Eric Dumazet1829b082011-03-01 05:48:12 +00001514static inline struct page *be_alloc_pages(u32 size, gfp_t gfp)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001515{
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001516 u32 order = get_order(size);
Eric Dumazet1829b082011-03-01 05:48:12 +00001517
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001518 if (order > 0)
Eric Dumazet1829b082011-03-01 05:48:12 +00001519 gfp |= __GFP_COMP;
1520 return alloc_pages(gfp, order);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001521}
1522
1523/*
1524 * Allocate a page, split it to fragments of size rx_frag_size and post as
1525 * receive buffers to BE
1526 */
Eric Dumazet1829b082011-03-01 05:48:12 +00001527static void be_post_rx_frags(struct be_rx_obj *rxo, gfp_t gfp)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001528{
Sathya Perla3abcded2010-10-03 22:12:27 -07001529 struct be_adapter *adapter = rxo->adapter;
Sathya Perla26d92f92010-01-21 22:52:08 -08001530 struct be_rx_page_info *page_info = NULL, *prev_page_info = NULL;
Sathya Perla3abcded2010-10-03 22:12:27 -07001531 struct be_queue_info *rxq = &rxo->q;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001532 struct page *pagep = NULL;
1533 struct be_eth_rx_d *rxd;
1534 u64 page_dmaaddr = 0, frag_dmaaddr;
1535 u32 posted, page_offset = 0;
1536
Sathya Perla3abcded2010-10-03 22:12:27 -07001537 page_info = &rxo->page_info_tbl[rxq->head];
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001538 for (posted = 0; posted < MAX_RX_POST && !page_info->page; posted++) {
1539 if (!pagep) {
Eric Dumazet1829b082011-03-01 05:48:12 +00001540 pagep = be_alloc_pages(adapter->big_page_size, gfp);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001541 if (unlikely(!pagep)) {
Sathya Perlaac124ff2011-07-25 19:10:14 +00001542 rx_stats(rxo)->rx_post_fail++;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001543 break;
1544 }
Ivan Vecera2b7bceb2011-02-02 08:05:12 +00001545 page_dmaaddr = dma_map_page(&adapter->pdev->dev, pagep,
1546 0, adapter->big_page_size,
1547 DMA_FROM_DEVICE);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001548 page_info->page_offset = 0;
1549 } else {
1550 get_page(pagep);
1551 page_info->page_offset = page_offset + rx_frag_size;
1552 }
1553 page_offset = page_info->page_offset;
1554 page_info->page = pagep;
FUJITA Tomonorifac6da52010-04-01 16:53:22 +00001555 dma_unmap_addr_set(page_info, bus, page_dmaaddr);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001556 frag_dmaaddr = page_dmaaddr + page_info->page_offset;
1557
1558 rxd = queue_head_node(rxq);
1559 rxd->fragpa_lo = cpu_to_le32(frag_dmaaddr & 0xFFFFFFFF);
1560 rxd->fragpa_hi = cpu_to_le32(upper_32_bits(frag_dmaaddr));
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001561
1562 /* Any space left in the current big page for another frag? */
1563 if ((page_offset + rx_frag_size + rx_frag_size) >
1564 adapter->big_page_size) {
1565 pagep = NULL;
1566 page_info->last_page_user = true;
1567 }
Sathya Perla26d92f92010-01-21 22:52:08 -08001568
1569 prev_page_info = page_info;
1570 queue_head_inc(rxq);
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001571 page_info = &rxo->page_info_tbl[rxq->head];
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001572 }
1573 if (pagep)
Sathya Perla26d92f92010-01-21 22:52:08 -08001574 prev_page_info->last_page_user = true;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001575
1576 if (posted) {
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001577 atomic_add(posted, &rxq->used);
Sathya Perla8788fdc2009-07-27 22:52:03 +00001578 be_rxq_notify(adapter, rxq->id, posted);
Sathya Perlaea1dae12009-03-19 23:56:20 -07001579 } else if (atomic_read(&rxq->used) == 0) {
1580 /* Let be_worker replenish when memory is available */
Sathya Perla3abcded2010-10-03 22:12:27 -07001581 rxo->rx_post_starved = true;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001582 }
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001583}
1584
/* Fetch the next valid TX completion from tx_cq, or NULL if none.
 * Clears the descriptor's valid bit and advances the CQ tail so the
 * same entry is not consumed twice.
 */
static struct be_eth_tx_compl *be_tx_compl_get(struct be_queue_info *tx_cq)
{
	struct be_eth_tx_compl *txcp = queue_tail_node(tx_cq);

	if (txcp->dw[offsetof(struct amap_eth_tx_compl, valid) / 32] == 0)
		return NULL;

	/* Read barrier: check the valid bit before reading the rest of
	 * the DMA'ed completion entry */
	rmb();
	be_dws_le_to_cpu(txcp, sizeof(*txcp));

	txcp->dw[offsetof(struct amap_eth_tx_compl, valid) / 32] = 0;

	queue_tail_inc(tx_cq);
	return txcp;
}
1600
/* Reclaim the TXQ WRBs of one completed skb.
 * Walks the TX ring from its current tail up to and including last_index,
 * DMA-unmapping each fragment WRB, then frees the skb.
 * Returns the number of WRBs consumed (including the header WRB) so the
 * caller can decrement txq->used.
 */
static u16 be_tx_compl_process(struct be_adapter *adapter,
		struct be_tx_obj *txo, u16 last_index)
{
	struct be_queue_info *txq = &txo->q;
	struct be_eth_wrb *wrb;
	struct sk_buff **sent_skbs = txo->sent_skb_list;
	struct sk_buff *sent_skb;
	u16 cur_index, num_wrbs = 1; /* account for hdr wrb */
	bool unmap_skb_hdr = true;

	/* The skb was recorded at the header WRB's index when queued */
	sent_skb = sent_skbs[txq->tail];
	BUG_ON(!sent_skb);
	sent_skbs[txq->tail] = NULL;

	/* skip header wrb */
	queue_tail_inc(txq);

	do {
		cur_index = txq->tail;
		wrb = queue_tail_node(txq);
		/* Only the first data WRB maps the skb's linear header;
		 * subsequent ones map page frags */
		unmap_tx_frag(&adapter->pdev->dev, wrb,
			      (unmap_skb_hdr && skb_headlen(sent_skb)));
		unmap_skb_hdr = false;

		num_wrbs++;
		queue_tail_inc(txq);
	} while (cur_index != last_index);

	kfree_skb(sent_skb);
	return num_wrbs;
}
1632
/* Return the number of events in the event queue.
 * Consumes each valid EQ entry (clearing evt so it is not re-counted)
 * and advances the EQ tail; stops at the first not-yet-valid entry.
 */
static inline int events_get(struct be_eq_obj *eqo)
{
	struct be_eq_entry *eqe;
	int num = 0;

	do {
		eqe = queue_tail_node(&eqo->q);
		if (eqe->evt == 0)
			break;

		/* Read barrier between the evt validity check above and
		 * consuming/clearing the DMA'ed entry below */
		rmb();
		eqe->evt = 0;
		num++;
		queue_tail_inc(&eqo->q);
	} while (true);

	return num;
}
1652
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001653static int event_handle(struct be_eq_obj *eqo)
Sathya Perla859b1e42009-08-10 03:43:51 +00001654{
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001655 bool rearm = false;
1656 int num = events_get(eqo);
Sathya Perla859b1e42009-08-10 03:43:51 +00001657
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001658 /* Deal with any spurious interrupts that come without events */
1659 if (!num)
1660 rearm = true;
Sathya Perla859b1e42009-08-10 03:43:51 +00001661
Padmanabh Ratnakaraf311fe2012-04-25 01:46:39 +00001662 if (num || msix_enabled(eqo->adapter))
1663 be_eq_notify(eqo->adapter, eqo->q.id, rearm, true, num);
1664
Sathya Perla859b1e42009-08-10 03:43:51 +00001665 if (num)
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001666 napi_schedule(&eqo->napi);
1667
1668 return num;
Sathya Perla859b1e42009-08-10 03:43:51 +00001669}
1670
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001671/* Leaves the EQ is disarmed state */
1672static void be_eq_clean(struct be_eq_obj *eqo)
1673{
1674 int num = events_get(eqo);
1675
1676 be_eq_notify(eqo->adapter, eqo->q.id, false, true, num);
1677}
1678
/* Flush rxo's RX path on teardown: discard all pending completions,
 * then free every RX buffer still posted to the ring, and reset the
 * ring indices.
 */
static void be_rx_cq_clean(struct be_rx_obj *rxo)
{
	struct be_rx_page_info *page_info;
	struct be_queue_info *rxq = &rxo->q;
	struct be_queue_info *rx_cq = &rxo->cq;
	struct be_rx_compl_info *rxcp;
	u16 tail;

	/* First cleanup pending rx completions */
	while ((rxcp = be_rx_compl_get(rxo)) != NULL) {
		be_rx_compl_discard(rxo, rxcp);
		be_cq_notify(rxo->adapter, rx_cq->id, false, 1);
	}

	/* Then free posted rx buffer that were not used */
	/* The oldest posted buffer sits 'used' slots behind head (mod len) */
	tail = (rxq->head + rxq->len - atomic_read(&rxq->used)) % rxq->len;
	for (; atomic_read(&rxq->used) > 0; index_inc(&tail, rxq->len)) {
		/* get_rx_page_info also decrements rxq->used */
		page_info = get_rx_page_info(rxo, tail);
		put_page(page_info->page);
		memset(page_info, 0, sizeof(*page_info));
	}
	BUG_ON(atomic_read(&rxq->used));
	rxq->tail = rxq->head = 0;
}
1703
/* Drain all TX queues on teardown.
 * Polls each TXQ's CQ for up to ~200ms, reclaiming completed WRBs; then
 * forcibly frees any posted skbs whose completions will never arrive.
 */
static void be_tx_compl_clean(struct be_adapter *adapter)
{
	struct be_tx_obj *txo;
	struct be_queue_info *txq;
	struct be_eth_tx_compl *txcp;
	u16 end_idx, cmpl = 0, timeo = 0, num_wrbs = 0;
	struct sk_buff *sent_skb;
	bool dummy_wrb;
	int i, pending_txqs;

	/* Wait for a max of 200ms for all the tx-completions to arrive. */
	do {
		pending_txqs = adapter->num_tx_qs;

		for_all_tx_queues(adapter, txo, i) {
			txq = &txo->q;
			while ((txcp = be_tx_compl_get(&txo->cq))) {
				end_idx =
					AMAP_GET_BITS(struct amap_eth_tx_compl,
						      wrb_index, txcp);
				num_wrbs += be_tx_compl_process(adapter, txo,
								end_idx);
				cmpl++;
			}
			/* Ack this queue's completions and reset the per-queue
			 * accumulators before moving to the next txq */
			if (cmpl) {
				be_cq_notify(adapter, txo->cq.id, false, cmpl);
				atomic_sub(num_wrbs, &txq->used);
				cmpl = 0;
				num_wrbs = 0;
			}
			if (atomic_read(&txq->used) == 0)
				pending_txqs--;
		}

		if (pending_txqs == 0 || ++timeo > 200)
			break;

		mdelay(1);
	} while (true);

	for_all_tx_queues(adapter, txo, i) {
		txq = &txo->q;
		if (atomic_read(&txq->used))
			dev_err(&adapter->pdev->dev, "%d pending tx-compls\n",
				atomic_read(&txq->used));

		/* free posted tx for which compls will never arrive */
		while (atomic_read(&txq->used)) {
			sent_skb = txo->sent_skb_list[txq->tail];
			end_idx = txq->tail;
			/* Recompute this skb's WRB span to find its last index,
			 * then reclaim it as if a completion had arrived */
			num_wrbs = wrb_cnt_for_skb(adapter, sent_skb,
						   &dummy_wrb);
			index_adv(&end_idx, num_wrbs - 1, txq->len);
			num_wrbs = be_tx_compl_process(adapter, txo, end_idx);
			atomic_sub(num_wrbs, &txq->used);
		}
	}
}
1762
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001763static void be_evt_queues_destroy(struct be_adapter *adapter)
1764{
1765 struct be_eq_obj *eqo;
1766 int i;
1767
1768 for_all_evt_queues(adapter, eqo, i) {
Padmanabh Ratnakar19d59aa2012-07-12 03:57:21 +00001769 if (eqo->q.created) {
1770 be_eq_clean(eqo);
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001771 be_cmd_q_destroy(adapter, &eqo->q, QTYPE_EQ);
Padmanabh Ratnakar19d59aa2012-07-12 03:57:21 +00001772 }
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001773 be_queue_free(adapter, &eqo->q);
1774 }
1775}
1776
1777static int be_evt_queues_create(struct be_adapter *adapter)
1778{
1779 struct be_queue_info *eq;
1780 struct be_eq_obj *eqo;
1781 int i, rc;
1782
1783 adapter->num_evt_qs = num_irqs(adapter);
1784
1785 for_all_evt_queues(adapter, eqo, i) {
1786 eqo->adapter = adapter;
1787 eqo->tx_budget = BE_TX_BUDGET;
1788 eqo->idx = i;
1789 eqo->max_eqd = BE_MAX_EQD;
1790 eqo->enable_aic = true;
1791
1792 eq = &eqo->q;
1793 rc = be_queue_alloc(adapter, eq, EVNT_Q_LEN,
1794 sizeof(struct be_eq_entry));
1795 if (rc)
1796 return rc;
1797
1798 rc = be_cmd_eq_create(adapter, eq, eqo->cur_eqd);
1799 if (rc)
1800 return rc;
1801 }
Sathya Perla1cfafab2012-02-23 18:50:15 +00001802 return 0;
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001803}
1804
Sathya Perla5fb379e2009-06-18 00:02:59 +00001805static void be_mcc_queues_destroy(struct be_adapter *adapter)
1806{
1807 struct be_queue_info *q;
Sathya Perla5fb379e2009-06-18 00:02:59 +00001808
Sathya Perla8788fdc2009-07-27 22:52:03 +00001809 q = &adapter->mcc_obj.q;
Sathya Perla5fb379e2009-06-18 00:02:59 +00001810 if (q->created)
Sathya Perla8788fdc2009-07-27 22:52:03 +00001811 be_cmd_q_destroy(adapter, q, QTYPE_MCCQ);
Sathya Perla5fb379e2009-06-18 00:02:59 +00001812 be_queue_free(adapter, q);
1813
Sathya Perla8788fdc2009-07-27 22:52:03 +00001814 q = &adapter->mcc_obj.cq;
Sathya Perla5fb379e2009-06-18 00:02:59 +00001815 if (q->created)
Sathya Perla8788fdc2009-07-27 22:52:03 +00001816 be_cmd_q_destroy(adapter, q, QTYPE_CQ);
Sathya Perla5fb379e2009-06-18 00:02:59 +00001817 be_queue_free(adapter, q);
1818}
1819
1820/* Must be called only after TX qs are created as MCC shares TX EQ */
1821static int be_mcc_queues_create(struct be_adapter *adapter)
1822{
1823 struct be_queue_info *q, *cq;
Sathya Perla5fb379e2009-06-18 00:02:59 +00001824
Sathya Perla8788fdc2009-07-27 22:52:03 +00001825 cq = &adapter->mcc_obj.cq;
Sathya Perla5fb379e2009-06-18 00:02:59 +00001826 if (be_queue_alloc(adapter, cq, MCC_CQ_LEN,
Sathya Perlaefd2e402009-07-27 22:53:10 +00001827 sizeof(struct be_mcc_compl)))
Sathya Perla5fb379e2009-06-18 00:02:59 +00001828 goto err;
1829
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001830 /* Use the default EQ for MCC completions */
1831 if (be_cmd_cq_create(adapter, cq, &mcc_eqo(adapter)->q, true, 0))
Sathya Perla5fb379e2009-06-18 00:02:59 +00001832 goto mcc_cq_free;
1833
Sathya Perla8788fdc2009-07-27 22:52:03 +00001834 q = &adapter->mcc_obj.q;
Sathya Perla5fb379e2009-06-18 00:02:59 +00001835 if (be_queue_alloc(adapter, q, MCC_Q_LEN, sizeof(struct be_mcc_wrb)))
1836 goto mcc_cq_destroy;
1837
Sathya Perla8788fdc2009-07-27 22:52:03 +00001838 if (be_cmd_mccq_create(adapter, q, cq))
Sathya Perla5fb379e2009-06-18 00:02:59 +00001839 goto mcc_q_free;
1840
1841 return 0;
1842
1843mcc_q_free:
1844 be_queue_free(adapter, q);
1845mcc_cq_destroy:
Sathya Perla8788fdc2009-07-27 22:52:03 +00001846 be_cmd_q_destroy(adapter, cq, QTYPE_CQ);
Sathya Perla5fb379e2009-06-18 00:02:59 +00001847mcc_cq_free:
1848 be_queue_free(adapter, cq);
1849err:
1850 return -1;
1851}
1852
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001853static void be_tx_queues_destroy(struct be_adapter *adapter)
1854{
1855 struct be_queue_info *q;
Sathya Perla3c8def92011-06-12 20:01:58 +00001856 struct be_tx_obj *txo;
1857 u8 i;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001858
Sathya Perla3c8def92011-06-12 20:01:58 +00001859 for_all_tx_queues(adapter, txo, i) {
1860 q = &txo->q;
1861 if (q->created)
1862 be_cmd_q_destroy(adapter, q, QTYPE_TXQ);
1863 be_queue_free(adapter, q);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001864
Sathya Perla3c8def92011-06-12 20:01:58 +00001865 q = &txo->cq;
1866 if (q->created)
1867 be_cmd_q_destroy(adapter, q, QTYPE_CQ);
1868 be_queue_free(adapter, q);
1869 }
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001870}
1871
Sathya Perladafc0fe2011-10-24 02:45:02 +00001872static int be_num_txqs_want(struct be_adapter *adapter)
1873{
Padmanabh Ratnakarabb93952012-10-20 06:01:41 +00001874 if ((!lancer_chip(adapter) && sriov_want(adapter)) ||
1875 be_is_mc(adapter) ||
1876 (!lancer_chip(adapter) && !be_physfn(adapter)) ||
Sathya Perla39f1d942012-05-08 19:41:24 +00001877 adapter->generation == BE_GEN2)
Sathya Perladafc0fe2011-10-24 02:45:02 +00001878 return 1;
1879 else
Padmanabh Ratnakarabb93952012-10-20 06:01:41 +00001880 return adapter->max_tx_queues;
Sathya Perladafc0fe2011-10-24 02:45:02 +00001881}
1882
/* Create a completion queue for each TX queue. Also trims num_tx_qs
 * (and the netdev's real TX queue count) to what this config wants.
 * Returns 0 on success or the first failing status; partially created
 * CQs are cleaned up by the caller's error path.
 */
static int be_tx_cqs_create(struct be_adapter *adapter)
{
	struct be_queue_info *cq, *eq;
	int status;
	struct be_tx_obj *txo;
	u8 i;

	adapter->num_tx_qs = be_num_txqs_want(adapter);
	if (adapter->num_tx_qs != MAX_TX_QS) {
		/* rtnl lock is required to change the netdev's real
		 * TX queue count */
		rtnl_lock();
		netif_set_real_num_tx_queues(adapter->netdev,
			adapter->num_tx_qs);
		rtnl_unlock();
	}

	for_all_tx_queues(adapter, txo, i) {
		cq = &txo->cq;
		status = be_queue_alloc(adapter, cq, TX_CQ_LEN,
					sizeof(struct be_eth_tx_compl));
		if (status)
			return status;

		/* If num_evt_qs is less than num_tx_qs, then more than
		 * one txq share an eq
		 */
		eq = &adapter->eq_obj[i % adapter->num_evt_qs].q;
		status = be_cmd_cq_create(adapter, cq, eq, false, 3);
		if (status)
			return status;
	}
	return 0;
}
1915
/* Allocates host memory for each TX WRB queue and creates it in the FW,
 * binding it to the completion queue set up by be_tx_cqs_create().
 * Returns 0 on success or the first failing status; partial creations are
 * undone by the caller's error path.
 */
static int be_tx_qs_create(struct be_adapter *adapter)
{
	struct be_tx_obj *txo;
	int i, status;

	for_all_tx_queues(adapter, txo, i) {
		status = be_queue_alloc(adapter, &txo->q, TX_Q_LEN,
					sizeof(struct be_eth_wrb));
		if (status)
			return status;

		status = be_cmd_txq_create(adapter, &txo->q, &txo->cq);
		if (status)
			return status;
	}

	dev_info(&adapter->pdev->dev, "created %d TX queue(s)\n",
		 adapter->num_tx_qs);
	return 0;
}
1936
1937static void be_rx_cqs_destroy(struct be_adapter *adapter)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001938{
1939 struct be_queue_info *q;
Sathya Perla3abcded2010-10-03 22:12:27 -07001940 struct be_rx_obj *rxo;
1941 int i;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001942
Sathya Perla3abcded2010-10-03 22:12:27 -07001943 for_all_rx_queues(adapter, rxo, i) {
Sathya Perla3abcded2010-10-03 22:12:27 -07001944 q = &rxo->cq;
1945 if (q->created)
1946 be_cmd_q_destroy(adapter, q, QTYPE_CQ);
1947 be_queue_free(adapter, q);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001948 }
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001949}
1950
/* Sizes the RX queue set and creates one completion queue per RX queue.
 * With more than one irq, num_irqs RSS rings plus one default RX queue are
 * used; with a single irq only the default queue is created. Returns 0 on
 * success or the first failing status.
 */
static int be_rx_cqs_create(struct be_adapter *adapter)
{
	struct be_queue_info *eq, *cq;
	struct be_rx_obj *rxo;
	int rc, i;

	/* We'll create as many RSS rings as there are irqs.
	 * But when there's only one irq there's no use creating RSS rings
	 */
	adapter->num_rx_qs = (num_irqs(adapter) > 1) ?
				num_irqs(adapter) + 1 : 1;
	if (adapter->num_rx_qs != MAX_RX_QS) {
		/* rtnl_lock required by netif_set_real_num_rx_queues() */
		rtnl_lock();
		netif_set_real_num_rx_queues(adapter->netdev,
					     adapter->num_rx_qs);
		rtnl_unlock();
	}

	/* big_page_size: contiguous region from which rx frags are carved */
	adapter->big_page_size = (1 << get_order(rx_frag_size)) * PAGE_SIZE;
	for_all_rx_queues(adapter, rxo, i) {
		rxo->adapter = adapter;
		cq = &rxo->cq;
		rc = be_queue_alloc(adapter, cq, RX_CQ_LEN,
				sizeof(struct be_eth_rx_compl));
		if (rc)
			return rc;

		/* CQs are distributed round-robin over the event queues */
		eq = &adapter->eq_obj[i % adapter->num_evt_qs].q;
		rc = be_cmd_cq_create(adapter, cq, eq, false, 3);
		if (rc)
			return rc;
	}

	dev_info(&adapter->pdev->dev,
		"created %d RSS queue(s) and 1 default RX queue\n",
		adapter->num_rx_qs - 1);
	return 0;
}
1989
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001990static irqreturn_t be_intx(int irq, void *dev)
1991{
1992 struct be_adapter *adapter = dev;
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001993 int num_evts;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001994
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001995 /* With INTx only one EQ is used */
1996 num_evts = event_handle(&adapter->eq_obj[0]);
1997 if (num_evts)
1998 return IRQ_HANDLED;
1999 else
2000 return IRQ_NONE;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002001}
2002
/* MSI-X interrupt handler: one vector per event queue; the eq object is
 * passed as the irq cookie. MSI-X vectors are never shared, so the
 * interrupt is always claimed.
 */
static irqreturn_t be_msix(int irq, void *dev)
{
	struct be_eq_obj *eqo = dev;

	event_handle(eqo);
	return IRQ_HANDLED;
}
2010
Sathya Perla2e588f82011-03-11 02:49:26 +00002011static inline bool do_gro(struct be_rx_compl_info *rxcp)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002012{
Sathya Perla2e588f82011-03-11 02:49:26 +00002013 return (rxcp->tcpf && !rxcp->err) ? true : false;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002014}
2015
/* NAPI RX processing for one RX queue: drains up to @budget completions,
 * dispatching each to the GRO or the regular receive path, then re-arms the
 * CQ and replenishes RX frags if the queue has drained below the watermark.
 * Returns the number of completions consumed (<= budget).
 */
static int be_process_rx(struct be_rx_obj *rxo, struct napi_struct *napi,
			int budget)
{
	struct be_adapter *adapter = rxo->adapter;
	struct be_queue_info *rx_cq = &rxo->cq;
	struct be_rx_compl_info *rxcp;
	u32 work_done;

	for (work_done = 0; work_done < budget; work_done++) {
		rxcp = be_rx_compl_get(rxo);
		if (!rxcp)
			break;

		/* Is it a flush compl that has no data */
		if (unlikely(rxcp->num_rcvd == 0))
			goto loop_continue;

		/* Discard compl with partial DMA Lancer B0 */
		if (unlikely(!rxcp->pkt_size)) {
			be_rx_compl_discard(rxo, rxcp);
			goto loop_continue;
		}

		/* On BE drop pkts that arrive due to imperfect filtering in
		 * promiscuous mode on some skews
		 */
		if (unlikely(rxcp->port != adapter->port_num &&
				!lancer_chip(adapter))) {
			be_rx_compl_discard(rxo, rxcp);
			goto loop_continue;
		}

		if (do_gro(rxcp))
			be_rx_compl_process_gro(rxo, napi, rxcp);
		else
			be_rx_compl_process(rxo, rxcp);
loop_continue:
		/* stats are updated even for discarded/flush completions */
		be_rx_stats_update(rxo, rxcp);
	}

	if (work_done) {
		/* ack the consumed entries and keep the CQ armed */
		be_cq_notify(adapter, rx_cq->id, true, work_done);

		/* Refill the RX ring when it runs low on posted frags */
		if (atomic_read(&rxo->q.used) < RX_FRAGS_REFILL_WM)
			be_post_rx_frags(rxo, GFP_ATOMIC);
	}

	return work_done;
}
2065
/* Reaps up to @budget TX completions from txo's CQ, frees the completed
 * WRBs/skbs and wakes the subqueue @idx if it was stopped for lack of WRB
 * space. Returns true when the CQ was fully drained within the budget.
 */
static bool be_process_tx(struct be_adapter *adapter, struct be_tx_obj *txo,
			int budget, int idx)
{
	struct be_eth_tx_compl *txcp;
	int num_wrbs = 0, work_done;

	for (work_done = 0; work_done < budget; work_done++) {
		txcp = be_tx_compl_get(&txo->cq);
		if (!txcp)
			break;
		num_wrbs += be_tx_compl_process(adapter, txo,
				AMAP_GET_BITS(struct amap_eth_tx_compl,
					wrb_index, txcp));
	}

	if (work_done) {
		be_cq_notify(adapter, txo->cq.id, true, work_done);
		atomic_sub(num_wrbs, &txo->q.used);

		/* As Tx wrbs have been freed up, wake up netdev queue
		 * if it was stopped due to lack of tx wrbs. */
		if (__netif_subqueue_stopped(adapter->netdev, idx) &&
			atomic_read(&txo->q.used) < txo->q.len / 2) {
			netif_wake_subqueue(adapter->netdev, idx);
		}

		/* u64 stats require the seqcount-style begin/end bracket */
		u64_stats_update_begin(&tx_stats(txo)->sync_compl);
		tx_stats(txo)->tx_compl += work_done;
		u64_stats_update_end(&tx_stats(txo)->sync_compl);
	}
	return (work_done < budget); /* Done */
}
Sathya Perla3c8def92011-06-12 20:01:58 +00002098
/* NAPI poll handler for one event queue: services every TX and RX queue
 * mapped to this EQ, and the MCC queue when this is the MCC EQ. Completes
 * NAPI and re-arms the EQ only when all work fit in the budget; otherwise
 * stays in polling mode. Returns the amount of work done (NAPI contract).
 */
int be_poll(struct napi_struct *napi, int budget)
{
	struct be_eq_obj *eqo = container_of(napi, struct be_eq_obj, napi);
	struct be_adapter *adapter = eqo->adapter;
	int max_work = 0, work, i;
	bool tx_done;

	/* Process all TXQs serviced by this EQ */
	for (i = eqo->idx; i < adapter->num_tx_qs; i += adapter->num_evt_qs) {
		tx_done = be_process_tx(adapter, &adapter->tx_obj[i],
					eqo->tx_budget, i);
		/* force re-poll if any TX CQ wasn't fully drained */
		if (!tx_done)
			max_work = budget;
	}

	/* This loop will iterate twice for EQ0 in which
	 * completions of the last RXQ (default one) are also processed
	 * For other EQs the loop iterates only once
	 */
	for (i = eqo->idx; i < adapter->num_rx_qs; i += adapter->num_evt_qs) {
		work = be_process_rx(&adapter->rx_obj[i], napi, budget);
		max_work = max(work, max_work);
	}

	if (is_mcc_eqo(eqo))
		be_process_mcc(adapter);

	if (max_work < budget) {
		napi_complete(napi);
		/* re-arm the EQ for interrupts */
		be_eq_notify(adapter, eqo->q.id, true, false, 0);
	} else {
		/* As we'll continue in polling mode, count and clear events */
		be_eq_notify(adapter, eqo->q.id, false, false, events_get(eqo));
	}
	return max_work;
}
2135
Padmanabh Ratnakarf67ef7b2012-07-12 03:57:09 +00002136void be_detect_error(struct be_adapter *adapter)
Ajit Khaparde7c185272010-07-29 06:16:33 +00002137{
Padmanabh Ratnakare1cfb672011-11-03 01:50:08 +00002138 u32 ue_lo = 0, ue_hi = 0, ue_lo_mask = 0, ue_hi_mask = 0;
2139 u32 sliport_status = 0, sliport_err1 = 0, sliport_err2 = 0;
Ajit Khaparde7c185272010-07-29 06:16:33 +00002140 u32 i;
2141
Padmanabh Ratnakarf67ef7b2012-07-12 03:57:09 +00002142 if (be_crit_error(adapter))
Sathya Perla72f02482011-11-10 19:17:58 +00002143 return;
2144
Padmanabh Ratnakare1cfb672011-11-03 01:50:08 +00002145 if (lancer_chip(adapter)) {
2146 sliport_status = ioread32(adapter->db + SLIPORT_STATUS_OFFSET);
2147 if (sliport_status & SLIPORT_STATUS_ERR_MASK) {
2148 sliport_err1 = ioread32(adapter->db +
2149 SLIPORT_ERROR1_OFFSET);
2150 sliport_err2 = ioread32(adapter->db +
2151 SLIPORT_ERROR2_OFFSET);
2152 }
2153 } else {
2154 pci_read_config_dword(adapter->pdev,
2155 PCICFG_UE_STATUS_LOW, &ue_lo);
2156 pci_read_config_dword(adapter->pdev,
2157 PCICFG_UE_STATUS_HIGH, &ue_hi);
2158 pci_read_config_dword(adapter->pdev,
2159 PCICFG_UE_STATUS_LOW_MASK, &ue_lo_mask);
2160 pci_read_config_dword(adapter->pdev,
2161 PCICFG_UE_STATUS_HI_MASK, &ue_hi_mask);
Ajit Khaparde7c185272010-07-29 06:16:33 +00002162
Padmanabh Ratnakarf67ef7b2012-07-12 03:57:09 +00002163 ue_lo = (ue_lo & ~ue_lo_mask);
2164 ue_hi = (ue_hi & ~ue_hi_mask);
Padmanabh Ratnakare1cfb672011-11-03 01:50:08 +00002165 }
Ajit Khaparde7c185272010-07-29 06:16:33 +00002166
Ajit Khaparde1451ae62012-10-08 18:18:21 +00002167 /* On certain platforms BE hardware can indicate spurious UEs.
2168 * Allow the h/w to stop working completely in case of a real UE.
2169 * Hence not setting the hw_error for UE detection.
2170 */
2171 if (sliport_status & SLIPORT_STATUS_ERR_MASK) {
Padmanabh Ratnakarf67ef7b2012-07-12 03:57:09 +00002172 adapter->hw_error = true;
Sathya Perla434b3642011-11-10 19:17:59 +00002173 dev_err(&adapter->pdev->dev,
Padmanabh Ratnakarf67ef7b2012-07-12 03:57:09 +00002174 "Error detected in the card\n");
2175 }
2176
2177 if (sliport_status & SLIPORT_STATUS_ERR_MASK) {
2178 dev_err(&adapter->pdev->dev,
2179 "ERR: sliport status 0x%x\n", sliport_status);
2180 dev_err(&adapter->pdev->dev,
2181 "ERR: sliport error1 0x%x\n", sliport_err1);
2182 dev_err(&adapter->pdev->dev,
2183 "ERR: sliport error2 0x%x\n", sliport_err2);
Ajit Khaparded053de92010-09-03 06:23:30 +00002184 }
2185
Padmanabh Ratnakare1cfb672011-11-03 01:50:08 +00002186 if (ue_lo) {
2187 for (i = 0; ue_lo; ue_lo >>= 1, i++) {
2188 if (ue_lo & 1)
Ajit Khaparde7c185272010-07-29 06:16:33 +00002189 dev_err(&adapter->pdev->dev,
2190 "UE: %s bit set\n", ue_status_low_desc[i]);
2191 }
2192 }
Padmanabh Ratnakarf67ef7b2012-07-12 03:57:09 +00002193
Padmanabh Ratnakare1cfb672011-11-03 01:50:08 +00002194 if (ue_hi) {
2195 for (i = 0; ue_hi; ue_hi >>= 1, i++) {
2196 if (ue_hi & 1)
Ajit Khaparde7c185272010-07-29 06:16:33 +00002197 dev_err(&adapter->pdev->dev,
2198 "UE: %s bit set\n", ue_status_hi_desc[i]);
2199 }
2200 }
2201
2202}
2203
Sathya Perla8d56ff12009-11-22 22:02:26 +00002204static void be_msix_disable(struct be_adapter *adapter)
2205{
Sathya Perlaac6a0c42011-03-21 20:49:25 +00002206 if (msix_enabled(adapter)) {
Sathya Perla8d56ff12009-11-22 22:02:26 +00002207 pci_disable_msix(adapter->pdev);
Sathya Perlaac6a0c42011-03-21 20:49:25 +00002208 adapter->num_msix_vec = 0;
Sathya Perla3abcded2010-10-03 22:12:27 -07002209 }
2210}
2211
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002212static uint be_num_rss_want(struct be_adapter *adapter)
2213{
Yuval Mintz30e80b52012-07-01 03:19:00 +00002214 u32 num = 0;
Padmanabh Ratnakarabb93952012-10-20 06:01:41 +00002215
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002216 if ((adapter->function_caps & BE_FUNCTION_CAPS_RSS) &&
Padmanabh Ratnakarabb93952012-10-20 06:01:41 +00002217 (lancer_chip(adapter) ||
2218 (!sriov_want(adapter) && be_physfn(adapter)))) {
2219 num = adapter->max_rss_queues;
Yuval Mintz30e80b52012-07-01 03:19:00 +00002220 num = min_t(u32, num, (u32)netif_get_num_default_rss_queues());
2221 }
2222 return num;
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002223}
2224
/* Tries to enable MSI-X with enough vectors for the desired RSS rings plus,
 * when RoCE is supported, a share of vectors for the RoCE EQs. If the PCI
 * core offers fewer vectors, retries once with the offered count. On
 * success, splits the granted vectors between NIC and RoCE; on failure,
 * leaves MSI-X disabled (the caller falls back to INTx).
 */
static void be_msix_enable(struct be_adapter *adapter)
{
#define BE_MIN_MSIX_VECTORS 1
	int i, status, num_vec, num_roce_vec = 0;
	struct device *dev = &adapter->pdev->dev;

	/* If RSS queues are not used, need a vec for default RX Q */
	num_vec = min(be_num_rss_want(adapter), num_online_cpus());
	if (be_roce_supported(adapter)) {
		num_roce_vec = min_t(u32, MAX_ROCE_MSIX_VECTORS,
					(num_online_cpus() + 1));
		num_roce_vec = min(num_roce_vec, MAX_ROCE_EQS);
		num_vec += num_roce_vec;
		num_vec = min(num_vec, MAX_MSIX_VECTORS);
	}
	num_vec = max(num_vec, BE_MIN_MSIX_VECTORS);

	for (i = 0; i < num_vec; i++)
		adapter->msix_entries[i].entry = i;

	/* pci_enable_msix() returns >0 with the number of vectors
	 * actually available when the request cannot be met in full */
	status = pci_enable_msix(adapter->pdev, adapter->msix_entries, num_vec);
	if (status == 0) {
		goto done;
	} else if (status >= BE_MIN_MSIX_VECTORS) {
		num_vec = status;
		if (pci_enable_msix(adapter->pdev, adapter->msix_entries,
				num_vec) == 0)
			goto done;
	}

	dev_warn(dev, "MSIx enable failed\n");
	return;
done:
	/* RoCE gets the leftover vectors beyond what the NIC needs */
	if (be_roce_supported(adapter)) {
		if (num_vec > num_roce_vec) {
			adapter->num_msix_vec = num_vec - num_roce_vec;
			adapter->num_msix_roce_vec =
				num_vec - adapter->num_msix_vec;
		} else {
			adapter->num_msix_vec = num_vec;
			adapter->num_msix_roce_vec = 0;
		}
	} else
		adapter->num_msix_vec = num_vec;
	dev_info(dev, "enabled %d MSI-x vector(s)\n", adapter->num_msix_vec);
	return;
}
2272
/* Returns the MSI-X vector assigned to the given event-queue object */
static inline int be_msix_vec_get(struct be_adapter *adapter,
				struct be_eq_obj *eqo)
{
	return adapter->msix_entries[eqo->idx].vector;
}
2278
/* Requests one MSI-X irq per event queue, naming each "<netdev>-q<N>".
 * On any failure, frees the irqs registered so far (in reverse order),
 * disables MSI-X and returns the failing status so the caller can fall
 * back to INTx.
 */
static int be_msix_register(struct be_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	struct be_eq_obj *eqo;
	int status, i, vec;

	for_all_evt_queues(adapter, eqo, i) {
		sprintf(eqo->desc, "%s-q%d", netdev->name, i);
		vec = be_msix_vec_get(adapter, eqo);
		status = request_irq(vec, be_msix, 0, eqo->desc, eqo);
		if (status)
			goto err_msix;
	}

	return 0;
err_msix:
	/* unwind only the irqs that were successfully requested */
	for (i--, eqo = &adapter->eq_obj[i]; i >= 0; i--, eqo--)
		free_irq(be_msix_vec_get(adapter, eqo), eqo);
	dev_warn(&adapter->pdev->dev, "MSIX Request IRQ failed - err %d\n",
		status);
	be_msix_disable(adapter);
	return status;
}
2302
/* Registers the adapter's irq handler(s): MSI-X when enabled, falling back
 * to a shared INTx line on a PF if MSI-X registration fails (VFs have no
 * INTx). Sets isr_registered on success; returns 0 or a negative errno.
 */
static int be_irq_register(struct be_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	int status;

	if (msix_enabled(adapter)) {
		status = be_msix_register(adapter);
		if (status == 0)
			goto done;
		/* INTx is not supported for VF */
		if (!be_physfn(adapter))
			return status;
	}

	/* INTx */
	netdev->irq = adapter->pdev->irq;
	status = request_irq(netdev->irq, be_intx, IRQF_SHARED, netdev->name,
			adapter);
	if (status) {
		dev_err(&adapter->pdev->dev,
			"INTx request IRQ failed - err %d\n", status);
		return status;
	}
done:
	adapter->isr_registered = true;
	return 0;
}
2330
/* Frees the registered irq(s): either the shared INTx line or every
 * per-EQ MSI-X vector. No-op if nothing was registered.
 */
static void be_irq_unregister(struct be_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	struct be_eq_obj *eqo;
	int i;

	if (!adapter->isr_registered)
		return;

	/* INTx */
	if (!msix_enabled(adapter)) {
		free_irq(netdev->irq, adapter);
		goto done;
	}

	/* MSIx */
	for_all_evt_queues(adapter, eqo, i)
		free_irq(be_msix_vec_get(adapter, eqo), eqo);

done:
	adapter->isr_registered = false;
}
2353
/* Destroys every RX WRB queue in the FW, drains the matching CQ of
 * in-flight/flush completions, and frees the host queue memory.
 */
static void be_rx_qs_destroy(struct be_adapter *adapter)
{
	struct be_queue_info *q;
	struct be_rx_obj *rxo;
	int i;

	for_all_rx_queues(adapter, rxo, i) {
		q = &rxo->q;
		if (q->created) {
			be_cmd_rxq_destroy(adapter, q);
			/* After the rxq is invalidated, wait for a grace time
			 * of 1ms for all dma to end and the flush compl to
			 * arrive
			 */
			mdelay(1);
			be_rx_cq_clean(rxo);
		}
		be_queue_free(adapter, q);
	}
}
2374
/* ndo_stop handler: quiesces the adapter in the reverse order of be_open().
 * Stops RoCE and async MCC, masks interrupts, disables NAPI and cleans each
 * EQ, unregisters irqs, drains pending TX completions and finally destroys
 * the RX queues. Always returns 0.
 */
static int be_close(struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_eq_obj *eqo;
	int i;

	be_roce_dev_close(adapter);

	be_async_mcc_disable(adapter);

	/* Lancer has no global intr-enable; only BE chips mask here */
	if (!lancer_chip(adapter))
		be_intr_set(adapter, false);

	for_all_evt_queues(adapter, eqo, i) {
		napi_disable(&eqo->napi);
		/* make sure no irq handler is still running on this EQ */
		if (msix_enabled(adapter))
			synchronize_irq(be_msix_vec_get(adapter, eqo));
		else
			synchronize_irq(netdev->irq);
		be_eq_clean(eqo);
	}

	be_irq_unregister(adapter);

	/* Wait for all pending tx completions to arrive so that
	 * all tx skbs are freed.
	 */
	be_tx_compl_clean(adapter);

	be_rx_qs_destroy(adapter);
	return 0;
}
2407
/* Allocates and creates the RX WRB queues (default queue first, as the FW
 * expects), programs the 128-entry RSS indirection table across the RSS
 * rings when multiple RX queues exist, and posts the initial RX frags.
 * Returns 0 on success or the first failing status.
 */
static int be_rx_qs_create(struct be_adapter *adapter)
{
	struct be_rx_obj *rxo;
	int rc, i, j;
	u8 rsstable[128];

	for_all_rx_queues(adapter, rxo, i) {
		rc = be_queue_alloc(adapter, &rxo->q, RX_Q_LEN,
				    sizeof(struct be_eth_rx_d));
		if (rc)
			return rc;
	}

	/* The FW would like the default RXQ to be created first */
	rxo = default_rxo(adapter);
	rc = be_cmd_rxq_create(adapter, &rxo->q, rxo->cq.id, rx_frag_size,
			       adapter->if_handle, false, &rxo->rss_id);
	if (rc)
		return rc;

	for_all_rss_queues(adapter, rxo, i) {
		rc = be_cmd_rxq_create(adapter, &rxo->q, rxo->cq.id,
				       rx_frag_size, adapter->if_handle,
				       true, &rxo->rss_id);
		if (rc)
			return rc;
	}

	if (be_multi_rxq(adapter)) {
		/* fill the indirection table with the rss_ids of the RSS
		 * rings, repeated round-robin to cover all 128 slots */
		for (j = 0; j < 128; j += adapter->num_rx_qs - 1) {
			for_all_rss_queues(adapter, rxo, i) {
				if ((j + i) >= 128)
					break;
				rsstable[j + i] = rxo->rss_id;
			}
		}
		rc = be_cmd_rss_config(adapter, rsstable, 128);
		if (rc)
			return rc;
	}

	/* First time posting */
	for_all_rx_queues(adapter, rxo, i)
		be_post_rx_frags(rxo, GFP_KERNEL);
	return 0;
}
2454
/* ndo_open handler: creates the RX queues, registers irqs, unmasks
 * interrupts, arms all RX/TX CQs, enables async MCC and NAPI, arms the EQs,
 * then reports the current link state and opens the RoCE device.
 * Returns 0 on success; on failure tears down via be_close() and
 * returns -EIO.
 */
static int be_open(struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_eq_obj *eqo;
	struct be_rx_obj *rxo;
	struct be_tx_obj *txo;
	u8 link_status;
	int status, i;

	status = be_rx_qs_create(adapter);
	if (status)
		goto err;

	be_irq_register(adapter);

	/* Lancer has no global intr-enable; only BE chips unmask here */
	if (!lancer_chip(adapter))
		be_intr_set(adapter, true);

	for_all_rx_queues(adapter, rxo, i)
		be_cq_notify(adapter, rxo->cq.id, true, 0);

	for_all_tx_queues(adapter, txo, i)
		be_cq_notify(adapter, txo->cq.id, true, 0);

	be_async_mcc_enable(adapter);

	for_all_evt_queues(adapter, eqo, i) {
		napi_enable(&eqo->napi);
		be_eq_notify(adapter, eqo->q.id, true, false, 0);
	}

	status = be_cmd_link_status_query(adapter, NULL, &link_status, 0);
	if (!status)
		be_link_status_update(adapter, link_status);

	be_roce_dev_open(adapter);
	return 0;
err:
	be_close(adapter->netdev);
	return -EIO;
}
2496
Ajit Khaparde71d8d1b2009-12-03 06:16:59 +00002497static int be_setup_wol(struct be_adapter *adapter, bool enable)
2498{
2499 struct be_dma_mem cmd;
2500 int status = 0;
2501 u8 mac[ETH_ALEN];
2502
2503 memset(mac, 0, ETH_ALEN);
2504
2505 cmd.size = sizeof(struct be_cmd_req_acpi_wol_magic_config);
Ivan Vecera2b7bceb2011-02-02 08:05:12 +00002506 cmd.va = dma_alloc_coherent(&adapter->pdev->dev, cmd.size, &cmd.dma,
2507 GFP_KERNEL);
Ajit Khaparde71d8d1b2009-12-03 06:16:59 +00002508 if (cmd.va == NULL)
2509 return -1;
2510 memset(cmd.va, 0, cmd.size);
2511
2512 if (enable) {
2513 status = pci_write_config_dword(adapter->pdev,
2514 PCICFG_PM_CONTROL_OFFSET, PCICFG_PM_CONTROL_MASK);
2515 if (status) {
2516 dev_err(&adapter->pdev->dev,
Frans Pop2381a552010-03-24 07:57:36 +00002517 "Could not enable Wake-on-lan\n");
Ivan Vecera2b7bceb2011-02-02 08:05:12 +00002518 dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va,
2519 cmd.dma);
Ajit Khaparde71d8d1b2009-12-03 06:16:59 +00002520 return status;
2521 }
2522 status = be_cmd_enable_magic_wol(adapter,
2523 adapter->netdev->dev_addr, &cmd);
2524 pci_enable_wake(adapter->pdev, PCI_D3hot, 1);
2525 pci_enable_wake(adapter->pdev, PCI_D3cold, 1);
2526 } else {
2527 status = be_cmd_enable_magic_wol(adapter, mac, &cmd);
2528 pci_enable_wake(adapter->pdev, PCI_D3hot, 0);
2529 pci_enable_wake(adapter->pdev, PCI_D3cold, 0);
2530 }
2531
Ivan Vecera2b7bceb2011-02-02 08:05:12 +00002532 dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va, cmd.dma);
Ajit Khaparde71d8d1b2009-12-03 06:16:59 +00002533 return status;
2534}
2535
/*
 * Generate a seed MAC address from the PF MAC Address using jhash.
 * MAC Address for VFs are assigned incrementally starting from the seed.
 * These addresses are programmed in the ASIC by the PF and the VF driver
 * queries for the MAC address during its probe.
 */
static inline int be_vf_eth_addr_config(struct be_adapter *adapter)
{
	u32 vf;
	int status = 0;
	u8 mac[ETH_ALEN];
	struct be_vf_cfg *vf_cfg;

	be_vf_eth_addr_generate(adapter, mac);

	for_all_vfs(adapter, vf_cfg, vf) {
		/* Lancer programs the MAC via set_mac_list; BE chips add a
		 * pmac entry on the VF's interface. domain is vf + 1. */
		if (lancer_chip(adapter)) {
			status = be_cmd_set_mac_list(adapter, mac, 1, vf + 1);
		} else {
			status = be_cmd_pmac_add(adapter, mac,
						 vf_cfg->if_handle,
						 &vf_cfg->pmac_id, vf + 1);
		}

		if (status)
			dev_err(&adapter->pdev->dev,
			"Mac address assignment failed for VF %d\n", vf);
		else
			memcpy(vf_cfg->mac_addr, mac, ETH_ALEN);

		/* next VF gets the next sequential address */
		mac[5] += 1;
	}
	/* returns the status of the last VF attempted */
	return status;
}
2570
/* Undoes VF provisioning: removes each VF's programmed MAC, destroys its
 * interface and disables SR-IOV — unless VFs are still assigned to VMs, in
 * which case only the host-side bookkeeping is released. Always frees the
 * vf_cfg array and resets num_vfs.
 */
static void be_vf_clear(struct be_adapter *adapter)
{
	struct be_vf_cfg *vf_cfg;
	u32 vf;

	/* cannot disable SR-IOV while a VM owns a VF */
	if (be_find_vfs(adapter, ASSIGNED)) {
		dev_warn(&adapter->pdev->dev, "VFs are assigned to VMs\n");
		goto done;
	}

	for_all_vfs(adapter, vf_cfg, vf) {
		if (lancer_chip(adapter))
			be_cmd_set_mac_list(adapter, NULL, 0, vf + 1);
		else
			be_cmd_pmac_del(adapter, vf_cfg->if_handle,
					vf_cfg->pmac_id, vf + 1);

		be_cmd_if_destroy(adapter, vf_cfg->if_handle, vf + 1);
	}
	pci_disable_sriov(adapter->pdev);
done:
	kfree(adapter->vf_cfg);
	adapter->num_vfs = 0;
}
2595
/* Tears down the adapter's software/FW state: stops the worker, clears VFs,
 * deletes the extra unicast MACs, destroys the interface and all MCC/RX/TX/
 * event queues, frees the pmac_id array and disables MSI-X.
 * Always returns 0.
 */
static int be_clear(struct be_adapter *adapter)
{
	/* pmac_id[0] is the primary MAC; extra uc macs start at index 1 */
	int i = 1;

	if (adapter->flags & BE_FLAGS_WORKER_SCHEDULED) {
		cancel_delayed_work_sync(&adapter->work);
		adapter->flags &= ~BE_FLAGS_WORKER_SCHEDULED;
	}

	if (sriov_enabled(adapter))
		be_vf_clear(adapter);

	for (; adapter->uc_macs > 0; adapter->uc_macs--, i++)
		be_cmd_pmac_del(adapter, adapter->if_handle,
			adapter->pmac_id[i], 0);

	be_cmd_if_destroy(adapter, adapter->if_handle, 0);

	be_mcc_queues_destroy(adapter);
	be_rx_cqs_destroy(adapter);
	be_tx_queues_destroy(adapter);
	be_evt_queues_destroy(adapter);

	kfree(adapter->pmac_id);
	adapter->pmac_id = NULL;

	be_msix_disable(adapter);
	return 0;
}
2625
Padmanabh Ratnakarabb93952012-10-20 06:01:41 +00002626static void be_get_vf_if_cap_flags(struct be_adapter *adapter,
2627 u32 *cap_flags, u8 domain)
2628{
2629 bool profile_present = false;
2630 int status;
2631
2632 if (lancer_chip(adapter)) {
2633 status = be_cmd_get_profile_config(adapter, cap_flags, domain);
2634 if (!status)
2635 profile_present = true;
2636 }
2637
2638 if (!profile_present)
2639 *cap_flags = BE_IF_FLAGS_UNTAGGED | BE_IF_FLAGS_BROADCAST |
2640 BE_IF_FLAGS_MULTICAST;
2641}
2642
Sathya Perla39f1d942012-05-08 19:41:24 +00002643static int be_vf_setup_init(struct be_adapter *adapter)
Sathya Perla30128032011-11-10 19:17:57 +00002644{
Sathya Perla11ac75e2011-12-13 00:58:50 +00002645 struct be_vf_cfg *vf_cfg;
Sathya Perla30128032011-11-10 19:17:57 +00002646 int vf;
2647
Sathya Perla39f1d942012-05-08 19:41:24 +00002648 adapter->vf_cfg = kcalloc(adapter->num_vfs, sizeof(*vf_cfg),
2649 GFP_KERNEL);
2650 if (!adapter->vf_cfg)
2651 return -ENOMEM;
2652
Sathya Perla11ac75e2011-12-13 00:58:50 +00002653 for_all_vfs(adapter, vf_cfg, vf) {
2654 vf_cfg->if_handle = -1;
2655 vf_cfg->pmac_id = -1;
Sathya Perla30128032011-11-10 19:17:57 +00002656 }
Sathya Perla39f1d942012-05-08 19:41:24 +00002657 return 0;
Sathya Perla30128032011-11-10 19:17:57 +00002658}
2659
/* Enable SR-IOV and provision each VF: create its interface, program
 * MAC addresses, cap QoS/link-speed, read its default VLAN and finally
 * enable it. Returns 0 on success (or benign skips) and a negative
 * error otherwise; the caller tears down via be_clear()/be_vf_clear().
 */
static int be_vf_setup(struct be_adapter *adapter)
{
	struct be_vf_cfg *vf_cfg;
	struct device *dev = &adapter->pdev->dev;
	u32 cap_flags, en_flags, vf;
	u16 def_vlan, lnk_speed;
	int status, enabled_vfs;

	/* If SR-IOV is already enabled (e.g. left over from a previous
	 * driver instance), don't re-enable with a different count.
	 */
	enabled_vfs = be_find_vfs(adapter, ENABLED);
	if (enabled_vfs) {
		dev_warn(dev, "%d VFs are already enabled\n", enabled_vfs);
		dev_warn(dev, "Ignoring num_vfs=%d setting\n", num_vfs);
		return 0;
	}

	/* clamp the module-parameter request to the device's SR-IOV cap */
	if (num_vfs > adapter->dev_num_vfs) {
		dev_warn(dev, "Device supports %d VFs and not %d\n",
			 adapter->dev_num_vfs, num_vfs);
		num_vfs = adapter->dev_num_vfs;
	}

	status = pci_enable_sriov(adapter->pdev, num_vfs);
	if (!status) {
		adapter->num_vfs = num_vfs;
	} else {
		/* Platform doesn't support SRIOV though device supports it */
		dev_warn(dev, "SRIOV enable failed\n");
		return 0;
	}

	status = be_vf_setup_init(adapter);
	if (status)
		goto err;

	/* create one interface per VF, enabling only the basic RX flags */
	for_all_vfs(adapter, vf_cfg, vf) {
		be_get_vf_if_cap_flags(adapter, &cap_flags, vf + 1);

		en_flags = cap_flags & (BE_IF_FLAGS_UNTAGGED |
					BE_IF_FLAGS_BROADCAST |
					BE_IF_FLAGS_MULTICAST);

		status = be_cmd_if_create(adapter, cap_flags, en_flags,
					  &vf_cfg->if_handle, vf + 1);
		if (status)
			goto err;
	}

	/* NOTE(review): enabled_vfs is always 0 here (non-zero returns
	 * earlier), so this block always runs — condition kept for safety.
	 */
	if (!enabled_vfs) {
		status = be_vf_eth_addr_config(adapter);
		if (status)
			goto err;
	}

	for_all_vfs(adapter, vf_cfg, vf) {
		/* cap each VF at 1Gbps; tx_rate is stored in multiples
		 * of 10 Mbps
		 */
		lnk_speed = 1000;
		status = be_cmd_set_qos(adapter, lnk_speed, vf + 1);
		if (status)
			goto err;
		vf_cfg->tx_rate = lnk_speed * 10;

		status = be_cmd_get_hsw_config(adapter, &def_vlan,
				vf + 1, vf_cfg->if_handle);
		if (status)
			goto err;
		vf_cfg->def_vid = def_vlan;

		be_cmd_enable_vf(adapter, vf + 1);
	}
	return 0;
err:
	return status;
}
2732
Sathya Perla30128032011-11-10 19:17:57 +00002733static void be_setup_init(struct be_adapter *adapter)
2734{
2735 adapter->vlan_prio_bmap = 0xff;
Ajit Khaparde42f11cf2012-04-21 18:53:22 +00002736 adapter->phy.link_speed = -1;
Sathya Perla30128032011-11-10 19:17:57 +00002737 adapter->if_handle = -1;
2738 adapter->be3_native = false;
2739 adapter->promiscuous = false;
2740 adapter->eq_next_idx = 0;
Padmanabh Ratnakarf25b1192012-10-20 06:02:52 +00002741
2742 if (be_physfn(adapter))
2743 adapter->cmd_privileges = MAX_PRIVILEGES;
2744 else
2745 adapter->cmd_privileges = MIN_PRIVILEGES;
Sathya Perla30128032011-11-10 19:17:57 +00002746}
2747
/* Determine the MAC address to use for an interface, writing it to
 * @mac and reporting via @active_mac whether the firmware already has
 * it active (so the caller knows whether be_cmd_pmac_add() is needed).
 * Returns 0 on success or a firmware-command error.
 */
static int be_get_mac_addr(struct be_adapter *adapter, u8 *mac, u32 if_handle,
			   bool *active_mac, u32 *pmac_id)
{
	int status = 0;

	/* A non-zero perm_addr means a MAC was already established for
	 * this netdev (e.g. across a reconfigure); just reuse dev_addr.
	 */
	if (!is_zero_ether_addr(adapter->netdev->perm_addr)) {
		memcpy(mac, adapter->netdev->dev_addr, ETH_ALEN);
		if (!lancer_chip(adapter) && !be_physfn(adapter))
			*active_mac = true;
		else
			*active_mac = false;

		return status;
	}

	if (lancer_chip(adapter)) {
		/* Lancer: take the MAC from the provisioned MAC list; if
		 * it is already active, re-query it by its pmac id.
		 */
		status = be_cmd_get_mac_from_list(adapter, mac,
						  active_mac, pmac_id, 0);
		if (*active_mac) {
			status = be_cmd_mac_addr_query(adapter, mac, false,
						       if_handle, *pmac_id);
		}
	} else if (be_physfn(adapter)) {
		/* For BE3, for PF get permanent MAC */
		status = be_cmd_mac_addr_query(adapter, mac, true, 0, 0);
		*active_mac = false;
	} else {
		/* For BE3, for VF get soft MAC assigned by PF*/
		status = be_cmd_mac_addr_query(adapter, mac, false,
					       if_handle, 0);
		*active_mac = true;
	}
	return status;
}
2782
/* Populate the adapter's resource limits (MACs, VLANs, queue counts,
 * i/f capability flags). On Lancer these come from the firmware's
 * per-function config profile (then clamped to driver maxima); on
 * other chips, or if the query fails, fixed defaults are used.
 */
static void be_get_resources(struct be_adapter *adapter)
{
	int status;
	bool profile_present = false;

	if (lancer_chip(adapter)) {
		status = be_cmd_get_func_config(adapter);

		if (!status)
			profile_present = true;
	}

	if (profile_present) {
		/* Sanity fixes for Lancer */
		adapter->max_pmac_cnt = min_t(u16, adapter->max_pmac_cnt,
					      BE_UC_PMAC_COUNT);
		adapter->max_vlans = min_t(u16, adapter->max_vlans,
					   BE_NUM_VLANS_SUPPORTED);
		adapter->max_mcast_mac = min_t(u16, adapter->max_mcast_mac,
					       BE_MAX_MC);
		adapter->max_tx_queues = min_t(u16, adapter->max_tx_queues,
					       MAX_TX_QS);
		adapter->max_rss_queues = min_t(u16, adapter->max_rss_queues,
						BE3_MAX_RSS_QS);
		adapter->max_event_queues = min_t(u16,
						  adapter->max_event_queues,
						  BE3_MAX_RSS_QS);

		/* leave one rx queue for the default (non-RSS) ring */
		if (adapter->max_rss_queues &&
		    adapter->max_rss_queues == adapter->max_rx_queues)
			adapter->max_rss_queues -= 1;

		/* each RSS queue needs its own event queue */
		if (adapter->max_event_queues < adapter->max_rss_queues)
			adapter->max_rss_queues = adapter->max_event_queues;

	} else {
		/* no profile: use chip-generation defaults */
		if (be_physfn(adapter))
			adapter->max_pmac_cnt = BE_UC_PMAC_COUNT;
		else
			adapter->max_pmac_cnt = BE_VF_UC_PMAC_COUNT;

		/* Flex10 partitions share the VLAN table among 8 functions */
		if (adapter->function_mode & FLEX10_MODE)
			adapter->max_vlans = BE_NUM_VLANS_SUPPORTED/8;
		else
			adapter->max_vlans = BE_NUM_VLANS_SUPPORTED;

		adapter->max_mcast_mac = BE_MAX_MC;
		adapter->max_tx_queues = MAX_TX_QS;
		adapter->max_rss_queues = (adapter->be3_native) ?
					   BE3_MAX_RSS_QS : BE2_MAX_RSS_QS;
		adapter->max_event_queues = BE3_MAX_RSS_QS;

		adapter->if_cap_flags = BE_IF_FLAGS_UNTAGGED |
					BE_IF_FLAGS_BROADCAST |
					BE_IF_FLAGS_MULTICAST |
					BE_IF_FLAGS_PASS_L3L4_ERRORS |
					BE_IF_FLAGS_MCAST_PROMISCUOUS |
					BE_IF_FLAGS_VLAN_PROMISCUOUS |
					BE_IF_FLAGS_PROMISCUOUS;

		if (adapter->function_caps & BE_FUNCTION_CAPS_RSS)
			adapter->if_cap_flags |= BE_IF_FLAGS_RSS;
	}
}
2847
/* Routine to query per function resource limits */
static int be_get_config(struct be_adapter *adapter)
{
	int pos, status;
	u16 dev_num_vfs;

	/* read port number, function mode and capability bits from FW */
	status = be_cmd_query_fw_cfg(adapter, &adapter->port_num,
				     &adapter->function_mode,
				     &adapter->function_caps);
	if (status)
		goto err;

	be_get_resources(adapter);

	/* primary mac needs 1 pmac entry */
	adapter->pmac_id = kcalloc(adapter->max_pmac_cnt + 1,
				   sizeof(u32), GFP_KERNEL);
	if (!adapter->pmac_id) {
		status = -ENOMEM;
		goto err;
	}

	/* discover how many VFs the device can expose via its SR-IOV
	 * extended PCI capability (0 if the capability is absent)
	 */
	pos = pci_find_ext_capability(adapter->pdev, PCI_EXT_CAP_ID_SRIOV);
	if (pos) {
		pci_read_config_word(adapter->pdev, pos + PCI_SRIOV_TOTAL_VF,
				     &dev_num_vfs);
		if (!lancer_chip(adapter))
			dev_num_vfs = min_t(u16, dev_num_vfs, MAX_VFS);
		adapter->dev_num_vfs = dev_num_vfs;
	}
err:
	return status;
}
2881
/* Bring the adapter to an operational state: query resource limits,
 * create event/completion/MCC/tx/rx queues, create the interface and
 * program its MAC, apply VLAN/rx-mode/flow-control settings, and
 * optionally enable SR-IOV. On any failure everything created so far
 * is torn down via be_clear() and the error is returned.
 */
static int be_setup(struct be_adapter *adapter)
{
	struct device *dev = &adapter->pdev->dev;
	u32 en_flags;
	u32 tx_fc, rx_fc;
	int status;
	u8 mac[ETH_ALEN];
	bool active_mac;

	be_setup_init(adapter);

	if (!lancer_chip(adapter))
		be_cmd_req_native_mode(adapter);

	status = be_get_config(adapter);
	if (status)
		goto err;

	be_msix_enable(adapter);

	/* EQs first: the CQs created below attach to them */
	status = be_evt_queues_create(adapter);
	if (status)
		goto err;

	status = be_tx_cqs_create(adapter);
	if (status)
		goto err;

	status = be_rx_cqs_create(adapter);
	if (status)
		goto err;

	status = be_mcc_queues_create(adapter);
	if (status)
		goto err;

	be_cmd_get_fn_privileges(adapter, &adapter->cmd_privileges, 0);
	/* In UMC mode FW does not return right privileges.
	 * Override with correct privilege equivalent to PF.
	 */
	if (be_is_mc(adapter))
		adapter->cmd_privileges = MAX_PRIVILEGES;

	en_flags = BE_IF_FLAGS_UNTAGGED | BE_IF_FLAGS_BROADCAST |
			BE_IF_FLAGS_MULTICAST | BE_IF_FLAGS_PASS_L3L4_ERRORS;

	if (adapter->function_caps & BE_FUNCTION_CAPS_RSS)
		en_flags |= BE_IF_FLAGS_RSS;

	/* enable only flags the function is actually capable of */
	en_flags = en_flags & adapter->if_cap_flags;

	status = be_cmd_if_create(adapter, adapter->if_cap_flags, en_flags,
				  &adapter->if_handle, 0);
	if (status != 0)
		goto err;

	/* resolve the MAC to use; add it only if FW doesn't already
	 * have it active on this interface
	 */
	memset(mac, 0, ETH_ALEN);
	active_mac = false;
	status = be_get_mac_addr(adapter, mac, adapter->if_handle,
				 &active_mac, &adapter->pmac_id[0]);
	if (status != 0)
		goto err;

	if (!active_mac) {
		status = be_cmd_pmac_add(adapter, mac, adapter->if_handle,
					 &adapter->pmac_id[0], 0);
		if (status != 0)
			goto err;
	}

	if (is_zero_ether_addr(adapter->netdev->dev_addr)) {
		memcpy(adapter->netdev->dev_addr, mac, ETH_ALEN);
		memcpy(adapter->netdev->perm_addr, mac, ETH_ALEN);
	}

	status = be_tx_qs_create(adapter);
	if (status)
		goto err;

	be_cmd_get_fw_ver(adapter, adapter->fw_ver, NULL);

	/* re-program VLAN filters that were added before this (re)setup */
	if (adapter->vlans_added)
		be_vid_config(adapter);

	be_set_rx_mode(adapter->netdev);

	/* sync flow control with the driver's configured values */
	be_cmd_get_flow_control(adapter, &tx_fc, &rx_fc);

	if (rx_fc != adapter->rx_fc || tx_fc != adapter->tx_fc)
		be_cmd_set_flow_control(adapter, adapter->tx_fc,
					adapter->rx_fc);

	if (be_physfn(adapter) && num_vfs) {
		if (adapter->dev_num_vfs)
			be_vf_setup(adapter);
		else
			dev_warn(dev, "device doesn't support SRIOV\n");
	}

	status = be_cmd_get_phy_info(adapter);
	if (!status && be_pause_supported(adapter))
		adapter->phy.fc_autoneg = 1;

	/* kick off the periodic worker (stats, error polling) */
	schedule_delayed_work(&adapter->work, msecs_to_jiffies(1000));
	adapter->flags |= BE_FLAGS_WORKER_SCHEDULED;
	return 0;
err:
	be_clear(adapter);
	return status;
}
2992
#ifdef CONFIG_NET_POLL_CONTROLLER
/* Poll-mode servicing for netpoll/netconsole: with interrupts
 * unavailable, run the event handler for every event queue directly.
 */
static void be_netpoll(struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_eq_obj *eqo;
	int i;

	for_all_evt_queues(adapter, eqo, i)
		event_handle(eqo);
	/* removed redundant bare "return;" at end of void function */
}
#endif
3006
/* Signature expected at the start of a UFI firmware file header */
#define FW_FILE_HDR_SIGN 	"ServerEngines Corp. "
/* Flash-directory cookie, compared (all sizeof(flash_cookie) bytes)
 * against flash_section_info::cookie in get_fsec_info().
 * NOTE(review): the string is split across two 16-byte halves —
 * presumably so the full cookie never appears verbatim in the driver
 * binary itself; confirm before merging the halves.
 */
char flash_cookie[2][16] = {"*** SE FLAS", "H DIRECTORY *** "};
3009
Sarveshwar Bandifa9a6fe2009-11-20 14:23:47 -08003010static bool be_flash_redboot(struct be_adapter *adapter,
Ajit Khaparde3f0d4562010-02-09 01:30:35 +00003011 const u8 *p, u32 img_start, int image_size,
3012 int hdr_size)
Sarveshwar Bandifa9a6fe2009-11-20 14:23:47 -08003013{
3014 u32 crc_offset;
3015 u8 flashed_crc[4];
3016 int status;
Ajit Khaparde3f0d4562010-02-09 01:30:35 +00003017
3018 crc_offset = hdr_size + img_start + image_size - 4;
3019
Sarveshwar Bandifa9a6fe2009-11-20 14:23:47 -08003020 p += crc_offset;
Ajit Khaparde3f0d4562010-02-09 01:30:35 +00003021
3022 status = be_cmd_get_flash_crc(adapter, flashed_crc,
Ajit Khapardef510fc62010-03-31 01:47:45 +00003023 (image_size - 4));
Sarveshwar Bandifa9a6fe2009-11-20 14:23:47 -08003024 if (status) {
3025 dev_err(&adapter->pdev->dev,
3026 "could not get crc from flash, not flashing redboot\n");
3027 return false;
3028 }
3029
3030 /*update redboot only if crc does not match*/
3031 if (!memcmp(flashed_crc, p, 4))
3032 return false;
3033 else
3034 return true;
Sarveshwar Bandifa9a6fe2009-11-20 14:23:47 -08003035}
3036
Sathya Perla306f1342011-08-02 19:57:45 +00003037static bool phy_flashing_required(struct be_adapter *adapter)
3038{
Ajit Khaparde42f11cf2012-04-21 18:53:22 +00003039 return (adapter->phy.phy_type == TN_8022 &&
3040 adapter->phy.interface_type == PHY_TYPE_BASET_10GB);
Sathya Perla306f1342011-08-02 19:57:45 +00003041}
3042
Padmanabh Ratnakarc165541e2012-04-25 01:47:15 +00003043static bool is_comp_in_ufi(struct be_adapter *adapter,
3044 struct flash_section_info *fsec, int type)
3045{
3046 int i = 0, img_type = 0;
3047 struct flash_section_info_g2 *fsec_g2 = NULL;
3048
3049 if (adapter->generation != BE_GEN3)
3050 fsec_g2 = (struct flash_section_info_g2 *)fsec;
3051
3052 for (i = 0; i < MAX_FLASH_COMP; i++) {
3053 if (fsec_g2)
3054 img_type = le32_to_cpu(fsec_g2->fsec_entry[i].type);
3055 else
3056 img_type = le32_to_cpu(fsec->fsec_entry[i].type);
3057
3058 if (img_type == type)
3059 return true;
3060 }
3061 return false;
3062
3063}
3064
3065struct flash_section_info *get_fsec_info(struct be_adapter *adapter,
3066 int header_size,
3067 const struct firmware *fw)
3068{
3069 struct flash_section_info *fsec = NULL;
3070 const u8 *p = fw->data;
3071
3072 p += header_size;
3073 while (p < (fw->data + fw->size)) {
3074 fsec = (struct flash_section_info *)p;
3075 if (!memcmp(flash_cookie, fsec->cookie, sizeof(flash_cookie)))
3076 return fsec;
3077 p += 32;
3078 }
3079 return NULL;
3080}
3081
/* Write one firmware image to flash in 32KB chunks through the shared
 * flashrom DMA command buffer. All chunks but the last use a SAVE op
 * (buffering); the final chunk uses a FLASH op to commit. Returns 0 on
 * success or the firmware error; an ILLEGAL_IOCTL_REQ on a PHY image
 * is treated as "PHY flashing not applicable" and not an error.
 */
static int be_flash(struct be_adapter *adapter, const u8 *img,
		struct be_dma_mem *flash_cmd, int optype, int img_size)
{
	u32 total_bytes = 0, flash_op, num_bytes = 0;
	int status = 0;
	struct be_cmd_write_flashrom *req = flash_cmd->va;

	total_bytes = img_size;
	while (total_bytes) {
		num_bytes = min_t(u32, 32*1024, total_bytes);

		total_bytes -= num_bytes;

		/* last chunk commits (FLASH); earlier chunks buffer (SAVE) */
		if (!total_bytes) {
			if (optype == OPTYPE_PHY_FW)
				flash_op = FLASHROM_OPER_PHY_FLASH;
			else
				flash_op = FLASHROM_OPER_FLASH;
		} else {
			if (optype == OPTYPE_PHY_FW)
				flash_op = FLASHROM_OPER_PHY_SAVE;
			else
				flash_op = FLASHROM_OPER_SAVE;
		}

		/* copy the chunk into the DMA-able command payload */
		memcpy(req->data_buf, img, num_bytes);
		img += num_bytes;
		status = be_cmd_write_flashrom(adapter, flash_cmd, optype,
				flash_op, num_bytes);
		if (status) {
			if (status == ILLEGAL_IOCTL_REQ &&
			    optype == OPTYPE_PHY_FW)
				break;
			dev_err(&adapter->pdev->dev,
				"cmd to write to flash rom failed.\n");
			return status;
		}
	}
	return 0;
}
3122
/* Flash a BE2/BE3 UFI image: walk the per-generation table of known
 * flash components, and flash each one that is present in the UFI's
 * section directory and applicable to this adapter. Returns 0 on
 * success, -1 on a corrupt UFI, or the first flashing error.
 */
static int be_flash_data(struct be_adapter *adapter,
			 const struct firmware *fw,
			 struct be_dma_mem *flash_cmd,
			 int num_of_images)

{
	int status = 0, i, filehdr_size = 0;
	int img_hdrs_size = (num_of_images * sizeof(struct image_hdr));
	const u8 *p = fw->data;
	const struct flash_comp *pflashcomp;
	int num_comp, redboot;
	struct flash_section_info *fsec = NULL;

	/* component tables: flash offset, op type, max size, UFI img type */
	struct flash_comp gen3_flash_types[] = {
		{ FLASH_iSCSI_PRIMARY_IMAGE_START_g3, OPTYPE_ISCSI_ACTIVE,
			FLASH_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_iSCSI},
		{ FLASH_REDBOOT_START_g3, OPTYPE_REDBOOT,
			FLASH_REDBOOT_IMAGE_MAX_SIZE_g3, IMAGE_BOOT_CODE},
		{ FLASH_iSCSI_BIOS_START_g3, OPTYPE_BIOS,
			FLASH_BIOS_IMAGE_MAX_SIZE_g3, IMAGE_OPTION_ROM_ISCSI},
		{ FLASH_PXE_BIOS_START_g3, OPTYPE_PXE_BIOS,
			FLASH_BIOS_IMAGE_MAX_SIZE_g3, IMAGE_OPTION_ROM_PXE},
		{ FLASH_FCoE_BIOS_START_g3, OPTYPE_FCOE_BIOS,
			FLASH_BIOS_IMAGE_MAX_SIZE_g3, IMAGE_OPTION_ROM_FCoE},
		{ FLASH_iSCSI_BACKUP_IMAGE_START_g3, OPTYPE_ISCSI_BACKUP,
			FLASH_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_BACKUP_iSCSI},
		{ FLASH_FCoE_PRIMARY_IMAGE_START_g3, OPTYPE_FCOE_FW_ACTIVE,
			FLASH_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_FCoE},
		{ FLASH_FCoE_BACKUP_IMAGE_START_g3, OPTYPE_FCOE_FW_BACKUP,
			FLASH_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_BACKUP_FCoE},
		{ FLASH_NCSI_START_g3, OPTYPE_NCSI_FW,
			FLASH_NCSI_IMAGE_MAX_SIZE_g3, IMAGE_NCSI},
		{ FLASH_PHY_FW_START_g3, OPTYPE_PHY_FW,
			FLASH_PHY_FW_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_PHY}
	};

	struct flash_comp gen2_flash_types[] = {
		{ FLASH_iSCSI_PRIMARY_IMAGE_START_g2, OPTYPE_ISCSI_ACTIVE,
			FLASH_IMAGE_MAX_SIZE_g2, IMAGE_FIRMWARE_iSCSI},
		{ FLASH_REDBOOT_START_g2, OPTYPE_REDBOOT,
			FLASH_REDBOOT_IMAGE_MAX_SIZE_g2, IMAGE_BOOT_CODE},
		{ FLASH_iSCSI_BIOS_START_g2, OPTYPE_BIOS,
			FLASH_BIOS_IMAGE_MAX_SIZE_g2, IMAGE_OPTION_ROM_ISCSI},
		{ FLASH_PXE_BIOS_START_g2, OPTYPE_PXE_BIOS,
			FLASH_BIOS_IMAGE_MAX_SIZE_g2, IMAGE_OPTION_ROM_PXE},
		{ FLASH_FCoE_BIOS_START_g2, OPTYPE_FCOE_BIOS,
			FLASH_BIOS_IMAGE_MAX_SIZE_g2, IMAGE_OPTION_ROM_FCoE},
		{ FLASH_iSCSI_BACKUP_IMAGE_START_g2, OPTYPE_ISCSI_BACKUP,
			FLASH_IMAGE_MAX_SIZE_g2, IMAGE_FIRMWARE_BACKUP_iSCSI},
		{ FLASH_FCoE_PRIMARY_IMAGE_START_g2, OPTYPE_FCOE_FW_ACTIVE,
			FLASH_IMAGE_MAX_SIZE_g2, IMAGE_FIRMWARE_FCoE},
		{ FLASH_FCoE_BACKUP_IMAGE_START_g2, OPTYPE_FCOE_FW_BACKUP,
			FLASH_IMAGE_MAX_SIZE_g2, IMAGE_FIRMWARE_BACKUP_FCoE}
	};

	if (adapter->generation == BE_GEN3) {
		pflashcomp = gen3_flash_types;
		filehdr_size = sizeof(struct flash_file_hdr_g3);
		num_comp = ARRAY_SIZE(gen3_flash_types);
	} else {
		pflashcomp = gen2_flash_types;
		filehdr_size = sizeof(struct flash_file_hdr_g2);
		num_comp = ARRAY_SIZE(gen2_flash_types);
	}
	/* Get flash section info*/
	fsec = get_fsec_info(adapter, filehdr_size + img_hdrs_size, fw);
	if (!fsec) {
		dev_err(&adapter->pdev->dev,
			"Invalid Cookie. UFI corrupted ?\n");
		return -1;
	}
	for (i = 0; i < num_comp; i++) {
		/* skip components not present in this UFI */
		if (!is_comp_in_ufi(adapter, fsec, pflashcomp[i].img_type))
			continue;

		/* NCSI firmware needs a minimum base FW version to flash */
		if ((pflashcomp[i].optype == OPTYPE_NCSI_FW) &&
		    memcmp(adapter->fw_ver, "3.102.148.0", 11) < 0)
			continue;

		if (pflashcomp[i].optype == OPTYPE_PHY_FW &&
		    !phy_flashing_required(adapter))
				continue;

		/* flash redboot only if its CRC differs from what's on flash */
		if (pflashcomp[i].optype == OPTYPE_REDBOOT) {
			redboot = be_flash_redboot(adapter, fw->data,
				pflashcomp[i].offset, pflashcomp[i].size,
				filehdr_size + img_hdrs_size);
			if (!redboot)
				continue;
		}

		p = fw->data;
		p += filehdr_size + pflashcomp[i].offset + img_hdrs_size;
		/* bounds-check the image against the firmware file size */
		if (p + pflashcomp[i].size > fw->data + fw->size)
			return -1;

		status = be_flash(adapter, p, flash_cmd, pflashcomp[i].optype,
					pflashcomp[i].size);
		if (status) {
			dev_err(&adapter->pdev->dev,
				"Flashing section type %d failed.\n",
				pflashcomp[i].img_type);
			return status;
		}
	}
	return 0;
}
3230
Padmanabh Ratnakar773a2d72012-10-20 06:04:16 +00003231static int be_flash_skyhawk(struct be_adapter *adapter,
3232 const struct firmware *fw,
3233 struct be_dma_mem *flash_cmd, int num_of_images)
Ajit Khaparde3f0d4562010-02-09 01:30:35 +00003234{
Padmanabh Ratnakar773a2d72012-10-20 06:04:16 +00003235 int status = 0, i, filehdr_size = 0;
3236 int img_offset, img_size, img_optype, redboot;
3237 int img_hdrs_size = num_of_images * sizeof(struct image_hdr);
3238 const u8 *p = fw->data;
3239 struct flash_section_info *fsec = NULL;
3240
3241 filehdr_size = sizeof(struct flash_file_hdr_g3);
3242 fsec = get_fsec_info(adapter, filehdr_size + img_hdrs_size, fw);
3243 if (!fsec) {
3244 dev_err(&adapter->pdev->dev,
3245 "Invalid Cookie. UFI corrupted ?\n");
3246 return -1;
3247 }
3248
3249 for (i = 0; i < le32_to_cpu(fsec->fsec_hdr.num_images); i++) {
3250 img_offset = le32_to_cpu(fsec->fsec_entry[i].offset);
3251 img_size = le32_to_cpu(fsec->fsec_entry[i].pad_size);
3252
3253 switch (le32_to_cpu(fsec->fsec_entry[i].type)) {
3254 case IMAGE_FIRMWARE_iSCSI:
3255 img_optype = OPTYPE_ISCSI_ACTIVE;
3256 break;
3257 case IMAGE_BOOT_CODE:
3258 img_optype = OPTYPE_REDBOOT;
3259 break;
3260 case IMAGE_OPTION_ROM_ISCSI:
3261 img_optype = OPTYPE_BIOS;
3262 break;
3263 case IMAGE_OPTION_ROM_PXE:
3264 img_optype = OPTYPE_PXE_BIOS;
3265 break;
3266 case IMAGE_OPTION_ROM_FCoE:
3267 img_optype = OPTYPE_FCOE_BIOS;
3268 break;
3269 case IMAGE_FIRMWARE_BACKUP_iSCSI:
3270 img_optype = OPTYPE_ISCSI_BACKUP;
3271 break;
3272 case IMAGE_NCSI:
3273 img_optype = OPTYPE_NCSI_FW;
3274 break;
3275 default:
3276 continue;
3277 }
3278
3279 if (img_optype == OPTYPE_REDBOOT) {
3280 redboot = be_flash_redboot(adapter, fw->data,
3281 img_offset, img_size,
3282 filehdr_size + img_hdrs_size);
3283 if (!redboot)
3284 continue;
3285 }
3286
3287 p = fw->data;
3288 p += filehdr_size + img_offset + img_hdrs_size;
3289 if (p + img_size > fw->data + fw->size)
3290 return -1;
3291
3292 status = be_flash(adapter, p, flash_cmd, img_optype, img_size);
3293 if (status) {
3294 dev_err(&adapter->pdev->dev,
3295 "Flashing section type %d failed.\n",
3296 fsec->fsec_entry[i].type);
3297 return status;
3298 }
3299 }
3300 return 0;
Ajit Khaparde3f0d4562010-02-09 01:30:35 +00003301}
3302
Padmanabh Ratnakarf67ef7b2012-07-12 03:57:09 +00003303static int lancer_wait_idle(struct be_adapter *adapter)
3304{
3305#define SLIPORT_IDLE_TIMEOUT 30
3306 u32 reg_val;
3307 int status = 0, i;
3308
3309 for (i = 0; i < SLIPORT_IDLE_TIMEOUT; i++) {
3310 reg_val = ioread32(adapter->db + PHYSDEV_CONTROL_OFFSET);
3311 if ((reg_val & PHYSDEV_CONTROL_INP_MASK) == 0)
3312 break;
3313
3314 ssleep(1);
3315 }
3316
3317 if (i == SLIPORT_IDLE_TIMEOUT)
3318 status = -1;
3319
3320 return status;
3321}
3322
3323static int lancer_fw_reset(struct be_adapter *adapter)
3324{
3325 int status = 0;
3326
3327 status = lancer_wait_idle(adapter);
3328 if (status)
3329 return status;
3330
3331 iowrite32(PHYSDEV_CONTROL_FW_RESET_MASK, adapter->db +
3332 PHYSDEV_CONTROL_OFFSET);
3333
3334 return status;
3335}
3336
/* Download a firmware image to a Lancer adapter: stream it in 32KB
 * chunks through a DMA-able write_object command buffer, commit with a
 * zero-length write, then reset the chip if the firmware asks for it.
 * Returns 0 on success or a negative error.
 */
static int lancer_fw_download(struct be_adapter *adapter,
				const struct firmware *fw)
{
#define LANCER_FW_DOWNLOAD_CHUNK (32 * 1024)
#define LANCER_FW_DOWNLOAD_LOCATION "/prg"
	struct be_dma_mem flash_cmd;
	const u8 *data_ptr = NULL;
	u8 *dest_image_ptr = NULL;
	size_t image_size = 0;
	u32 chunk_size = 0;
	u32 data_written = 0;
	u32 offset = 0;
	int status = 0;
	u8 add_status = 0;
	u8 change_status;

	/* firmware requires the image length to be a multiple of 4 */
	if (!IS_ALIGNED(fw->size, sizeof(u32))) {
		dev_err(&adapter->pdev->dev,
			"FW Image not properly aligned. "
			"Length must be 4 byte aligned.\n");
		status = -EINVAL;
		goto lancer_fw_exit;
	}

	/* one buffer holds the command header plus one chunk of payload */
	flash_cmd.size = sizeof(struct lancer_cmd_req_write_object)
				+ LANCER_FW_DOWNLOAD_CHUNK;
	flash_cmd.va = dma_alloc_coherent(&adapter->pdev->dev, flash_cmd.size,
					  &flash_cmd.dma, GFP_KERNEL);
	if (!flash_cmd.va) {
		status = -ENOMEM;
		dev_err(&adapter->pdev->dev,
			"Memory allocation failure while flashing\n");
		goto lancer_fw_exit;
	}

	dest_image_ptr = flash_cmd.va +
				sizeof(struct lancer_cmd_req_write_object);
	image_size = fw->size;
	data_ptr = fw->data;

	while (image_size) {
		chunk_size = min_t(u32, image_size, LANCER_FW_DOWNLOAD_CHUNK);

		/* Copy the image chunk content. */
		memcpy(dest_image_ptr, data_ptr, chunk_size);

		status = lancer_cmd_write_object(adapter, &flash_cmd,
						 chunk_size, offset,
						 LANCER_FW_DOWNLOAD_LOCATION,
						 &data_written, &change_status,
						 &add_status);
		if (status)
			break;

		/* advance by what the firmware actually consumed */
		offset += data_written;
		data_ptr += data_written;
		image_size -= data_written;
	}

	if (!status) {
		/* Commit the FW written */
		status = lancer_cmd_write_object(adapter, &flash_cmd,
						 0, offset,
						 LANCER_FW_DOWNLOAD_LOCATION,
						 &data_written, &change_status,
						 &add_status);
	}

	dma_free_coherent(&adapter->pdev->dev, flash_cmd.size, flash_cmd.va,
				flash_cmd.dma);
	if (status) {
		dev_err(&adapter->pdev->dev,
			"Firmware load error. "
			"Status code: 0x%x Additional Status: 0x%x\n",
			status, add_status);
		goto lancer_fw_exit;
	}

	/* activate the new firmware: reset now if FW says it suffices,
	 * otherwise tell the user a full reboot is required
	 */
	if (change_status == LANCER_FW_RESET_NEEDED) {
		status = lancer_fw_reset(adapter);
		if (status) {
			dev_err(&adapter->pdev->dev,
				"Adapter busy for FW reset.\n"
				"New FW will not be active.\n");
			goto lancer_fw_exit;
		}
	} else if (change_status != LANCER_NO_RESET_NEEDED) {
		dev_err(&adapter->pdev->dev,
			"System reboot required for new FW"
			" to be active\n");
	}

	dev_info(&adapter->pdev->dev, "Firmware flashed successfully\n");
lancer_fw_exit:
	return status;
}
3433
Padmanabh Ratnakar773a2d72012-10-20 06:04:16 +00003434static int be_get_ufi_gen(struct be_adapter *adapter,
3435 struct flash_file_hdr_g2 *fhdr)
3436{
3437 if (fhdr == NULL)
3438 goto be_get_ufi_exit;
3439
3440 if (adapter->generation == BE_GEN3) {
3441 if (skyhawk_chip(adapter) && fhdr->build[0] == '4')
3442 return SH_HW;
3443 else if (!skyhawk_chip(adapter) && fhdr->build[0] == '3')
3444 return BE_GEN3;
3445 } else if (adapter->generation == BE_GEN2 && fhdr->build[0] == '2') {
3446 return BE_GEN2;
3447 }
3448
3449be_get_ufi_exit:
3450 dev_err(&adapter->pdev->dev,
3451 "UFI and Interface are not compatible for flashing\n");
3452 return -1;
3453}
3454
/* Parse a UFI firmware file and flash the sections that apply to this
 * adapter's generation (BE_GEN2 / BE_GEN3 / Skyhawk).
 * Returns 0 on success, -ENOMEM on DMA allocation failure, or the error
 * status of the flash operation (-1 when the UFI is incompatible).
 */
static int be_fw_download(struct be_adapter *adapter, const struct firmware* fw)
{
	struct flash_file_hdr_g2 *fhdr;
	struct flash_file_hdr_g3 *fhdr3;
	struct image_hdr *img_hdr_ptr = NULL;
	struct be_dma_mem flash_cmd;
	const u8 *p;
	int status = 0, i = 0, num_imgs = 0, ufi_type = 0;

	/* Scratch DMA buffer reused for every write_flashrom command */
	flash_cmd.size = sizeof(struct be_cmd_write_flashrom);
	flash_cmd.va = dma_alloc_coherent(&adapter->pdev->dev, flash_cmd.size,
					  &flash_cmd.dma, GFP_KERNEL);
	if (!flash_cmd.va) {
		status = -ENOMEM;
		dev_err(&adapter->pdev->dev,
			"Memory allocation failure while flashing\n");
		goto be_fw_exit;
	}

	/* The g2 header view is only used to sniff the UFI generation */
	p = fw->data;
	fhdr = (struct flash_file_hdr_g2 *)p;

	ufi_type = be_get_ufi_gen(adapter, fhdr);

	/* g3-style UFIs carry a table of per-image headers after the file
	 * header; flash each image with imageid 1 via the gen-specific
	 * routine. A ufi_type of BE_GEN2 or -1 flashes nothing here. */
	fhdr3 = (struct flash_file_hdr_g3 *)fw->data;
	num_imgs = le32_to_cpu(fhdr3->num_imgs);
	for (i = 0; i < num_imgs; i++) {
		img_hdr_ptr = (struct image_hdr *)(fw->data +
				(sizeof(struct flash_file_hdr_g3) +
				 i * sizeof(struct image_hdr)));
		if (le32_to_cpu(img_hdr_ptr->imageid) == 1) {
			if (ufi_type == SH_HW)
				status = be_flash_skyhawk(adapter, fw,
							&flash_cmd, num_imgs);
			else if (ufi_type == BE_GEN3)
				status = be_flash_data(adapter, fw,
						      &flash_cmd, num_imgs);
		}
	}

	/* Gen2 UFIs have no g3 image table; flash directly */
	if (ufi_type == BE_GEN2)
		status = be_flash_data(adapter, fw, &flash_cmd, 0);
	else if (ufi_type == -1)
		status = -1;

	dma_free_coherent(&adapter->pdev->dev, flash_cmd.size, flash_cmd.va,
			  flash_cmd.dma);
	if (status) {
		dev_err(&adapter->pdev->dev, "Firmware load error\n");
		goto be_fw_exit;
	}

	dev_info(&adapter->pdev->dev, "Firmware flashed successfully\n");

be_fw_exit:
	return status;
}
3512
3513int be_load_fw(struct be_adapter *adapter, u8 *fw_file)
3514{
3515 const struct firmware *fw;
3516 int status;
3517
3518 if (!netif_running(adapter->netdev)) {
3519 dev_err(&adapter->pdev->dev,
3520 "Firmware load not allowed (interface is down)\n");
3521 return -1;
3522 }
3523
3524 status = request_firmware(&fw, fw_file, &adapter->pdev->dev);
3525 if (status)
3526 goto fw_exit;
3527
3528 dev_info(&adapter->pdev->dev, "Flashing firmware file %s\n", fw_file);
3529
3530 if (lancer_chip(adapter))
3531 status = lancer_fw_download(adapter, fw);
3532 else
3533 status = be_fw_download(adapter, fw);
3534
Ajit Khaparde84517482009-09-04 03:12:16 +00003535fw_exit:
3536 release_firmware(fw);
3537 return status;
3538}
3539
/* net_device entry points for this driver; installed on the netdev in
 * be_netdev_init().
 */
static const struct net_device_ops be_netdev_ops = {
	.ndo_open		= be_open,
	.ndo_stop		= be_close,
	.ndo_start_xmit		= be_xmit,
	.ndo_set_rx_mode	= be_set_rx_mode,
	.ndo_set_mac_address	= be_mac_addr_set,
	.ndo_change_mtu		= be_change_mtu,
	.ndo_get_stats64	= be_get_stats64,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_vlan_rx_add_vid	= be_vlan_add_vid,
	.ndo_vlan_rx_kill_vid	= be_vlan_rem_vid,
	/* SR-IOV VF management hooks */
	.ndo_set_vf_mac		= be_set_vf_mac,
	.ndo_set_vf_vlan	= be_set_vf_vlan,
	.ndo_set_vf_tx_rate	= be_set_vf_tx_rate,
	.ndo_get_vf_config	= be_get_vf_config,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	= be_netpoll,
#endif
};
3559
/* One-time netdev setup: feature flags, ops, ethtool ops and one NAPI
 * context per event queue. Called from be_probe() before
 * register_netdev().
 */
static void be_netdev_init(struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_eq_obj *eqo;
	int i;

	/* Features the user may toggle via ethtool */
	netdev->hw_features |= NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6 |
		NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_RXCSUM |
		NETIF_F_HW_VLAN_TX;
	if (be_multi_rxq(adapter))
		netdev->hw_features |= NETIF_F_RXHASH;

	/* Enable everything togglable by default, plus VLAN rx/filter
	 * which are not exposed in hw_features */
	netdev->features |= netdev->hw_features |
		NETIF_F_HW_VLAN_RX | NETIF_F_HW_VLAN_FILTER;

	netdev->vlan_features |= NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6 |
		NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;

	netdev->priv_flags |= IFF_UNICAST_FLT;

	netdev->flags |= IFF_MULTICAST;

	netif_set_gso_max_size(netdev, 65535 - ETH_HLEN);

	netdev->netdev_ops = &be_netdev_ops;

	SET_ETHTOOL_OPS(netdev, &be_ethtool_ops);

	/* One NAPI context per event queue */
	for_all_evt_queues(adapter, eqo, i)
		netif_napi_add(netdev, &eqo->napi, be_poll, BE_NAPI_WEIGHT);
}
3591
3592static void be_unmap_pci_bars(struct be_adapter *adapter)
3593{
Sathya Perla8788fdc2009-07-27 22:52:03 +00003594 if (adapter->csr)
3595 iounmap(adapter->csr);
3596 if (adapter->db)
3597 iounmap(adapter->db);
Parav Pandit045508a2012-03-26 14:27:13 +00003598 if (adapter->roce_db.base)
3599 pci_iounmap(adapter->pdev, adapter->roce_db.base);
3600}
3601
3602static int lancer_roce_map_pci_bars(struct be_adapter *adapter)
3603{
3604 struct pci_dev *pdev = adapter->pdev;
3605 u8 __iomem *addr;
3606
3607 addr = pci_iomap(pdev, 2, 0);
3608 if (addr == NULL)
3609 return -ENOMEM;
3610
3611 adapter->roce_db.base = addr;
3612 adapter->roce_db.io_addr = pci_resource_start(pdev, 2);
3613 adapter->roce_db.size = 8192;
3614 adapter->roce_db.total_size = pci_resource_len(pdev, 2);
3615 return 0;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003616}
3617
3618static int be_map_pci_bars(struct be_adapter *adapter)
3619{
3620 u8 __iomem *addr;
Sathya Perladb3ea782011-08-22 19:41:52 +00003621 int db_reg;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003622
Sathya Perlafe6d2a32010-11-21 23:25:50 +00003623 if (lancer_chip(adapter)) {
Parav Pandit045508a2012-03-26 14:27:13 +00003624 if (be_type_2_3(adapter)) {
3625 addr = ioremap_nocache(
3626 pci_resource_start(adapter->pdev, 0),
3627 pci_resource_len(adapter->pdev, 0));
3628 if (addr == NULL)
3629 return -ENOMEM;
3630 adapter->db = addr;
3631 }
3632 if (adapter->if_type == SLI_INTF_TYPE_3) {
3633 if (lancer_roce_map_pci_bars(adapter))
3634 goto pci_map_err;
3635 }
Sathya Perlafe6d2a32010-11-21 23:25:50 +00003636 return 0;
3637 }
3638
Sarveshwar Bandiba343c72010-03-31 02:56:12 +00003639 if (be_physfn(adapter)) {
3640 addr = ioremap_nocache(pci_resource_start(adapter->pdev, 2),
3641 pci_resource_len(adapter->pdev, 2));
3642 if (addr == NULL)
3643 return -ENOMEM;
3644 adapter->csr = addr;
3645 }
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003646
Sarveshwar Bandiba343c72010-03-31 02:56:12 +00003647 if (adapter->generation == BE_GEN2) {
Sarveshwar Bandiba343c72010-03-31 02:56:12 +00003648 db_reg = 4;
3649 } else {
Sarveshwar Bandiba343c72010-03-31 02:56:12 +00003650 if (be_physfn(adapter))
3651 db_reg = 4;
3652 else
3653 db_reg = 0;
3654 }
3655 addr = ioremap_nocache(pci_resource_start(adapter->pdev, db_reg),
3656 pci_resource_len(adapter->pdev, db_reg));
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003657 if (addr == NULL)
3658 goto pci_map_err;
Sathya Perla8788fdc2009-07-27 22:52:03 +00003659 adapter->db = addr;
Parav Pandit045508a2012-03-26 14:27:13 +00003660 if (adapter->sli_family == SKYHAWK_SLI_FAMILY) {
3661 adapter->roce_db.size = 4096;
3662 adapter->roce_db.io_addr =
3663 pci_resource_start(adapter->pdev, db_reg);
3664 adapter->roce_db.total_size =
3665 pci_resource_len(adapter->pdev, db_reg);
3666 }
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003667 return 0;
3668pci_map_err:
3669 be_unmap_pci_bars(adapter);
3670 return -ENOMEM;
3671}
3672
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003673static void be_ctrl_cleanup(struct be_adapter *adapter)
3674{
Sathya Perla8788fdc2009-07-27 22:52:03 +00003675 struct be_dma_mem *mem = &adapter->mbox_mem_alloced;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003676
3677 be_unmap_pci_bars(adapter);
3678
3679 if (mem->va)
Ivan Vecera2b7bceb2011-02-02 08:05:12 +00003680 dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
3681 mem->dma);
Sathya Perlae7b909a2009-11-22 22:01:10 +00003682
Sathya Perla5b8821b2011-08-02 19:57:44 +00003683 mem = &adapter->rx_filter;
Sathya Perlae7b909a2009-11-22 22:01:10 +00003684 if (mem->va)
Ivan Vecera2b7bceb2011-02-02 08:05:12 +00003685 dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
3686 mem->dma);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003687}
3688
/* Set up everything needed to talk to the controller: PCI BAR mappings,
 * the 16-byte-aligned mailbox DMA buffer, the rx-filter cmd buffer and
 * the locks/completions used by the cmd paths.
 * Returns 0 or a negative errno; all partial setup is undone on failure.
 */
static int be_ctrl_init(struct be_adapter *adapter)
{
	struct be_dma_mem *mbox_mem_alloc = &adapter->mbox_mem_alloced;
	struct be_dma_mem *mbox_mem_align = &adapter->mbox_mem;
	struct be_dma_mem *rx_filter = &adapter->rx_filter;
	int status;

	status = be_map_pci_bars(adapter);
	if (status)
		goto done;

	/* Over-allocate by 16 bytes so an aligned view can be carved out */
	mbox_mem_alloc->size = sizeof(struct be_mcc_mailbox) + 16;
	mbox_mem_alloc->va = dma_alloc_coherent(&adapter->pdev->dev,
						mbox_mem_alloc->size,
						&mbox_mem_alloc->dma,
						GFP_KERNEL);
	if (!mbox_mem_alloc->va) {
		status = -ENOMEM;
		goto unmap_pci_bars;
	}
	/* mbox_mem is the 16-byte-aligned window into mbox_mem_alloced;
	 * only the alloc'ed buffer is ever freed */
	mbox_mem_align->size = sizeof(struct be_mcc_mailbox);
	mbox_mem_align->va = PTR_ALIGN(mbox_mem_alloc->va, 16);
	mbox_mem_align->dma = PTR_ALIGN(mbox_mem_alloc->dma, 16);
	memset(mbox_mem_align->va, 0, sizeof(struct be_mcc_mailbox));

	rx_filter->size = sizeof(struct be_cmd_req_rx_filter);
	rx_filter->va = dma_alloc_coherent(&adapter->pdev->dev, rx_filter->size,
					&rx_filter->dma, GFP_KERNEL);
	if (rx_filter->va == NULL) {
		status = -ENOMEM;
		goto free_mbox;
	}
	memset(rx_filter->va, 0, rx_filter->size);

	mutex_init(&adapter->mbox_lock);
	spin_lock_init(&adapter->mcc_lock);
	spin_lock_init(&adapter->mcc_cq_lock);

	init_completion(&adapter->flash_compl);
	/* Save config space so it can be restored after a PCI reset */
	pci_save_state(adapter->pdev);
	return 0;

free_mbox:
	dma_free_coherent(&adapter->pdev->dev, mbox_mem_alloc->size,
			  mbox_mem_alloc->va, mbox_mem_alloc->dma);

unmap_pci_bars:
	be_unmap_pci_bars(adapter);

done:
	return status;
}
3740
3741static void be_stats_cleanup(struct be_adapter *adapter)
3742{
Sathya Perla3abcded2010-10-03 22:12:27 -07003743 struct be_dma_mem *cmd = &adapter->stats_cmd;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003744
3745 if (cmd->va)
Ivan Vecera2b7bceb2011-02-02 08:05:12 +00003746 dma_free_coherent(&adapter->pdev->dev, cmd->size,
3747 cmd->va, cmd->dma);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003748}
3749
3750static int be_stats_init(struct be_adapter *adapter)
3751{
Sathya Perla3abcded2010-10-03 22:12:27 -07003752 struct be_dma_mem *cmd = &adapter->stats_cmd;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003753
Selvin Xavier005d5692011-05-16 07:36:35 +00003754 if (adapter->generation == BE_GEN2) {
Ajit Khaparde89a88ab2011-05-16 07:36:18 +00003755 cmd->size = sizeof(struct be_cmd_req_get_stats_v0);
Selvin Xavier005d5692011-05-16 07:36:35 +00003756 } else {
3757 if (lancer_chip(adapter))
3758 cmd->size = sizeof(struct lancer_cmd_req_pport_stats);
3759 else
3760 cmd->size = sizeof(struct be_cmd_req_get_stats_v1);
3761 }
Ivan Vecera2b7bceb2011-02-02 08:05:12 +00003762 cmd->va = dma_alloc_coherent(&adapter->pdev->dev, cmd->size, &cmd->dma,
3763 GFP_KERNEL);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003764 if (cmd->va == NULL)
3765 return -1;
David S. Millerd291b9a2010-01-28 21:36:21 -08003766 memset(cmd->va, 0, cmd->size);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003767 return 0;
3768}
3769
/* PCI remove callback: tears the adapter down in the reverse order of
 * be_probe(). Tolerates a probe that never set drvdata.
 */
static void __devexit be_remove(struct pci_dev *pdev)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);

	if (!adapter)
		return;

	be_roce_dev_remove(adapter);

	/* stop the recovery worker before the netdev disappears */
	cancel_delayed_work_sync(&adapter->func_recovery_work);

	unregister_netdev(adapter->netdev);

	be_clear(adapter);

	/* tell fw we're done with firing cmds */
	be_cmd_fw_clean(adapter);

	be_stats_cleanup(adapter);

	be_ctrl_cleanup(adapter);

	pci_disable_pcie_error_reporting(pdev);

	pci_set_drvdata(pdev, NULL);
	pci_release_regions(pdev);
	pci_disable_device(pdev);

	free_netdev(adapter->netdev);
}
3800
Ajit Khaparde4762f6c2012-03-18 06:23:11 +00003801bool be_is_wol_supported(struct be_adapter *adapter)
3802{
3803 return ((adapter->wol_cap & BE_WOL_CAP) &&
3804 !be_is_wol_excluded(adapter)) ? true : false;
3805}
3806
/* Query the FW's extended FAT capabilities and return the debug level
 * configured for the UART trace mode of module 0.
 * Returns 0 on Lancer (query not issued there) or on any failure.
 */
u32 be_get_fw_log_level(struct be_adapter *adapter)
{
	struct be_dma_mem extfat_cmd;
	struct be_fat_conf_params *cfgs;
	int status;
	u32 level = 0;
	int j;

	if (lancer_chip(adapter))
		return 0;

	memset(&extfat_cmd, 0, sizeof(struct be_dma_mem));
	extfat_cmd.size = sizeof(struct be_cmd_resp_get_ext_fat_caps);
	extfat_cmd.va = pci_alloc_consistent(adapter->pdev, extfat_cmd.size,
					     &extfat_cmd.dma);

	if (!extfat_cmd.va) {
		dev_err(&adapter->pdev->dev, "%s: Memory allocation failure\n",
			__func__);
		goto err;
	}

	status = be_cmd_get_ext_fat_capabilites(adapter, &extfat_cmd);
	if (!status) {
		/* FAT config params follow the common response header */
		cfgs = (struct be_fat_conf_params *)(extfat_cmd.va +
						sizeof(struct be_cmd_resp_hdr));
		/* scan module 0's trace modes for the UART entry */
		for (j = 0; j < le32_to_cpu(cfgs->module[0].num_modes); j++) {
			if (cfgs->module[0].trace_lvl[j].mode == MODE_UART)
				level = cfgs->module[0].trace_lvl[j].dbg_lvl;
		}
	}
	pci_free_consistent(adapter->pdev, extfat_cmd.size, extfat_cmd.va,
			    extfat_cmd.dma);
err:
	return level;
}
Padmanabh Ratnakarabb93952012-10-20 06:01:41 +00003843
/* Fetch one-time configuration from the FW: controller attributes, WoL
 * capability and the FW log level used to seed msg_enable.
 * Returns 0 on success or the failing cmd's status.
 */
static int be_get_initial_config(struct be_adapter *adapter)
{
	int status;
	u32 level;

	status = be_cmd_get_cntl_attributes(adapter);
	if (status)
		return status;

	status = be_cmd_get_acpi_wol_cap(adapter);
	if (status) {
		/* in case of a failure to get wol capabilities
		 * check the exclusion list to determine WOL capability */
		if (!be_is_wol_excluded(adapter))
			adapter->wol_cap |= BE_WOL_CAP;
	}

	if (be_is_wol_supported(adapter))
		adapter->wol = true;

	/* Must be a power of 2 or else MODULO will BUG_ON */
	adapter->be_get_temp_freq = 64;

	level = be_get_fw_log_level(adapter);
	adapter->msg_enable = level <= FW_LOG_LEVEL_DEFAULT ? NETIF_MSG_HW : 0;

	return 0;
}
3872
Sathya Perla39f1d942012-05-08 19:41:24 +00003873static int be_dev_type_check(struct be_adapter *adapter)
Sathya Perlafe6d2a32010-11-21 23:25:50 +00003874{
3875 struct pci_dev *pdev = adapter->pdev;
3876 u32 sli_intf = 0, if_type;
3877
3878 switch (pdev->device) {
3879 case BE_DEVICE_ID1:
3880 case OC_DEVICE_ID1:
3881 adapter->generation = BE_GEN2;
3882 break;
3883 case BE_DEVICE_ID2:
3884 case OC_DEVICE_ID2:
3885 adapter->generation = BE_GEN3;
3886 break;
3887 case OC_DEVICE_ID3:
Mammatha Edhala12f4d0a2011-05-18 03:26:22 +00003888 case OC_DEVICE_ID4:
Sathya Perlafe6d2a32010-11-21 23:25:50 +00003889 pci_read_config_dword(pdev, SLI_INTF_REG_OFFSET, &sli_intf);
Parav Pandit045508a2012-03-26 14:27:13 +00003890 adapter->if_type = (sli_intf & SLI_INTF_IF_TYPE_MASK) >>
3891 SLI_INTF_IF_TYPE_SHIFT;
Sathya Perlafe6d2a32010-11-21 23:25:50 +00003892 if_type = (sli_intf & SLI_INTF_IF_TYPE_MASK) >>
3893 SLI_INTF_IF_TYPE_SHIFT;
Sathya Perlafe6d2a32010-11-21 23:25:50 +00003894 if (((sli_intf & SLI_INTF_VALID_MASK) != SLI_INTF_VALID) ||
Parav Pandit045508a2012-03-26 14:27:13 +00003895 !be_type_2_3(adapter)) {
3896 dev_err(&pdev->dev, "SLI_INTF reg val is not valid\n");
3897 return -EINVAL;
3898 }
3899 adapter->sli_family = ((sli_intf & SLI_INTF_FAMILY_MASK) >>
3900 SLI_INTF_FAMILY_SHIFT);
3901 adapter->generation = BE_GEN3;
3902 break;
3903 case OC_DEVICE_ID5:
Padmanabh Ratnakar76b73532012-10-20 06:04:40 +00003904 case OC_DEVICE_ID6:
Parav Pandit045508a2012-03-26 14:27:13 +00003905 pci_read_config_dword(pdev, SLI_INTF_REG_OFFSET, &sli_intf);
3906 if ((sli_intf & SLI_INTF_VALID_MASK) != SLI_INTF_VALID) {
Sathya Perlafe6d2a32010-11-21 23:25:50 +00003907 dev_err(&pdev->dev, "SLI_INTF reg val is not valid\n");
3908 return -EINVAL;
3909 }
Sathya Perlafe6d2a32010-11-21 23:25:50 +00003910 adapter->sli_family = ((sli_intf & SLI_INTF_FAMILY_MASK) >>
3911 SLI_INTF_FAMILY_SHIFT);
3912 adapter->generation = BE_GEN3;
3913 break;
3914 default:
3915 adapter->generation = 0;
3916 }
Sathya Perla39f1d942012-05-08 19:41:24 +00003917
3918 pci_read_config_dword(adapter->pdev, SLI_INTF_REG_OFFSET, &sli_intf);
3919 adapter->virtfn = (sli_intf & SLI_INTF_FT_MASK) ? 1 : 0;
Sathya Perlafe6d2a32010-11-21 23:25:50 +00003920 return 0;
3921}
3922
/* Recover a Lancer function after a SLIPORT error: wait for the FW to
 * become ready, tear down and rebuild the adapter state, and reopen the
 * interface if it was running. Returns 0 on success.
 */
static int lancer_recover_func(struct be_adapter *adapter)
{
	int status;

	status = lancer_test_and_set_rdy_state(adapter);
	if (status)
		goto err;

	if (netif_running(adapter->netdev))
		be_close(adapter->netdev);

	be_clear(adapter);

	/* clear sticky error flags before re-initializing */
	adapter->hw_error = false;
	adapter->fw_timeout = false;

	status = be_setup(adapter);
	if (status)
		goto err;

	if (netif_running(adapter->netdev)) {
		status = be_open(adapter->netdev);
		if (status)
			goto err;
	}

	dev_err(&adapter->pdev->dev,
		"Adapter SLIPORT recovery succeeded\n");
	return 0;
err:
	/* only report failure when not already in EEH handling */
	if (adapter->eeh_error)
		dev_err(&adapter->pdev->dev,
			"Adapter SLIPORT recovery failed\n");

	return status;
}
3959
/* Periodic (1s) worker: polls for adapter errors and, on Lancer,
 * attempts SLIPORT recovery. Always reschedules itself.
 */
static void be_func_recovery_task(struct work_struct *work)
{
	struct be_adapter *adapter =
		container_of(work, struct be_adapter, func_recovery_work.work);
	int status;

	be_detect_error(adapter);

	if (adapter->hw_error && lancer_chip(adapter)) {

		/* skip recovery while an EEH error is pending; the EEH
		 * handlers own the device in that case */
		if (adapter->eeh_error)
			goto out;

		rtnl_lock();
		netif_device_detach(adapter->netdev);
		rtnl_unlock();

		status = lancer_recover_func(adapter);

		if (!status)
			netif_device_attach(adapter->netdev);
	}

out:
	schedule_delayed_work(&adapter->func_recovery_work,
			      msecs_to_jiffies(1000));
}
3987
/* Periodic (1s) housekeeping: reaps MCC completions while the interface
 * is down, issues stats and die-temperature queries, replenishes
 * starved RX rings and updates per-EQ interrupt delays.
 */
static void be_worker(struct work_struct *work)
{
	struct be_adapter *adapter =
		container_of(work, struct be_adapter, work.work);
	struct be_rx_obj *rxo;
	struct be_eq_obj *eqo;
	int i;

	/* when interrupts are not yet enabled, just reap any pending
	 * mcc completions */
	if (!netif_running(adapter->netdev)) {
		local_bh_disable();
		be_process_mcc(adapter);
		local_bh_enable();
		goto reschedule;
	}

	/* don't fire a new stats cmd while one is still outstanding */
	if (!adapter->stats_cmd_sent) {
		if (lancer_chip(adapter))
			lancer_cmd_get_pport_stats(adapter,
						   &adapter->stats_cmd);
		else
			be_cmd_get_stats(adapter, &adapter->stats_cmd);
	}

	/* query die temperature every be_get_temp_freq iterations */
	if (MODULO(adapter->work_counter, adapter->be_get_temp_freq) == 0)
		be_cmd_get_die_temperature(adapter);

	/* re-post buffers on any RX ring flagged as starved */
	for_all_rx_queues(adapter, rxo, i) {
		if (rxo->rx_post_starved) {
			rxo->rx_post_starved = false;
			be_post_rx_frags(rxo, GFP_KERNEL);
		}
	}

	for_all_evt_queues(adapter, eqo, i)
		be_eqd_update(adapter, eqo);

reschedule:
	adapter->work_counter++;
	schedule_delayed_work(&adapter->work, msecs_to_jiffies(1000));
}
4030
Sathya Perla39f1d942012-05-08 19:41:24 +00004031static bool be_reset_required(struct be_adapter *adapter)
4032{
Sathya Perlad79c0a22012-06-05 19:37:22 +00004033 return be_find_vfs(adapter, ENABLED) > 0 ? false : true;
Sathya Perla39f1d942012-05-08 19:41:24 +00004034}
4035
Sathya Perlad3791422012-09-28 04:39:44 +00004036static char *mc_name(struct be_adapter *adapter)
4037{
4038 if (adapter->function_mode & FLEX10_MODE)
4039 return "FLEX10";
4040 else if (adapter->function_mode & VNIC_MODE)
4041 return "vNIC";
4042 else if (adapter->function_mode & UMC_ENABLED)
4043 return "UMC";
4044 else
4045 return "";
4046}
4047
/* "PF" for a physical function, "VF" for a virtual one (probe banner). */
static inline char *func_name(struct be_adapter *adapter)
{
	if (be_physfn(adapter))
		return "PF";
	return "VF";
}
4052
/* PCI probe callback: brings up a newly discovered BE/Lancer NIC.
 * Enables the device, allocates the netdev, maps BARs, syncs with the
 * FW, configures the HW and registers the net device. Every failure
 * path unwinds what was set up, in reverse order.
 */
static int __devinit be_probe(struct pci_dev *pdev,
			const struct pci_device_id *pdev_id)
{
	int status = 0;
	struct be_adapter *adapter;
	struct net_device *netdev;
	char port_name;

	status = pci_enable_device(pdev);
	if (status)
		goto do_none;

	status = pci_request_regions(pdev, DRV_NAME);
	if (status)
		goto disable_dev;
	pci_set_master(pdev);

	netdev = alloc_etherdev_mqs(sizeof(*adapter), MAX_TX_QS, MAX_RX_QS);
	if (netdev == NULL) {
		status = -ENOMEM;
		goto rel_reg;
	}
	adapter = netdev_priv(netdev);
	adapter->pdev = pdev;
	pci_set_drvdata(pdev, adapter);

	status = be_dev_type_check(adapter);
	if (status)
		goto free_netdev;

	adapter->netdev = netdev;
	SET_NETDEV_DEV(netdev, &pdev->dev);

	/* prefer 64-bit DMA, fall back to 32-bit */
	status = dma_set_mask(&pdev->dev, DMA_BIT_MASK(64));
	if (!status) {
		netdev->features |= NETIF_F_HIGHDMA;
	} else {
		status = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
		if (status) {
			dev_err(&pdev->dev, "Could not set PCI DMA Mask\n");
			goto free_netdev;
		}
	}

	/* AER is best-effort; probe continues even if it fails */
	status = pci_enable_pcie_error_reporting(pdev);
	if (status)
		dev_err(&pdev->dev, "Could not use PCIe error reporting\n");

	status = be_ctrl_init(adapter);
	if (status)
		goto free_netdev;

	/* sync up with fw's ready state */
	if (be_physfn(adapter)) {
		status = be_fw_wait_ready(adapter);
		if (status)
			goto ctrl_clean;
	}

	/* tell fw we're ready to fire cmds */
	status = be_cmd_fw_init(adapter);
	if (status)
		goto ctrl_clean;

	if (be_reset_required(adapter)) {
		status = be_cmd_reset_function(adapter);
		if (status)
			goto ctrl_clean;
	}

	/* The INTR bit may be set in the card when probed by a kdump kernel
	 * after a crash.
	 */
	if (!lancer_chip(adapter))
		be_intr_set(adapter, false);

	status = be_stats_init(adapter);
	if (status)
		goto ctrl_clean;

	status = be_get_initial_config(adapter);
	if (status)
		goto stats_clean;

	INIT_DELAYED_WORK(&adapter->work, be_worker);
	INIT_DELAYED_WORK(&adapter->func_recovery_work, be_func_recovery_task);
	adapter->rx_fc = adapter->tx_fc = true;

	status = be_setup(adapter);
	if (status)
		goto stats_clean;

	be_netdev_init(netdev);
	status = register_netdev(netdev);
	if (status != 0)
		goto unsetup;

	be_roce_dev_add(adapter);

	schedule_delayed_work(&adapter->func_recovery_work,
			      msecs_to_jiffies(1000));

	be_cmd_query_port_name(adapter, &port_name);

	dev_info(&pdev->dev, "%s: %s %s port %c\n", nic_name(pdev),
		 func_name(adapter), mc_name(adapter), port_name);

	return 0;

unsetup:
	be_clear(adapter);
stats_clean:
	be_stats_cleanup(adapter);
ctrl_clean:
	be_ctrl_cleanup(adapter);
free_netdev:
	free_netdev(netdev);
	pci_set_drvdata(pdev, NULL);
rel_reg:
	pci_release_regions(pdev);
disable_dev:
	pci_disable_device(pdev);
do_none:
	dev_err(&pdev->dev, "%s initialization failed\n", nic_name(pdev));
	return status;
}
4179
/* Legacy PM suspend callback: arm wake-on-LAN (if enabled), quiesce the
 * interface and release HW resources, then power the PCI function down.
 * Returns 0 (the teardown steps here do not report failure).
 */
static int be_suspend(struct pci_dev *pdev, pm_message_t state)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);
	struct net_device *netdev = adapter->netdev;

	/* Program WoL in the FW first, while cmds can still be issued */
	if (adapter->wol)
		be_setup_wol(adapter, true);

	/* Stop the error-recovery poller; it must not touch the HW while
	 * the function is being suspended.
	 */
	cancel_delayed_work_sync(&adapter->func_recovery_work);

	netif_device_detach(netdev);
	if (netif_running(netdev)) {
		rtnl_lock();
		be_close(netdev);
		rtnl_unlock();
	}
	/* Free queues/IRQs and undo be_setup() before powering down */
	be_clear(adapter);

	pci_save_state(pdev);
	pci_disable_device(pdev);
	pci_set_power_state(pdev, pci_choose_state(pdev, state));
	return 0;
}
4203
4204static int be_resume(struct pci_dev *pdev)
4205{
4206 int status = 0;
4207 struct be_adapter *adapter = pci_get_drvdata(pdev);
4208 struct net_device *netdev = adapter->netdev;
4209
4210 netif_device_detach(netdev);
4211
4212 status = pci_enable_device(pdev);
4213 if (status)
4214 return status;
4215
4216 pci_set_power_state(pdev, 0);
4217 pci_restore_state(pdev);
4218
Sathya Perla2243e2e2009-11-22 22:02:03 +00004219 /* tell fw we're ready to fire cmds */
4220 status = be_cmd_fw_init(adapter);
4221 if (status)
4222 return status;
4223
Sarveshwar Bandi9b0365f2009-08-12 21:01:29 +00004224 be_setup(adapter);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004225 if (netif_running(netdev)) {
4226 rtnl_lock();
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004227 be_open(netdev);
4228 rtnl_unlock();
4229 }
Padmanabh Ratnakarf67ef7b2012-07-12 03:57:09 +00004230
4231 schedule_delayed_work(&adapter->func_recovery_work,
4232 msecs_to_jiffies(1000));
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004233 netif_device_attach(netdev);
Ajit Khaparde71d8d1b2009-12-03 06:16:59 +00004234
4235 if (adapter->wol)
4236 be_setup_wol(adapter, false);
Ajit Khapardea4ca0552011-02-11 13:38:03 +00004237
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004238 return 0;
4239}
4240
/*
 * An FLR will stop BE from DMAing any data.
 */
static void be_shutdown(struct pci_dev *pdev)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);

	/* adapter is NULL if be_probe() failed for this function */
	if (!adapter)
		return;

	/* Stop all deferred work before touching the HW */
	cancel_delayed_work_sync(&adapter->work);
	cancel_delayed_work_sync(&adapter->func_recovery_work);

	netif_device_detach(adapter->netdev);

	/* Function-level reset halts any in-flight DMA before reboot/kexec */
	be_cmd_reset_function(adapter);

	pci_disable_device(pdev);
}
4260
/* EEH error_detected callback: flag the error, quiesce the interface and
 * release HW resources after a PCI channel error. Returns
 * PCI_ERS_RESULT_NEED_RESET to request a slot reset, or
 * PCI_ERS_RESULT_DISCONNECT if the channel is permanently dead.
 */
static pci_ers_result_t be_eeh_err_detected(struct pci_dev *pdev,
				pci_channel_state_t state)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);
	struct net_device *netdev = adapter->netdev;

	dev_err(&adapter->pdev->dev, "EEH error detected\n");

	/* Flag the error so other code paths back off the HW */
	adapter->eeh_error = true;

	/* The recovery worker must not run during EEH handling */
	cancel_delayed_work_sync(&adapter->func_recovery_work);

	rtnl_lock();
	netif_device_detach(netdev);
	rtnl_unlock();

	if (netif_running(netdev)) {
		rtnl_lock();
		be_close(netdev);
		rtnl_unlock();
	}
	be_clear(adapter);

	if (state == pci_channel_io_perm_failure)
		return PCI_ERS_RESULT_DISCONNECT;

	pci_disable_device(pdev);

	/* The error could cause the FW to trigger a flash debug dump.
	 * Resetting the card while flash dump is in progress
	 * can cause it not to recover; wait for it to finish.
	 * Wait only for first function as it is needed only once per
	 * adapter.
	 */
	if (pdev->devfn == 0)
		ssleep(30);

	return PCI_ERS_RESULT_NEED_RESET;
}
4300
/* EEH slot_reset callback: re-enable the freshly reset PCI function and
 * verify the FW is ready for commands again. Returns
 * PCI_ERS_RESULT_RECOVERED on success, PCI_ERS_RESULT_DISCONNECT otherwise.
 */
static pci_ers_result_t be_eeh_reset(struct pci_dev *pdev)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);
	int status;

	dev_info(&adapter->pdev->dev, "EEH reset\n");
	/* Clear the error flags set in be_eeh_err_detected() so cmds flow */
	be_clear_all_error(adapter);

	status = pci_enable_device(pdev);
	if (status)
		return PCI_ERS_RESULT_DISCONNECT;

	pci_set_master(pdev);
	pci_set_power_state(pdev, 0);
	pci_restore_state(pdev);

	/* Check if card is ok and fw is ready */
	status = be_fw_wait_ready(adapter);
	if (status)
		return PCI_ERS_RESULT_DISCONNECT;

	/* Clear any AER status left over from the error */
	pci_cleanup_aer_uncorrect_error_status(pdev);
	return PCI_ERS_RESULT_RECOVERED;
}
4325
/* EEH resume callback: re-initialize the adapter and restart traffic
 * after a successful slot reset. On failure it only logs; the device
 * stays detached.
 */
static void be_eeh_resume(struct pci_dev *pdev)
{
	int status = 0;
	struct be_adapter *adapter = pci_get_drvdata(pdev);
	struct net_device *netdev = adapter->netdev;

	dev_info(&adapter->pdev->dev, "EEH resume\n");

	pci_save_state(pdev);

	/* tell fw we're ready to fire cmds */
	status = be_cmd_fw_init(adapter);
	if (status)
		goto err;

	/* Reset the function to get the HW to a known-clean state */
	status = be_cmd_reset_function(adapter);
	if (status)
		goto err;

	status = be_setup(adapter);
	if (status)
		goto err;

	if (netif_running(netdev)) {
		status = be_open(netdev);
		if (status)
			goto err;
	}

	/* Restart the periodic error-recovery poller */
	schedule_delayed_work(&adapter->func_recovery_work,
			      msecs_to_jiffies(1000));
	netif_device_attach(netdev);
	return;
err:
	dev_err(&adapter->pdev->dev, "EEH resume failed\n");
}
4362
/* PCI EEH (Extended Error Handling) recovery callbacks */
static const struct pci_error_handlers be_eeh_handlers = {
	.error_detected = be_eeh_err_detected,
	.slot_reset = be_eeh_reset,
	.resume = be_eeh_resume,
};
4368
/* PCI driver entry points (legacy suspend/resume PM model) */
static struct pci_driver be_driver = {
	.name = DRV_NAME,
	.id_table = be_dev_ids,
	.probe = be_probe,
	.remove = be_remove,
	.suspend = be_suspend,
	.resume = be_resume,
	.shutdown = be_shutdown,
	.err_handler = &be_eeh_handlers
};
4379
4380static int __init be_init_module(void)
4381{
Joe Perches8e95a202009-12-03 07:58:21 +00004382 if (rx_frag_size != 8192 && rx_frag_size != 4096 &&
4383 rx_frag_size != 2048) {
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004384 printk(KERN_WARNING DRV_NAME
4385 " : Module param rx_frag_size must be 2048/4096/8192."
4386 " Using 2048\n");
4387 rx_frag_size = 2048;
4388 }
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004389
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004390 return pci_register_driver(&be_driver);
4391}
4392module_init(be_init_module);
4393
/* Module exit point: unregister the PCI driver (unbinds all devices) */
static void __exit be_exit_module(void)
{
	pci_unregister_driver(&be_driver);
}
module_exit(be_exit_module);