blob: 1767babe31737e5d61cafab2d342c8d7a66714d1 [file] [log] [blame]
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001/*
Ajit Khaparded2145cd2011-03-16 08:20:46 +00002 * Copyright (C) 2005 - 2011 Emulex
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003 * All rights reserved.
4 *
5 * This program is free software; you can redistribute it and/or
6 * modify it under the terms of the GNU General Public License version 2
7 * as published by the Free Software Foundation. The full GNU General
8 * Public License is included in this distribution in the file called COPYING.
9 *
10 * Contact Information:
Ajit Khaparded2145cd2011-03-16 08:20:46 +000011 * linux-drivers@emulex.com
Sathya Perla6b7c5b92009-03-11 23:32:03 -070012 *
Ajit Khaparded2145cd2011-03-16 08:20:46 +000013 * Emulex
14 * 3333 Susan Street
15 * Costa Mesa, CA 92626
Sathya Perla6b7c5b92009-03-11 23:32:03 -070016 */
17
Paul Gortmaker70c71602011-05-22 16:47:17 -040018#include <linux/prefetch.h>
Paul Gortmaker9d9779e2011-07-03 15:21:01 -040019#include <linux/module.h>
Sathya Perla6b7c5b92009-03-11 23:32:03 -070020#include "be.h"
Sathya Perla8788fdc2009-07-27 22:52:03 +000021#include "be_cmds.h"
Stephen Hemminger65f71b82009-03-27 00:25:24 -070022#include <asm/div64.h>
Sathya Perlad6b6d982012-09-05 01:56:48 +000023#include <linux/aer.h>
Sathya Perla6b7c5b92009-03-11 23:32:03 -070024
25MODULE_VERSION(DRV_VER);
26MODULE_DEVICE_TABLE(pci, be_dev_ids);
27MODULE_DESCRIPTION(DRV_DESC " " DRV_VER);
28MODULE_AUTHOR("ServerEngines Corporation");
29MODULE_LICENSE("GPL");
30
Sarveshwar Bandiba343c72010-03-31 02:56:12 +000031static unsigned int num_vfs;
Sarveshwar Bandiba343c72010-03-31 02:56:12 +000032module_param(num_vfs, uint, S_IRUGO);
Sarveshwar Bandiba343c72010-03-31 02:56:12 +000033MODULE_PARM_DESC(num_vfs, "Number of PCI VFs to initialize");
Sathya Perla6b7c5b92009-03-11 23:32:03 -070034
Sathya Perla11ac75e2011-12-13 00:58:50 +000035static ushort rx_frag_size = 2048;
36module_param(rx_frag_size, ushort, S_IRUGO);
37MODULE_PARM_DESC(rx_frag_size, "Size of a fragment that holds rcvd data.");
38
Sathya Perla6b7c5b92009-03-11 23:32:03 -070039static DEFINE_PCI_DEVICE_TABLE(be_dev_ids) = {
Ajit Khapardec4ca2372009-05-18 15:38:55 -070040 { PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID1) },
Ajit Khaparde59fd5d82009-10-29 01:11:06 -070041 { PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID2) },
Ajit Khapardec4ca2372009-05-18 15:38:55 -070042 { PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID1) },
43 { PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID2) },
Sathya Perlafe6d2a32010-11-21 23:25:50 +000044 { PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID3)},
Mammatha Edhala12f4d0a2011-05-18 03:26:22 +000045 { PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID4)},
Ajit Khapardeecedb6a2011-12-15 06:31:38 +000046 { PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID5)},
Padmanabh Ratnakar76b73532012-10-20 06:04:40 +000047 { PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID6)},
Sathya Perla6b7c5b92009-03-11 23:32:03 -070048 { 0 }
49};
50MODULE_DEVICE_TABLE(pci, be_dev_ids);
/* UE Status Low CSR */
/* NOTE(review): these entries appear to be human-readable names for the
 * blocks reported by the UE (unrecoverable error) status-low register,
 * presumably indexed by bit position — confirm against the decode loop,
 * which is not visible in this chunk. Trailing spaces in some strings
 * are preserved as-is.
 */
static const char * const ue_status_low_desc[] = {
	"CEV",
	"CTX",
	"DBUF",
	"ERX",
	"Host",
	"MPU",
	"NDMA",
	"PTC ",
	"RDMA ",
	"RXF ",
	"RXIPS ",
	"RXULP0 ",
	"RXULP1 ",
	"RXULP2 ",
	"TIM ",
	"TPOST ",
	"TPRE ",
	"TXIPS ",
	"TXULP0 ",
	"TXULP1 ",
	"UC ",
	"WDMA ",
	"TXULP2 ",
	"HOST1 ",
	"P0_OB_LINK ",
	"P1_OB_LINK ",
	"HOST_GPIO ",
	"MBOX ",
	"AXGMAC0",
	"AXGMAC1",
	"JTAG",
	"MPU_INTPEND"
};
/* UE Status High CSR */
/* NOTE(review): companion table to ue_status_low_desc for the status-high
 * register; the trailing "Unknown" entries pad the table out to 32 slots,
 * presumably one per register bit — confirm against the decode loop.
 */
static const char * const ue_status_hi_desc[] = {
	"LPCMEMHOST",
	"MGMT_MAC",
	"PCS0ONLINE",
	"MPU_IRAM",
	"PCS1ONLINE",
	"PCTL0",
	"PCTL1",
	"PMEM",
	"RR",
	"TXPB",
	"RXPP",
	"XAUI",
	"TXP",
	"ARM",
	"IPC",
	"HOST2",
	"HOST3",
	"HOST4",
	"HOST5",
	"HOST6",
	"HOST7",
	"HOST8",
	"HOST9",
	"NETC",
	"Unknown",
	"Unknown",
	"Unknown",
	"Unknown",
	"Unknown",
	"Unknown",
	"Unknown",
	"Unknown"
};
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700121
Sathya Perla752961a2011-10-24 02:45:03 +0000122/* Is BE in a multi-channel mode */
123static inline bool be_is_mc(struct be_adapter *adapter) {
124 return (adapter->function_mode & FLEX10_MODE ||
125 adapter->function_mode & VNIC_MODE ||
126 adapter->function_mode & UMC_ENABLED);
127}
128
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700129static void be_queue_free(struct be_adapter *adapter, struct be_queue_info *q)
130{
131 struct be_dma_mem *mem = &q->dma_mem;
Sathya Perla1cfafab2012-02-23 18:50:15 +0000132 if (mem->va) {
Ivan Vecera2b7bceb2011-02-02 08:05:12 +0000133 dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
134 mem->dma);
Sathya Perla1cfafab2012-02-23 18:50:15 +0000135 mem->va = NULL;
136 }
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700137}
138
139static int be_queue_alloc(struct be_adapter *adapter, struct be_queue_info *q,
140 u16 len, u16 entry_size)
141{
142 struct be_dma_mem *mem = &q->dma_mem;
143
144 memset(q, 0, sizeof(*q));
145 q->len = len;
146 q->entry_size = entry_size;
147 mem->size = len * entry_size;
Ivan Vecera2b7bceb2011-02-02 08:05:12 +0000148 mem->va = dma_alloc_coherent(&adapter->pdev->dev, mem->size, &mem->dma,
149 GFP_KERNEL);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700150 if (!mem->va)
Sathya Perla10ef9ab2012-02-09 18:05:27 +0000151 return -ENOMEM;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700152 memset(mem->va, 0, mem->size);
153 return 0;
154}
155
Sathya Perla8788fdc2009-07-27 22:52:03 +0000156static void be_intr_set(struct be_adapter *adapter, bool enable)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700157{
Sathya Perladb3ea782011-08-22 19:41:52 +0000158 u32 reg, enabled;
Sathya Perla5f0b8492009-07-27 22:52:56 +0000159
Padmanabh Ratnakarf67ef7b2012-07-12 03:57:09 +0000160 if (adapter->eeh_error)
Sathya Perlacf588472010-02-14 21:22:01 +0000161 return;
162
Sathya Perladb3ea782011-08-22 19:41:52 +0000163 pci_read_config_dword(adapter->pdev, PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET,
164 &reg);
165 enabled = reg & MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
166
Sathya Perla5f0b8492009-07-27 22:52:56 +0000167 if (!enabled && enable)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700168 reg |= MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
Sathya Perla5f0b8492009-07-27 22:52:56 +0000169 else if (enabled && !enable)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700170 reg &= ~MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
Sathya Perla5f0b8492009-07-27 22:52:56 +0000171 else
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700172 return;
Sathya Perla5f0b8492009-07-27 22:52:56 +0000173
Sathya Perladb3ea782011-08-22 19:41:52 +0000174 pci_write_config_dword(adapter->pdev,
175 PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET, reg);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700176}
177
Sathya Perla8788fdc2009-07-27 22:52:03 +0000178static void be_rxq_notify(struct be_adapter *adapter, u16 qid, u16 posted)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700179{
180 u32 val = 0;
181 val |= qid & DB_RQ_RING_ID_MASK;
182 val |= posted << DB_RQ_NUM_POSTED_SHIFT;
Sathya Perlaf3eb62d2010-06-29 00:11:17 +0000183
184 wmb();
Sathya Perla8788fdc2009-07-27 22:52:03 +0000185 iowrite32(val, adapter->db + DB_RQ_OFFSET);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700186}
187
Sathya Perla8788fdc2009-07-27 22:52:03 +0000188static void be_txq_notify(struct be_adapter *adapter, u16 qid, u16 posted)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700189{
190 u32 val = 0;
191 val |= qid & DB_TXULP_RING_ID_MASK;
192 val |= (posted & DB_TXULP_NUM_POSTED_MASK) << DB_TXULP_NUM_POSTED_SHIFT;
Sathya Perlaf3eb62d2010-06-29 00:11:17 +0000193
194 wmb();
Sathya Perla8788fdc2009-07-27 22:52:03 +0000195 iowrite32(val, adapter->db + DB_TXULP1_OFFSET);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700196}
197
Sathya Perla8788fdc2009-07-27 22:52:03 +0000198static void be_eq_notify(struct be_adapter *adapter, u16 qid,
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700199 bool arm, bool clear_int, u16 num_popped)
200{
201 u32 val = 0;
202 val |= qid & DB_EQ_RING_ID_MASK;
Sathya Perlafe6d2a32010-11-21 23:25:50 +0000203 val |= ((qid & DB_EQ_RING_ID_EXT_MASK) <<
204 DB_EQ_RING_ID_EXT_MASK_SHIFT);
Sathya Perlacf588472010-02-14 21:22:01 +0000205
Padmanabh Ratnakarf67ef7b2012-07-12 03:57:09 +0000206 if (adapter->eeh_error)
Sathya Perlacf588472010-02-14 21:22:01 +0000207 return;
208
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700209 if (arm)
210 val |= 1 << DB_EQ_REARM_SHIFT;
211 if (clear_int)
212 val |= 1 << DB_EQ_CLR_SHIFT;
213 val |= 1 << DB_EQ_EVNT_SHIFT;
214 val |= num_popped << DB_EQ_NUM_POPPED_SHIFT;
Sathya Perla8788fdc2009-07-27 22:52:03 +0000215 iowrite32(val, adapter->db + DB_EQ_OFFSET);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700216}
217
Sathya Perla8788fdc2009-07-27 22:52:03 +0000218void be_cq_notify(struct be_adapter *adapter, u16 qid, bool arm, u16 num_popped)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700219{
220 u32 val = 0;
221 val |= qid & DB_CQ_RING_ID_MASK;
Sathya Perlafe6d2a32010-11-21 23:25:50 +0000222 val |= ((qid & DB_CQ_RING_ID_EXT_MASK) <<
223 DB_CQ_RING_ID_EXT_MASK_SHIFT);
Sathya Perlacf588472010-02-14 21:22:01 +0000224
Padmanabh Ratnakarf67ef7b2012-07-12 03:57:09 +0000225 if (adapter->eeh_error)
Sathya Perlacf588472010-02-14 21:22:01 +0000226 return;
227
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700228 if (arm)
229 val |= 1 << DB_CQ_REARM_SHIFT;
230 val |= num_popped << DB_CQ_NUM_POPPED_SHIFT;
Sathya Perla8788fdc2009-07-27 22:52:03 +0000231 iowrite32(val, adapter->db + DB_CQ_OFFSET);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700232}
233
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700234static int be_mac_addr_set(struct net_device *netdev, void *p)
235{
236 struct be_adapter *adapter = netdev_priv(netdev);
237 struct sockaddr *addr = p;
238 int status = 0;
Somnath Koture3a7ae22011-10-27 07:14:05 +0000239 u8 current_mac[ETH_ALEN];
Ajit Khapardefbc13f02012-03-18 06:23:21 +0000240 u32 pmac_id = adapter->pmac_id[0];
Padmanabh Ratnakar704e4c82012-10-20 06:02:13 +0000241 bool active_mac = true;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700242
Ajit Khapardeca9e4982009-11-29 17:56:26 +0000243 if (!is_valid_ether_addr(addr->sa_data))
244 return -EADDRNOTAVAIL;
245
Padmanabh Ratnakar704e4c82012-10-20 06:02:13 +0000246 /* For BE VF, MAC address is already activated by PF.
247 * Hence only operation left is updating netdev->devaddr.
248 * Update it if user is passing the same MAC which was used
249 * during configuring VF MAC from PF(Hypervisor).
250 */
251 if (!lancer_chip(adapter) && !be_physfn(adapter)) {
252 status = be_cmd_mac_addr_query(adapter, current_mac,
253 false, adapter->if_handle, 0);
254 if (!status && !memcmp(current_mac, addr->sa_data, ETH_ALEN))
255 goto done;
256 else
257 goto err;
258 }
259
260 if (!memcmp(addr->sa_data, netdev->dev_addr, ETH_ALEN))
261 goto done;
262
263 /* For Lancer check if any MAC is active.
264 * If active, get its mac id.
265 */
266 if (lancer_chip(adapter) && !be_physfn(adapter))
267 be_cmd_get_mac_from_list(adapter, current_mac, &active_mac,
268 &pmac_id, 0);
269
270 status = be_cmd_pmac_add(adapter, (u8 *)addr->sa_data,
271 adapter->if_handle,
272 &adapter->pmac_id[0], 0);
273
Sathya Perlaa65027e2009-08-17 00:58:04 +0000274 if (status)
Somnath Koture3a7ae22011-10-27 07:14:05 +0000275 goto err;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700276
Padmanabh Ratnakar704e4c82012-10-20 06:02:13 +0000277 if (active_mac)
278 be_cmd_pmac_del(adapter, adapter->if_handle,
279 pmac_id, 0);
280done:
Somnath Koture3a7ae22011-10-27 07:14:05 +0000281 memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
282 return 0;
283err:
284 dev_err(&adapter->pdev->dev, "MAC %pM set Failed\n", addr->sa_data);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700285 return status;
286}
287
Sathya Perlaca34fe32012-11-06 17:48:56 +0000288/* BE2 supports only v0 cmd */
289static void *hw_stats_from_cmd(struct be_adapter *adapter)
290{
291 if (BE2_chip(adapter)) {
292 struct be_cmd_resp_get_stats_v0 *cmd = adapter->stats_cmd.va;
293
294 return &cmd->hw_stats;
295 } else {
296 struct be_cmd_resp_get_stats_v1 *cmd = adapter->stats_cmd.va;
297
298 return &cmd->hw_stats;
299 }
300}
301
302/* BE2 supports only v0 cmd */
303static void *be_erx_stats_from_cmd(struct be_adapter *adapter)
304{
305 if (BE2_chip(adapter)) {
306 struct be_hw_stats_v0 *hw_stats = hw_stats_from_cmd(adapter);
307
308 return &hw_stats->erx;
309 } else {
310 struct be_hw_stats_v1 *hw_stats = hw_stats_from_cmd(adapter);
311
312 return &hw_stats->erx;
313 }
314}
315
/* Copy the v0-layout (BE2) HW stats from the stats-cmd response into the
 * driver's unified drv_stats. Converts the whole response from LE to CPU
 * byte order in place first, so this must run exactly once per response.
 */
static void populate_be_v0_stats(struct be_adapter *adapter)
{
	struct be_hw_stats_v0 *hw_stats = hw_stats_from_cmd(adapter);
	struct be_pmem_stats *pmem_sts = &hw_stats->pmem;
	struct be_rxf_stats_v0 *rxf_stats = &hw_stats->rxf;
	/* per-port counters for the port this function is bound to */
	struct be_port_rxf_stats_v0 *port_stats =
					&rxf_stats->port[adapter->port_num];
	struct be_drv_stats *drvs = &adapter->drv_stats;

	be_dws_le_to_cpu(hw_stats, sizeof(*hw_stats));
	drvs->rx_pause_frames = port_stats->rx_pause_frames;
	drvs->rx_crc_errors = port_stats->rx_crc_errors;
	drvs->rx_control_frames = port_stats->rx_control_frames;
	drvs->rx_in_range_errors = port_stats->rx_in_range_errors;
	drvs->rx_frame_too_long = port_stats->rx_frame_too_long;
	drvs->rx_dropped_runt = port_stats->rx_dropped_runt;
	drvs->rx_ip_checksum_errs = port_stats->rx_ip_checksum_errs;
	drvs->rx_tcp_checksum_errs = port_stats->rx_tcp_checksum_errs;
	drvs->rx_udp_checksum_errs = port_stats->rx_udp_checksum_errs;
	drvs->rxpp_fifo_overflow_drop = port_stats->rx_fifo_overflow;
	drvs->rx_dropped_tcp_length = port_stats->rx_dropped_tcp_length;
	drvs->rx_dropped_too_small = port_stats->rx_dropped_too_small;
	drvs->rx_dropped_too_short = port_stats->rx_dropped_too_short;
	drvs->rx_out_range_errors = port_stats->rx_out_range_errors;
	drvs->rx_input_fifo_overflow_drop = port_stats->rx_input_fifo_overflow;
	drvs->rx_dropped_header_too_small =
		port_stats->rx_dropped_header_too_small;
	/* v0 HW splits address/vlan mismatch drops; the driver sums them */
	drvs->rx_address_mismatch_drops =
					port_stats->rx_address_mismatch_drops +
					port_stats->rx_vlan_mismatch_drops;
	drvs->rx_alignment_symbol_errors =
		port_stats->rx_alignment_symbol_errors;

	drvs->tx_pauseframes = port_stats->tx_pauseframes;
	drvs->tx_controlframes = port_stats->tx_controlframes;

	/* jabber events live in per-port fields of the rxf block in v0 */
	if (adapter->port_num)
		drvs->jabber_events = rxf_stats->port1_jabber_events;
	else
		drvs->jabber_events = rxf_stats->port0_jabber_events;
	drvs->rx_drops_no_pbuf = rxf_stats->rx_drops_no_pbuf;
	drvs->rx_drops_no_erx_descr = rxf_stats->rx_drops_no_erx_descr;
	drvs->forwarded_packets = rxf_stats->forwarded_packets;
	drvs->rx_drops_mtu = rxf_stats->rx_drops_mtu;
	drvs->rx_drops_no_tpre_descr = rxf_stats->rx_drops_no_tpre_descr;
	drvs->rx_drops_too_many_frags = rxf_stats->rx_drops_too_many_frags;
	adapter->drv_stats.eth_red_drops = pmem_sts->eth_red_drops;
}
364
/* Copy the v1-layout (BE3/Skyhawk) HW stats from the stats-cmd response
 * into the driver's unified drv_stats. Converts the whole response from
 * LE to CPU byte order in place first.
 */
static void populate_be_v1_stats(struct be_adapter *adapter)
{
	struct be_hw_stats_v1 *hw_stats = hw_stats_from_cmd(adapter);
	struct be_pmem_stats *pmem_sts = &hw_stats->pmem;
	struct be_rxf_stats_v1 *rxf_stats = &hw_stats->rxf;
	/* per-port counters for the port this function is bound to */
	struct be_port_rxf_stats_v1 *port_stats =
					&rxf_stats->port[adapter->port_num];
	struct be_drv_stats *drvs = &adapter->drv_stats;

	be_dws_le_to_cpu(hw_stats, sizeof(*hw_stats));
	drvs->pmem_fifo_overflow_drop = port_stats->pmem_fifo_overflow_drop;
	drvs->rx_priority_pause_frames = port_stats->rx_priority_pause_frames;
	drvs->rx_pause_frames = port_stats->rx_pause_frames;
	drvs->rx_crc_errors = port_stats->rx_crc_errors;
	drvs->rx_control_frames = port_stats->rx_control_frames;
	drvs->rx_in_range_errors = port_stats->rx_in_range_errors;
	drvs->rx_frame_too_long = port_stats->rx_frame_too_long;
	drvs->rx_dropped_runt = port_stats->rx_dropped_runt;
	drvs->rx_ip_checksum_errs = port_stats->rx_ip_checksum_errs;
	drvs->rx_tcp_checksum_errs = port_stats->rx_tcp_checksum_errs;
	drvs->rx_udp_checksum_errs = port_stats->rx_udp_checksum_errs;
	drvs->rx_dropped_tcp_length = port_stats->rx_dropped_tcp_length;
	drvs->rx_dropped_too_small = port_stats->rx_dropped_too_small;
	drvs->rx_dropped_too_short = port_stats->rx_dropped_too_short;
	drvs->rx_out_range_errors = port_stats->rx_out_range_errors;
	drvs->rx_dropped_header_too_small =
		port_stats->rx_dropped_header_too_small;
	drvs->rx_input_fifo_overflow_drop =
		port_stats->rx_input_fifo_overflow_drop;
	/* unlike v0, v1 HW reports a single combined mismatch counter */
	drvs->rx_address_mismatch_drops = port_stats->rx_address_mismatch_drops;
	drvs->rx_alignment_symbol_errors =
		port_stats->rx_alignment_symbol_errors;
	drvs->rxpp_fifo_overflow_drop = port_stats->rxpp_fifo_overflow_drop;
	drvs->tx_pauseframes = port_stats->tx_pauseframes;
	drvs->tx_controlframes = port_stats->tx_controlframes;
	/* v1 reports jabber events per port, not in the rxf block */
	drvs->jabber_events = port_stats->jabber_events;
	drvs->rx_drops_no_pbuf = rxf_stats->rx_drops_no_pbuf;
	drvs->rx_drops_no_erx_descr = rxf_stats->rx_drops_no_erx_descr;
	drvs->forwarded_packets = rxf_stats->forwarded_packets;
	drvs->rx_drops_mtu = rxf_stats->rx_drops_mtu;
	drvs->rx_drops_no_tpre_descr = rxf_stats->rx_drops_no_tpre_descr;
	drvs->rx_drops_too_many_frags = rxf_stats->rx_drops_too_many_frags;
	adapter->drv_stats.eth_red_drops = pmem_sts->eth_red_drops;
}
409
/* Copy Lancer per-physical-port (pport) stats into the driver's unified
 * drv_stats. Converts the response from LE to CPU byte order in place.
 * Only the low 32 bits of the 64-bit HW counters (_lo fields) are used.
 */
static void populate_lancer_stats(struct be_adapter *adapter)
{

	struct be_drv_stats *drvs = &adapter->drv_stats;
	struct lancer_pport_stats *pport_stats =
					pport_stats_from_cmd(adapter);

	be_dws_le_to_cpu(pport_stats, sizeof(*pport_stats));
	drvs->rx_pause_frames = pport_stats->rx_pause_frames_lo;
	drvs->rx_crc_errors = pport_stats->rx_crc_errors_lo;
	drvs->rx_control_frames = pport_stats->rx_control_frames_lo;
	drvs->rx_in_range_errors = pport_stats->rx_in_range_errors;
	drvs->rx_frame_too_long = pport_stats->rx_frames_too_long_lo;
	drvs->rx_dropped_runt = pport_stats->rx_dropped_runt;
	drvs->rx_ip_checksum_errs = pport_stats->rx_ip_checksum_errors;
	drvs->rx_tcp_checksum_errs = pport_stats->rx_tcp_checksum_errors;
	drvs->rx_udp_checksum_errs = pport_stats->rx_udp_checksum_errors;
	drvs->rx_dropped_tcp_length =
				pport_stats->rx_dropped_invalid_tcp_length;
	drvs->rx_dropped_too_small = pport_stats->rx_dropped_too_small;
	drvs->rx_dropped_too_short = pport_stats->rx_dropped_too_short;
	drvs->rx_out_range_errors = pport_stats->rx_out_of_range_errors;
	drvs->rx_dropped_header_too_small =
				pport_stats->rx_dropped_header_too_small;
	/* Lancer reports one fifo-overflow counter; the driver maps it to
	 * both input-fifo and rxpp-fifo drop stats (see below).
	 */
	drvs->rx_input_fifo_overflow_drop = pport_stats->rx_fifo_overflow;
	/* address and vlan mismatch drops are reported separately; sum them */
	drvs->rx_address_mismatch_drops =
					pport_stats->rx_address_mismatch_drops +
					pport_stats->rx_vlan_mismatch_drops;
	drvs->rx_alignment_symbol_errors = pport_stats->rx_symbol_errors_lo;
	drvs->rxpp_fifo_overflow_drop = pport_stats->rx_fifo_overflow;
	drvs->tx_pauseframes = pport_stats->tx_pause_frames_lo;
	drvs->tx_controlframes = pport_stats->tx_control_frames_lo;
	drvs->jabber_events = pport_stats->rx_jabbers;
	drvs->forwarded_packets = pport_stats->num_forwards_lo;
	drvs->rx_drops_mtu = pport_stats->rx_drops_mtu_lo;
	drvs->rx_drops_too_many_frags =
				pport_stats->rx_drops_too_many_frags_lo;
}
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000448
Sathya Perla09c1c682011-08-22 19:41:53 +0000449static void accumulate_16bit_val(u32 *acc, u16 val)
450{
451#define lo(x) (x & 0xFFFF)
452#define hi(x) (x & 0xFFFF0000)
453 bool wrapped = val < lo(*acc);
454 u32 newacc = hi(*acc) + val;
455
456 if (wrapped)
457 newacc += 65536;
458 ACCESS_ONCE(*acc) = newacc;
459}
460
/* Dispatch HW-stats parsing to the chip-appropriate populate routine and
 * accumulate the wrapping per-RX-queue erx drop counters.
 */
void be_parse_stats(struct be_adapter *adapter)
{
	/* erx pointer is typed v1 regardless of chip; see comment below */
	struct be_erx_stats_v1 *erx = be_erx_stats_from_cmd(adapter);
	struct be_rx_obj *rxo;
	int i;

	if (lancer_chip(adapter)) {
		populate_lancer_stats(adapter);
	} else {
		if (BE2_chip(adapter))
			populate_be_v0_stats(adapter);
		else
			/* for BE3 and Skyhawk */
			populate_be_v1_stats(adapter);

		/* as erx_v1 is longer than v0, ok to use v1 for v0 access */
		for_all_rx_queues(adapter, rxo, i) {
			/* below erx HW counter can actually wrap around after
			 * 65535. Driver accumulates a 32-bit value
			 */
			accumulate_16bit_val(&rx_stats(rxo)->rx_drops_no_frags,
					(u16)erx->rx_drops_no_fragments \
					[rxo->q.id]);
		}
	}
}
487
/* ndo_get_stats64 handler: aggregate per-queue packet/byte counters and
 * the driver-maintained error stats into @stats.
 * The do/while loops around u64_stats_fetch_begin_bh/retry_bh are
 * seqcount read sections: they re-read the 64-bit counters if a writer
 * updated them concurrently, so keep the begin/read/retry ordering intact.
 */
static struct rtnl_link_stats64 *be_get_stats64(struct net_device *netdev,
					struct rtnl_link_stats64 *stats)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_drv_stats *drvs = &adapter->drv_stats;
	struct be_rx_obj *rxo;
	struct be_tx_obj *txo;
	u64 pkts, bytes;
	unsigned int start;
	int i;

	for_all_rx_queues(adapter, rxo, i) {
		const struct be_rx_stats *rx_stats = rx_stats(rxo);
		do {
			start = u64_stats_fetch_begin_bh(&rx_stats->sync);
			pkts = rx_stats(rxo)->rx_pkts;
			bytes = rx_stats(rxo)->rx_bytes;
		} while (u64_stats_fetch_retry_bh(&rx_stats->sync, start));
		stats->rx_packets += pkts;
		stats->rx_bytes += bytes;
		stats->multicast += rx_stats(rxo)->rx_mcast_pkts;
		stats->rx_dropped += rx_stats(rxo)->rx_drops_no_skbs +
					rx_stats(rxo)->rx_drops_no_frags;
	}

	for_all_tx_queues(adapter, txo, i) {
		const struct be_tx_stats *tx_stats = tx_stats(txo);
		do {
			start = u64_stats_fetch_begin_bh(&tx_stats->sync);
			pkts = tx_stats(txo)->tx_pkts;
			bytes = tx_stats(txo)->tx_bytes;
		} while (u64_stats_fetch_retry_bh(&tx_stats->sync, start));
		stats->tx_packets += pkts;
		stats->tx_bytes += bytes;
	}

	/* bad pkts received */
	stats->rx_errors = drvs->rx_crc_errors +
		drvs->rx_alignment_symbol_errors +
		drvs->rx_in_range_errors +
		drvs->rx_out_range_errors +
		drvs->rx_frame_too_long +
		drvs->rx_dropped_too_small +
		drvs->rx_dropped_too_short +
		drvs->rx_dropped_header_too_small +
		drvs->rx_dropped_tcp_length +
		drvs->rx_dropped_runt;

	/* detailed rx errors */
	stats->rx_length_errors = drvs->rx_in_range_errors +
		drvs->rx_out_range_errors +
		drvs->rx_frame_too_long;

	stats->rx_crc_errors = drvs->rx_crc_errors;

	/* frame alignment errors */
	stats->rx_frame_errors = drvs->rx_alignment_symbol_errors;

	/* receiver fifo overrun */
	/* drops_no_pbuf is no per i/f, it's per BE card */
	stats->rx_fifo_errors = drvs->rxpp_fifo_overflow_drop +
				drvs->rx_input_fifo_overflow_drop +
				drvs->rx_drops_no_pbuf;
	return stats;
}
553
Ajit Khapardeb236916a2011-12-30 12:15:40 +0000554void be_link_status_update(struct be_adapter *adapter, u8 link_status)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700555{
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700556 struct net_device *netdev = adapter->netdev;
557
Ajit Khapardeb236916a2011-12-30 12:15:40 +0000558 if (!(adapter->flags & BE_FLAGS_LINK_STATUS_INIT)) {
Sathya Perlaea172a02011-08-02 19:57:42 +0000559 netif_carrier_off(netdev);
Ajit Khapardeb236916a2011-12-30 12:15:40 +0000560 adapter->flags |= BE_FLAGS_LINK_STATUS_INIT;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700561 }
Ajit Khapardeb236916a2011-12-30 12:15:40 +0000562
563 if ((link_status & LINK_STATUS_MASK) == LINK_UP)
564 netif_carrier_on(netdev);
565 else
566 netif_carrier_off(netdev);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700567}
568
Sathya Perla3c8def92011-06-12 20:01:58 +0000569static void be_tx_stats_update(struct be_tx_obj *txo,
Ajit Khaparde91992e42010-02-19 13:57:12 +0000570 u32 wrb_cnt, u32 copied, u32 gso_segs, bool stopped)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700571{
Sathya Perla3c8def92011-06-12 20:01:58 +0000572 struct be_tx_stats *stats = tx_stats(txo);
573
Sathya Perlaab1594e2011-07-25 19:10:15 +0000574 u64_stats_update_begin(&stats->sync);
Sathya Perlaac124ff2011-07-25 19:10:14 +0000575 stats->tx_reqs++;
576 stats->tx_wrbs += wrb_cnt;
577 stats->tx_bytes += copied;
578 stats->tx_pkts += (gso_segs ? gso_segs : 1);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700579 if (stopped)
Sathya Perlaac124ff2011-07-25 19:10:14 +0000580 stats->tx_stops++;
Sathya Perlaab1594e2011-07-25 19:10:15 +0000581 u64_stats_update_end(&stats->sync);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700582}
583
584/* Determine number of WRB entries needed to xmit data in an skb */
Sathya Perlafe6d2a32010-11-21 23:25:50 +0000585static u32 wrb_cnt_for_skb(struct be_adapter *adapter, struct sk_buff *skb,
586 bool *dummy)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700587{
David S. Millerebc8d2a2009-06-09 01:01:31 -0700588 int cnt = (skb->len > skb->data_len);
589
590 cnt += skb_shinfo(skb)->nr_frags;
591
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700592 /* to account for hdr wrb */
593 cnt++;
Sathya Perlafe6d2a32010-11-21 23:25:50 +0000594 if (lancer_chip(adapter) || !(cnt & 1)) {
595 *dummy = false;
596 } else {
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700597 /* add a dummy to make it an even num */
598 cnt++;
599 *dummy = true;
Sathya Perlafe6d2a32010-11-21 23:25:50 +0000600 }
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700601 BUG_ON(cnt > BE_MAX_TX_FRAG_COUNT);
602 return cnt;
603}
604
605static inline void wrb_fill(struct be_eth_wrb *wrb, u64 addr, int len)
606{
607 wrb->frag_pa_hi = upper_32_bits(addr);
608 wrb->frag_pa_lo = addr & 0xFFFFFFFF;
609 wrb->frag_len = len & ETH_WRB_FRAG_LEN_MASK;
Somnath Kotur89b1f492012-06-24 19:40:55 +0000610 wrb->rsvd0 = 0;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700611}
612
Ajit Khaparde1ded1322011-12-09 13:53:17 +0000613static inline u16 be_get_tx_vlan_tag(struct be_adapter *adapter,
614 struct sk_buff *skb)
615{
616 u8 vlan_prio;
617 u16 vlan_tag;
618
619 vlan_tag = vlan_tx_tag_get(skb);
620 vlan_prio = (vlan_tag & VLAN_PRIO_MASK) >> VLAN_PRIO_SHIFT;
621 /* If vlan priority provided by OS is NOT in available bmap */
622 if (!(adapter->vlan_prio_bmap & (1 << vlan_prio)))
623 vlan_tag = (vlan_tag & ~VLAN_PRIO_MASK) |
624 adapter->recommended_prio;
625
626 return vlan_tag;
627}
628
Somnath Kotur93040ae2012-06-26 22:32:10 +0000629static int be_vlan_tag_chk(struct be_adapter *adapter, struct sk_buff *skb)
630{
631 return vlan_tx_tag_present(skb) || adapter->pvid;
632}
633
/* Build the TX header wrb for an skb: enables CRC, and sets the LSO,
 * checksum-offload and VLAN fields as appropriate, plus the wrb count
 * and total length for the request. AMAP_SET_BITS packs each field into
 * the HW-defined bit layout of struct amap_eth_hdr_wrb.
 */
static void wrb_fill_hdr(struct be_adapter *adapter, struct be_eth_hdr_wrb *hdr,
		struct sk_buff *skb, u32 wrb_cnt, u32 len)
{
	u16 vlan_tag;

	memset(hdr, 0, sizeof(*hdr));

	AMAP_SET_BITS(struct amap_eth_hdr_wrb, crc, hdr, 1);

	if (skb_is_gso(skb)) {
		AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso, hdr, 1);
		AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso_mss,
			hdr, skb_shinfo(skb)->gso_size);
		/* lso6 flag is not set on Lancer */
		if (skb_is_gso_v6(skb) && !lancer_chip(adapter))
			AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso6, hdr, 1);
	} else if (skb->ip_summed == CHECKSUM_PARTIAL) {
		/* request HW checksum for TCP or UDP payloads */
		if (is_tcp_pkt(skb))
			AMAP_SET_BITS(struct amap_eth_hdr_wrb, tcpcs, hdr, 1);
		else if (is_udp_pkt(skb))
			AMAP_SET_BITS(struct amap_eth_hdr_wrb, udpcs, hdr, 1);
	}

	if (vlan_tx_tag_present(skb)) {
		AMAP_SET_BITS(struct amap_eth_hdr_wrb, vlan, hdr, 1);
		vlan_tag = be_get_tx_vlan_tag(adapter, skb);
		AMAP_SET_BITS(struct amap_eth_hdr_wrb, vlan_tag, hdr, vlan_tag);
	}

	AMAP_SET_BITS(struct amap_eth_hdr_wrb, event, hdr, 1);
	AMAP_SET_BITS(struct amap_eth_hdr_wrb, complete, hdr, 1);
	AMAP_SET_BITS(struct amap_eth_hdr_wrb, num_wrb, hdr, wrb_cnt);
	AMAP_SET_BITS(struct amap_eth_hdr_wrb, len, hdr, len);
}
667
Ivan Vecera2b7bceb2011-02-02 08:05:12 +0000668static void unmap_tx_frag(struct device *dev, struct be_eth_wrb *wrb,
Sathya Perla7101e112010-03-22 20:41:12 +0000669 bool unmap_single)
670{
671 dma_addr_t dma;
672
673 be_dws_le_to_cpu(wrb, sizeof(*wrb));
674
675 dma = (u64)wrb->frag_pa_hi << 32 | (u64)wrb->frag_pa_lo;
FUJITA Tomonorib681ee72010-04-04 21:40:18 +0000676 if (wrb->frag_len) {
Sathya Perla7101e112010-03-22 20:41:12 +0000677 if (unmap_single)
Ivan Vecera2b7bceb2011-02-02 08:05:12 +0000678 dma_unmap_single(dev, dma, wrb->frag_len,
679 DMA_TO_DEVICE);
Sathya Perla7101e112010-03-22 20:41:12 +0000680 else
Ivan Vecera2b7bceb2011-02-02 08:05:12 +0000681 dma_unmap_page(dev, dma, wrb->frag_len, DMA_TO_DEVICE);
Sathya Perla7101e112010-03-22 20:41:12 +0000682 }
683}
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700684
/* DMA-map the skb (linear head + page frags) and post one WRB per mapped
 * piece into @txq, preceded by a header WRB describing the whole request.
 * Returns the number of payload bytes queued, or 0 on DMA-mapping failure
 * (in which case all mappings made so far are undone and the queue head
 * is rewound — the caller frees the skb).
 */
static int make_tx_wrbs(struct be_adapter *adapter, struct be_queue_info *txq,
		struct sk_buff *skb, u32 wrb_cnt, bool dummy_wrb)
{
	dma_addr_t busaddr;
	int i, copied = 0;
	struct device *dev = &adapter->pdev->dev;
	struct sk_buff *first_skb = skb;
	struct be_eth_wrb *wrb;
	struct be_eth_hdr_wrb *hdr;
	bool map_single = false;
	u16 map_head;

	/* Reserve the first slot for the header WRB; it is filled last,
	 * once the total copied length is known */
	hdr = queue_head_node(txq);
	queue_head_inc(txq);
	/* Remember where the data WRBs start, for rollback on error */
	map_head = txq->head;

	/* Linear part of the skb, if any */
	if (skb->len > skb->data_len) {
		int len = skb_headlen(skb);
		busaddr = dma_map_single(dev, skb->data, len, DMA_TO_DEVICE);
		if (dma_mapping_error(dev, busaddr))
			goto dma_err;
		map_single = true;
		wrb = queue_head_node(txq);
		wrb_fill(wrb, busaddr, len);
		be_dws_cpu_to_le(wrb, sizeof(*wrb));
		queue_head_inc(txq);
		copied += len;
	}

	/* One WRB per page fragment */
	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		const struct skb_frag_struct *frag =
			&skb_shinfo(skb)->frags[i];
		busaddr = skb_frag_dma_map(dev, frag, 0,
					skb_frag_size(frag), DMA_TO_DEVICE);
		if (dma_mapping_error(dev, busaddr))
			goto dma_err;
		wrb = queue_head_node(txq);
		wrb_fill(wrb, busaddr, skb_frag_size(frag));
		be_dws_cpu_to_le(wrb, sizeof(*wrb));
		queue_head_inc(txq);
		copied += skb_frag_size(frag);
	}

	/* Pad with an empty WRB when wrb_cnt_for_skb() required an even
	 * count (dummy carries no data, frag_len == 0) */
	if (dummy_wrb) {
		wrb = queue_head_node(txq);
		wrb_fill(wrb, 0, 0);
		be_dws_cpu_to_le(wrb, sizeof(*wrb));
		queue_head_inc(txq);
	}

	wrb_fill_hdr(adapter, hdr, first_skb, wrb_cnt, copied);
	be_dws_cpu_to_le(hdr, sizeof(*hdr));

	return copied;
dma_err:
	/* Rewind and unmap everything queued so far. Only the very first
	 * WRB can be a dma_map_single() mapping, hence map_single is
	 * cleared after the first iteration. */
	txq->head = map_head;
	while (copied) {
		wrb = queue_head_node(txq);
		unmap_tx_frag(dev, wrb, map_single);
		map_single = false;
		copied -= wrb->frag_len;
		queue_head_inc(txq);
	}
	return 0;
}
750
Somnath Kotur93040ae2012-06-26 22:32:10 +0000751static struct sk_buff *be_insert_vlan_in_pkt(struct be_adapter *adapter,
752 struct sk_buff *skb)
753{
754 u16 vlan_tag = 0;
755
756 skb = skb_share_check(skb, GFP_ATOMIC);
757 if (unlikely(!skb))
758 return skb;
759
760 if (vlan_tx_tag_present(skb)) {
761 vlan_tag = be_get_tx_vlan_tag(adapter, skb);
762 __vlan_put_tag(skb, vlan_tag);
763 skb->vlan_tci = 0;
764 }
765
766 return skb;
767}
768
/* ndo_start_xmit handler: apply SW workarounds for two HW bugs, map the
 * skb into TX WRBs, stop the subqueue if it may not fit another max-sized
 * skb, then ring the TX doorbell. Always returns NETDEV_TX_OK (dropped
 * skbs are freed here, never requeued to the stack).
 */
static netdev_tx_t be_xmit(struct sk_buff *skb,
			struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_tx_obj *txo = &adapter->tx_obj[skb_get_queue_mapping(skb)];
	struct be_queue_info *txq = &txo->q;
	struct iphdr *ip = NULL;
	u32 wrb_cnt = 0, copied = 0;
	u32 start = txq->head, eth_hdr_len;
	bool dummy_wrb, stopped = false;

	eth_hdr_len = ntohs(skb->protocol) == ETH_P_8021Q ?
		VLAN_ETH_HLEN : ETH_HLEN;

	/* HW has a bug which considers padding bytes as legal
	 * and modifies the IPv4 hdr's 'tot_len' field
	 */
	/* Workaround: trim the padding off short IPv4 VLAN pkts so the HW
	 * cannot mangle tot_len.
	 * NOTE(review): pskb_trim() return value is not checked here —
	 * presumably it cannot fail for a trim of a linear skb; confirm. */
	if (skb->len <= 60 && be_vlan_tag_chk(adapter, skb) &&
			is_ipv4_pkt(skb)) {
		ip = (struct iphdr *)ip_hdr(skb);
		pskb_trim(skb, eth_hdr_len + ntohs(ip->tot_len));
	}

	/* HW has a bug wherein it will calculate CSUM for VLAN
	 * pkts even though it is disabled.
	 * Manually insert VLAN in pkt.
	 */
	if (skb->ip_summed != CHECKSUM_PARTIAL &&
			be_vlan_tag_chk(adapter, skb)) {
		skb = be_insert_vlan_in_pkt(adapter, skb);
		if (unlikely(!skb))
			goto tx_drop;
	}

	wrb_cnt = wrb_cnt_for_skb(adapter, skb, &dummy_wrb);

	/* copied == 0 means DMA mapping failed; drop the skb */
	copied = make_tx_wrbs(adapter, txq, skb, wrb_cnt, dummy_wrb);
	if (copied) {
		int gso_segs = skb_shinfo(skb)->gso_segs;

		/* record the sent skb in the sent_skb table */
		BUG_ON(txo->sent_skb_list[start]);
		txo->sent_skb_list[start] = skb;

		/* Ensure txq has space for the next skb; Else stop the queue
		 * *BEFORE* ringing the tx doorbell, so that we serialze the
		 * tx compls of the current transmit which'll wake up the queue
		 */
		atomic_add(wrb_cnt, &txq->used);
		if ((BE_MAX_TX_FRAG_COUNT + atomic_read(&txq->used)) >=
								txq->len) {
			netif_stop_subqueue(netdev, skb_get_queue_mapping(skb));
			stopped = true;
		}

		be_txq_notify(adapter, txq->id, wrb_cnt);

		be_tx_stats_update(txo, wrb_cnt, copied, gso_segs, stopped);
	} else {
		/* Rewind the head claimed for the header WRB and drop */
		txq->head = start;
		dev_kfree_skb_any(skb);
	}
tx_drop:
	return NETDEV_TX_OK;
}
834
835static int be_change_mtu(struct net_device *netdev, int new_mtu)
836{
837 struct be_adapter *adapter = netdev_priv(netdev);
838 if (new_mtu < BE_MIN_MTU ||
Ajit Khaparde34a89b82010-02-09 01:32:43 +0000839 new_mtu > (BE_MAX_JUMBO_FRAME_SIZE -
840 (ETH_HLEN + ETH_FCS_LEN))) {
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700841 dev_info(&adapter->pdev->dev,
842 "MTU must be between %d and %d bytes\n",
Ajit Khaparde34a89b82010-02-09 01:32:43 +0000843 BE_MIN_MTU,
844 (BE_MAX_JUMBO_FRAME_SIZE - (ETH_HLEN + ETH_FCS_LEN)));
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700845 return -EINVAL;
846 }
847 dev_info(&adapter->pdev->dev, "MTU changed from %d to %d bytes\n",
848 netdev->mtu, new_mtu);
849 netdev->mtu = new_mtu;
850 return 0;
851}
852
853/*
Ajit Khaparde82903e42010-02-09 01:34:57 +0000854 * A max of 64 (BE_NUM_VLANS_SUPPORTED) vlans can be configured in BE.
855 * If the user configures more, place BE in vlan promiscuous mode.
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700856 */
Sathya Perla10329df2012-06-05 19:37:18 +0000857static int be_vid_config(struct be_adapter *adapter)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700858{
Sathya Perla10329df2012-06-05 19:37:18 +0000859 u16 vids[BE_NUM_VLANS_SUPPORTED];
860 u16 num = 0, i;
Ajit Khaparde82903e42010-02-09 01:34:57 +0000861 int status = 0;
Ajit Khaparde1da87b72010-07-23 01:51:22 +0000862
Sathya Perlac0e64ef2011-08-02 19:57:43 +0000863 /* No need to further configure vids if in promiscuous mode */
864 if (adapter->promiscuous)
865 return 0;
866
Padmanabh Ratnakar0fc16eb2012-04-25 01:46:06 +0000867 if (adapter->vlans_added > adapter->max_vlans)
868 goto set_vlan_promisc;
869
870 /* Construct VLAN Table to give to HW */
871 for (i = 0; i < VLAN_N_VID; i++)
872 if (adapter->vlan_tag[i])
Sathya Perla10329df2012-06-05 19:37:18 +0000873 vids[num++] = cpu_to_le16(i);
Padmanabh Ratnakar0fc16eb2012-04-25 01:46:06 +0000874
875 status = be_cmd_vlan_config(adapter, adapter->if_handle,
Sathya Perla10329df2012-06-05 19:37:18 +0000876 vids, num, 1, 0);
Padmanabh Ratnakar0fc16eb2012-04-25 01:46:06 +0000877
878 /* Set to VLAN promisc mode as setting VLAN filter failed */
879 if (status) {
880 dev_info(&adapter->pdev->dev, "Exhausted VLAN HW filters.\n");
881 dev_info(&adapter->pdev->dev, "Disabling HW VLAN filtering.\n");
882 goto set_vlan_promisc;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700883 }
Ajit Khaparde1da87b72010-07-23 01:51:22 +0000884
Sathya Perlab31c50a2009-09-17 10:30:13 -0700885 return status;
Padmanabh Ratnakar0fc16eb2012-04-25 01:46:06 +0000886
887set_vlan_promisc:
888 status = be_cmd_vlan_config(adapter, adapter->if_handle,
889 NULL, 0, 1, 1);
890 return status;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700891}
892
Jiri Pirko8e586132011-12-08 19:52:37 -0500893static int be_vlan_add_vid(struct net_device *netdev, u16 vid)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700894{
895 struct be_adapter *adapter = netdev_priv(netdev);
Ajit Khaparde80817cb2011-12-30 12:15:12 +0000896 int status = 0;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700897
Padmanabh Ratnakara85e9982012-10-20 06:02:40 +0000898 if (!lancer_chip(adapter) && !be_physfn(adapter)) {
Ajit Khaparde80817cb2011-12-30 12:15:12 +0000899 status = -EINVAL;
900 goto ret;
901 }
Sarveshwar Bandiba343c72010-03-31 02:56:12 +0000902
Padmanabh Ratnakara85e9982012-10-20 06:02:40 +0000903 /* Packets with VID 0 are always received by Lancer by default */
904 if (lancer_chip(adapter) && vid == 0)
905 goto ret;
906
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700907 adapter->vlan_tag[vid] = 1;
Ajit Khaparde82903e42010-02-09 01:34:57 +0000908 if (adapter->vlans_added <= (adapter->max_vlans + 1))
Sathya Perla10329df2012-06-05 19:37:18 +0000909 status = be_vid_config(adapter);
Jiri Pirko8e586132011-12-08 19:52:37 -0500910
Ajit Khaparde80817cb2011-12-30 12:15:12 +0000911 if (!status)
912 adapter->vlans_added++;
913 else
914 adapter->vlan_tag[vid] = 0;
915ret:
916 return status;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700917}
918
Jiri Pirko8e586132011-12-08 19:52:37 -0500919static int be_vlan_rem_vid(struct net_device *netdev, u16 vid)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700920{
921 struct be_adapter *adapter = netdev_priv(netdev);
Ajit Khaparde80817cb2011-12-30 12:15:12 +0000922 int status = 0;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700923
Padmanabh Ratnakara85e9982012-10-20 06:02:40 +0000924 if (!lancer_chip(adapter) && !be_physfn(adapter)) {
Ajit Khaparde80817cb2011-12-30 12:15:12 +0000925 status = -EINVAL;
926 goto ret;
927 }
Sarveshwar Bandiba343c72010-03-31 02:56:12 +0000928
Padmanabh Ratnakara85e9982012-10-20 06:02:40 +0000929 /* Packets with VID 0 are always received by Lancer by default */
930 if (lancer_chip(adapter) && vid == 0)
931 goto ret;
932
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700933 adapter->vlan_tag[vid] = 0;
Ajit Khaparde82903e42010-02-09 01:34:57 +0000934 if (adapter->vlans_added <= adapter->max_vlans)
Sathya Perla10329df2012-06-05 19:37:18 +0000935 status = be_vid_config(adapter);
Jiri Pirko8e586132011-12-08 19:52:37 -0500936
Ajit Khaparde80817cb2011-12-30 12:15:12 +0000937 if (!status)
938 adapter->vlans_added--;
939 else
940 adapter->vlan_tag[vid] = 1;
941ret:
942 return status;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700943}
944
/* ndo_set_rx_mode handler: program the HW RX filter to match netdev
 * flags and the unicast/multicast address lists, falling back to
 * (multicast-)promiscuous mode when the HW filter tables overflow.
 */
static void be_set_rx_mode(struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	int status;

	if (netdev->flags & IFF_PROMISC) {
		be_cmd_rx_filter(adapter, IFF_PROMISC, ON);
		adapter->promiscuous = true;
		goto done;
	}

	/* BE was previously in promiscuous mode; disable it */
	if (adapter->promiscuous) {
		adapter->promiscuous = false;
		be_cmd_rx_filter(adapter, IFF_PROMISC, OFF);

		/* Promisc mode bypassed VLAN filtering; reprogram it now */
		if (adapter->vlans_added)
			be_vid_config(adapter);
	}

	/* Enable multicast promisc if num configured exceeds what we support */
	if (netdev->flags & IFF_ALLMULTI ||
	    netdev_mc_count(netdev) > adapter->max_mcast_mac) {
		be_cmd_rx_filter(adapter, IFF_ALLMULTI, ON);
		goto done;
	}

	/* Unicast list changed: delete all secondary MACs, then re-add the
	 * current list (or go promiscuous if it no longer fits) */
	if (netdev_uc_count(netdev) != adapter->uc_macs) {
		struct netdev_hw_addr *ha;
		int i = 1; /* First slot is claimed by the Primary MAC */

		for (; adapter->uc_macs > 0; adapter->uc_macs--, i++) {
			be_cmd_pmac_del(adapter, adapter->if_handle,
					adapter->pmac_id[i], 0);
		}

		if (netdev_uc_count(netdev) > adapter->max_pmac_cnt) {
			be_cmd_rx_filter(adapter, IFF_PROMISC, ON);
			adapter->promiscuous = true;
			goto done;
		}

		netdev_for_each_uc_addr(ha, adapter->netdev) {
			adapter->uc_macs++; /* First slot is for Primary MAC */
			be_cmd_pmac_add(adapter, (u8 *)ha->addr,
					adapter->if_handle,
					&adapter->pmac_id[adapter->uc_macs], 0);
		}
	}

	status = be_cmd_rx_filter(adapter, IFF_MULTICAST, ON);

	/* Set to MCAST promisc mode if setting MULTICAST address fails */
	if (status) {
		dev_info(&adapter->pdev->dev, "Exhausted multicast HW filters.\n");
		dev_info(&adapter->pdev->dev, "Disabling HW multicast filtering.\n");
		be_cmd_rx_filter(adapter, IFF_ALLMULTI, ON);
	}
done:
	return;
}
1006
/* ndo_set_vf_mac handler: replace the MAC programmed for VF @vf.
 * Lancer uses the MAC-list firmware interface; BEx deletes the old pmac
 * entry and adds the new one. Caches the MAC in vf_cfg on success.
 */
static int be_set_vf_mac(struct net_device *netdev, int vf, u8 *mac)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf];
	int status;
	bool active_mac = false;
	u32 pmac_id;
	u8 old_mac[ETH_ALEN];

	if (!sriov_enabled(adapter))
		return -EPERM;

	if (!is_valid_ether_addr(mac) || vf >= adapter->num_vfs)
		return -EINVAL;

	if (lancer_chip(adapter)) {
		/* Remove the currently active MAC (if any) before setting
		 * the new one via the MAC-list interface */
		status = be_cmd_get_mac_from_list(adapter, old_mac, &active_mac,
						  &pmac_id, vf + 1);
		if (!status && active_mac)
			be_cmd_pmac_del(adapter, vf_cfg->if_handle,
					pmac_id, vf + 1);

		status = be_cmd_set_mac_list(adapter, mac, 1, vf + 1);
	} else {
		/* NOTE(review): the status of pmac_del is immediately
		 * overwritten by pmac_add below — presumably a failed
		 * delete (e.g. no MAC programmed yet) is intentionally
		 * ignored; confirm this is not masking real errors. */
		status = be_cmd_pmac_del(adapter, vf_cfg->if_handle,
					 vf_cfg->pmac_id, vf + 1);

		status = be_cmd_pmac_add(adapter, mac, vf_cfg->if_handle,
					 &vf_cfg->pmac_id, vf + 1);
	}

	if (status)
		dev_err(&adapter->pdev->dev, "MAC %pM set on VF %d Failed\n",
				mac, vf);
	else
		memcpy(vf_cfg->mac_addr, mac, ETH_ALEN);

	return status;
}
1046
Ajit Khaparde64600ea2010-07-23 01:50:34 +00001047static int be_get_vf_config(struct net_device *netdev, int vf,
1048 struct ifla_vf_info *vi)
1049{
1050 struct be_adapter *adapter = netdev_priv(netdev);
Sathya Perla11ac75e2011-12-13 00:58:50 +00001051 struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf];
Ajit Khaparde64600ea2010-07-23 01:50:34 +00001052
Sathya Perla11ac75e2011-12-13 00:58:50 +00001053 if (!sriov_enabled(adapter))
Ajit Khaparde64600ea2010-07-23 01:50:34 +00001054 return -EPERM;
1055
Sathya Perla11ac75e2011-12-13 00:58:50 +00001056 if (vf >= adapter->num_vfs)
Ajit Khaparde64600ea2010-07-23 01:50:34 +00001057 return -EINVAL;
1058
1059 vi->vf = vf;
Sathya Perla11ac75e2011-12-13 00:58:50 +00001060 vi->tx_rate = vf_cfg->tx_rate;
1061 vi->vlan = vf_cfg->vlan_tag;
Ajit Khaparde64600ea2010-07-23 01:50:34 +00001062 vi->qos = 0;
Sathya Perla11ac75e2011-12-13 00:58:50 +00001063 memcpy(&vi->mac, vf_cfg->mac_addr, ETH_ALEN);
Ajit Khaparde64600ea2010-07-23 01:50:34 +00001064
1065 return 0;
1066}
1067
/* ndo_set_vf_vlan handler: program (vlan != 0) or reset (vlan == 0)
 * transparent VLAN tagging for VF @vf via the hybrid-switch config.
 * NOTE(review): the @qos argument is accepted but never used — user-set
 * priority is silently dropped; confirm whether it should be rejected or
 * folded into the tag.
 */
static int be_set_vf_vlan(struct net_device *netdev,
			int vf, u16 vlan, u8 qos)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	int status = 0;

	if (!sriov_enabled(adapter))
		return -EPERM;

	if (vf >= adapter->num_vfs || vlan > 4095)
		return -EINVAL;

	if (vlan) {
		if (adapter->vf_cfg[vf].vlan_tag != vlan) {
			/* If this is new value, program it. Else skip. */
			adapter->vf_cfg[vf].vlan_tag = vlan;

			status = be_cmd_set_hsw_config(adapter, vlan,
				vf + 1, adapter->vf_cfg[vf].if_handle);
		}
	} else {
		/* Reset Transparent Vlan Tagging. */
		adapter->vf_cfg[vf].vlan_tag = 0;
		/* Revert to the default VID discovered at VF setup time */
		vlan = adapter->vf_cfg[vf].def_vid;
		status = be_cmd_set_hsw_config(adapter, vlan, vf + 1,
			adapter->vf_cfg[vf].if_handle);
	}


	if (status)
		dev_info(&adapter->pdev->dev,
				"VLAN %d config on VF %d failed\n", vlan, vf);
	return status;
}
1102
Ajit Khapardee1d18732010-07-23 01:52:13 +00001103static int be_set_vf_tx_rate(struct net_device *netdev,
1104 int vf, int rate)
1105{
1106 struct be_adapter *adapter = netdev_priv(netdev);
1107 int status = 0;
1108
Sathya Perla11ac75e2011-12-13 00:58:50 +00001109 if (!sriov_enabled(adapter))
Ajit Khapardee1d18732010-07-23 01:52:13 +00001110 return -EPERM;
1111
Ajit Khaparde94f434c2011-12-30 12:15:30 +00001112 if (vf >= adapter->num_vfs)
Ajit Khapardee1d18732010-07-23 01:52:13 +00001113 return -EINVAL;
1114
Ajit Khaparde94f434c2011-12-30 12:15:30 +00001115 if (rate < 100 || rate > 10000) {
1116 dev_err(&adapter->pdev->dev,
1117 "tx rate must be between 100 and 10000 Mbps\n");
1118 return -EINVAL;
1119 }
Ajit Khapardee1d18732010-07-23 01:52:13 +00001120
Padmanabh Ratnakard5c18472012-10-20 06:01:53 +00001121 if (lancer_chip(adapter))
1122 status = be_cmd_set_profile_config(adapter, rate / 10, vf + 1);
1123 else
1124 status = be_cmd_set_qos(adapter, rate / 10, vf + 1);
Ajit Khapardee1d18732010-07-23 01:52:13 +00001125
1126 if (status)
Ajit Khaparde94f434c2011-12-30 12:15:30 +00001127 dev_err(&adapter->pdev->dev,
Ajit Khapardee1d18732010-07-23 01:52:13 +00001128 "tx rate %d on VF %d failed\n", rate, vf);
Ajit Khaparde94f434c2011-12-30 12:15:30 +00001129 else
1130 adapter->vf_cfg[vf].tx_rate = rate;
Ajit Khapardee1d18732010-07-23 01:52:13 +00001131 return status;
1132}
1133
Sathya Perla39f1d942012-05-08 19:41:24 +00001134static int be_find_vfs(struct be_adapter *adapter, int vf_state)
1135{
1136 struct pci_dev *dev, *pdev = adapter->pdev;
Ivan Vecera2f6a0262012-10-01 01:56:55 +00001137 int vfs = 0, assigned_vfs = 0, pos;
Sathya Perla39f1d942012-05-08 19:41:24 +00001138 u16 offset, stride;
1139
1140 pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_SRIOV);
Sathya Perlad79c0a22012-06-05 19:37:22 +00001141 if (!pos)
1142 return 0;
Sathya Perla39f1d942012-05-08 19:41:24 +00001143 pci_read_config_word(pdev, pos + PCI_SRIOV_VF_OFFSET, &offset);
1144 pci_read_config_word(pdev, pos + PCI_SRIOV_VF_STRIDE, &stride);
1145
1146 dev = pci_get_device(pdev->vendor, PCI_ANY_ID, NULL);
1147 while (dev) {
Ivan Vecera2f6a0262012-10-01 01:56:55 +00001148 if (dev->is_virtfn && pci_physfn(dev) == pdev) {
Sathya Perla39f1d942012-05-08 19:41:24 +00001149 vfs++;
1150 if (dev->dev_flags & PCI_DEV_FLAGS_ASSIGNED)
1151 assigned_vfs++;
1152 }
1153 dev = pci_get_device(pdev->vendor, PCI_ANY_ID, dev);
1154 }
1155 return (vf_state == ASSIGNED) ? assigned_vfs : vfs;
1156}
1157
/* Adaptive interrupt coalescing: recompute the EQ delay (eqd) for @eqo
 * from the RX packet rate observed over the last interval, and push the
 * new value to FW only when it changed.
 */
static void be_eqd_update(struct be_adapter *adapter, struct be_eq_obj *eqo)
{
	/* Fetched up-front only so 'delta' can be computed below; the same
	 * expression is re-evaluated after the idx bounds check */
	struct be_rx_stats *stats = rx_stats(&adapter->rx_obj[eqo->idx]);
	ulong now = jiffies;
	ulong delta = now - stats->rx_jiffies;
	u64 pkts;
	unsigned int start, eqd;

	/* AIC disabled: just enforce the statically configured eqd */
	if (!eqo->enable_aic) {
		eqd = eqo->eqd;
		goto modify_eqd;
	}

	/* EQs beyond the RX queues have no RX stats to adapt from */
	if (eqo->idx >= adapter->num_rx_qs)
		return;

	stats = rx_stats(&adapter->rx_obj[eqo->idx]);

	/* Wrapped around */
	if (time_before(now, stats->rx_jiffies)) {
		stats->rx_jiffies = now;
		return;
	}

	/* Update once a second */
	if (delta < HZ)
		return;

	/* Snapshot the 64-bit packet counter consistently (seqcount) */
	do {
		start = u64_stats_fetch_begin_bh(&stats->sync);
		pkts = stats->rx_pkts;
	} while (u64_stats_fetch_retry_bh(&stats->sync, start));

	stats->rx_pps = (unsigned long)(pkts - stats->rx_pkts_prev) / (delta / HZ);
	stats->rx_pkts_prev = pkts;
	stats->rx_jiffies = now;
	/* Scale pkts/sec into an eqd value, clamped to the EQ's range;
	 * very low rates disable coalescing entirely */
	eqd = (stats->rx_pps / 110000) << 3;
	eqd = min(eqd, eqo->max_eqd);
	eqd = max(eqd, eqo->min_eqd);
	if (eqd < 10)
		eqd = 0;

modify_eqd:
	if (eqd != eqo->cur_eqd) {
		be_cmd_modify_eqd(adapter, eqo->q.id, eqd);
		eqo->cur_eqd = eqd;
	}
}
1206
Sathya Perla3abcded2010-10-03 22:12:27 -07001207static void be_rx_stats_update(struct be_rx_obj *rxo,
Sathya Perla2e588f82011-03-11 02:49:26 +00001208 struct be_rx_compl_info *rxcp)
Sathya Perla4097f662009-03-24 16:40:13 -07001209{
Sathya Perlaac124ff2011-07-25 19:10:14 +00001210 struct be_rx_stats *stats = rx_stats(rxo);
Sathya Perla4097f662009-03-24 16:40:13 -07001211
Sathya Perlaab1594e2011-07-25 19:10:15 +00001212 u64_stats_update_begin(&stats->sync);
Sathya Perla3abcded2010-10-03 22:12:27 -07001213 stats->rx_compl++;
Sathya Perla2e588f82011-03-11 02:49:26 +00001214 stats->rx_bytes += rxcp->pkt_size;
Sathya Perla3abcded2010-10-03 22:12:27 -07001215 stats->rx_pkts++;
Sathya Perla2e588f82011-03-11 02:49:26 +00001216 if (rxcp->pkt_type == BE_MULTICAST_PACKET)
Sathya Perla3abcded2010-10-03 22:12:27 -07001217 stats->rx_mcast_pkts++;
Sathya Perla2e588f82011-03-11 02:49:26 +00001218 if (rxcp->err)
Sathya Perlaac124ff2011-07-25 19:10:14 +00001219 stats->rx_compl_err++;
Sathya Perlaab1594e2011-07-25 19:10:15 +00001220 u64_stats_update_end(&stats->sync);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001221}
1222
Sathya Perla2e588f82011-03-11 02:49:26 +00001223static inline bool csum_passed(struct be_rx_compl_info *rxcp)
Ajit Khaparde728a9972009-04-13 15:41:22 -07001224{
Padmanabh Ratnakar19fad862011-03-07 03:08:16 +00001225 /* L4 checksum is not reliable for non TCP/UDP packets.
1226 * Also ignore ipcksm for ipv6 pkts */
Sathya Perla2e588f82011-03-11 02:49:26 +00001227 return (rxcp->tcpf || rxcp->udpf) && rxcp->l4_csum &&
1228 (rxcp->ip_csum || rxcp->ipv6);
Ajit Khaparde728a9972009-04-13 15:41:22 -07001229}
1230
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001231static struct be_rx_page_info *get_rx_page_info(struct be_rx_obj *rxo,
1232 u16 frag_idx)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001233{
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001234 struct be_adapter *adapter = rxo->adapter;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001235 struct be_rx_page_info *rx_page_info;
Sathya Perla3abcded2010-10-03 22:12:27 -07001236 struct be_queue_info *rxq = &rxo->q;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001237
Sathya Perla3abcded2010-10-03 22:12:27 -07001238 rx_page_info = &rxo->page_info_tbl[frag_idx];
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001239 BUG_ON(!rx_page_info->page);
1240
Ajit Khaparde205859a2010-02-09 01:34:21 +00001241 if (rx_page_info->last_page_user) {
Ivan Vecera2b7bceb2011-02-02 08:05:12 +00001242 dma_unmap_page(&adapter->pdev->dev,
1243 dma_unmap_addr(rx_page_info, bus),
1244 adapter->big_page_size, DMA_FROM_DEVICE);
Ajit Khaparde205859a2010-02-09 01:34:21 +00001245 rx_page_info->last_page_user = false;
1246 }
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001247
1248 atomic_dec(&rxq->used);
1249 return rx_page_info;
1250}
1251
1252/* Throwaway the data in the Rx completion */
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001253static void be_rx_compl_discard(struct be_rx_obj *rxo,
1254 struct be_rx_compl_info *rxcp)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001255{
Sathya Perla3abcded2010-10-03 22:12:27 -07001256 struct be_queue_info *rxq = &rxo->q;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001257 struct be_rx_page_info *page_info;
Sathya Perla2e588f82011-03-11 02:49:26 +00001258 u16 i, num_rcvd = rxcp->num_rcvd;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001259
Padmanabh Ratnakare80d9da2011-03-07 03:07:58 +00001260 for (i = 0; i < num_rcvd; i++) {
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001261 page_info = get_rx_page_info(rxo, rxcp->rxq_idx);
Padmanabh Ratnakare80d9da2011-03-07 03:07:58 +00001262 put_page(page_info->page);
1263 memset(page_info, 0, sizeof(*page_info));
Sathya Perla2e588f82011-03-11 02:49:26 +00001264 index_inc(&rxcp->rxq_idx, rxq->len);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001265 }
1266}
1267
1268/*
1269 * skb_fill_rx_data forms a complete skb for an ether frame
1270 * indicated by rxcp.
1271 */
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001272static void skb_fill_rx_data(struct be_rx_obj *rxo, struct sk_buff *skb,
1273 struct be_rx_compl_info *rxcp)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001274{
Sathya Perla3abcded2010-10-03 22:12:27 -07001275 struct be_queue_info *rxq = &rxo->q;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001276 struct be_rx_page_info *page_info;
Sathya Perla2e588f82011-03-11 02:49:26 +00001277 u16 i, j;
1278 u16 hdr_len, curr_frag_len, remaining;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001279 u8 *start;
1280
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001281 page_info = get_rx_page_info(rxo, rxcp->rxq_idx);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001282 start = page_address(page_info->page) + page_info->page_offset;
1283 prefetch(start);
1284
1285 /* Copy data in the first descriptor of this completion */
Sathya Perla2e588f82011-03-11 02:49:26 +00001286 curr_frag_len = min(rxcp->pkt_size, rx_frag_size);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001287
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001288 skb->len = curr_frag_len;
1289 if (curr_frag_len <= BE_HDR_LEN) { /* tiny packet */
Eric Dumazetac1ae5f2012-07-13 03:19:41 +00001290 memcpy(skb->data, start, curr_frag_len);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001291 /* Complete packet has now been moved to data */
1292 put_page(page_info->page);
1293 skb->data_len = 0;
1294 skb->tail += curr_frag_len;
1295 } else {
Eric Dumazetac1ae5f2012-07-13 03:19:41 +00001296 hdr_len = ETH_HLEN;
1297 memcpy(skb->data, start, hdr_len);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001298 skb_shinfo(skb)->nr_frags = 1;
Ian Campbellb061b392011-08-29 23:18:23 +00001299 skb_frag_set_page(skb, 0, page_info->page);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001300 skb_shinfo(skb)->frags[0].page_offset =
1301 page_info->page_offset + hdr_len;
Eric Dumazet9e903e02011-10-18 21:00:24 +00001302 skb_frag_size_set(&skb_shinfo(skb)->frags[0], curr_frag_len - hdr_len);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001303 skb->data_len = curr_frag_len - hdr_len;
Eric Dumazetbdb28a92011-10-13 06:31:02 +00001304 skb->truesize += rx_frag_size;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001305 skb->tail += hdr_len;
1306 }
Ajit Khaparde205859a2010-02-09 01:34:21 +00001307 page_info->page = NULL;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001308
Sathya Perla2e588f82011-03-11 02:49:26 +00001309 if (rxcp->pkt_size <= rx_frag_size) {
1310 BUG_ON(rxcp->num_rcvd != 1);
1311 return;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001312 }
1313
1314 /* More frags present for this completion */
Sathya Perla2e588f82011-03-11 02:49:26 +00001315 index_inc(&rxcp->rxq_idx, rxq->len);
1316 remaining = rxcp->pkt_size - curr_frag_len;
1317 for (i = 1, j = 0; i < rxcp->num_rcvd; i++) {
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001318 page_info = get_rx_page_info(rxo, rxcp->rxq_idx);
Sathya Perla2e588f82011-03-11 02:49:26 +00001319 curr_frag_len = min(remaining, rx_frag_size);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001320
Ajit Khapardebd46cb62009-06-26 02:51:07 +00001321 /* Coalesce all frags from the same physical page in one slot */
1322 if (page_info->page_offset == 0) {
1323 /* Fresh page */
1324 j++;
Ian Campbellb061b392011-08-29 23:18:23 +00001325 skb_frag_set_page(skb, j, page_info->page);
Ajit Khapardebd46cb62009-06-26 02:51:07 +00001326 skb_shinfo(skb)->frags[j].page_offset =
1327 page_info->page_offset;
Eric Dumazet9e903e02011-10-18 21:00:24 +00001328 skb_frag_size_set(&skb_shinfo(skb)->frags[j], 0);
Ajit Khapardebd46cb62009-06-26 02:51:07 +00001329 skb_shinfo(skb)->nr_frags++;
1330 } else {
1331 put_page(page_info->page);
1332 }
1333
Eric Dumazet9e903e02011-10-18 21:00:24 +00001334 skb_frag_size_add(&skb_shinfo(skb)->frags[j], curr_frag_len);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001335 skb->len += curr_frag_len;
1336 skb->data_len += curr_frag_len;
Eric Dumazetbdb28a92011-10-13 06:31:02 +00001337 skb->truesize += rx_frag_size;
Sathya Perla2e588f82011-03-11 02:49:26 +00001338 remaining -= curr_frag_len;
1339 index_inc(&rxcp->rxq_idx, rxq->len);
Ajit Khaparde205859a2010-02-09 01:34:21 +00001340 page_info->page = NULL;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001341 }
Ajit Khapardebd46cb62009-06-26 02:51:07 +00001342 BUG_ON(j > MAX_SKB_FRAGS);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001343}
1344
/* Process the RX completion indicated by rxcp when GRO is disabled.
 * Allocates an skb, fills it from the posted RX fragments, sets checksum /
 * RSS / VLAN metadata from the completion and hands it to the stack via
 * netif_receive_skb().  On allocation failure the completion's fragments
 * are discarded so the ring does not leak buffers.
 */
static void be_rx_compl_process(struct be_rx_obj *rxo,
			struct be_rx_compl_info *rxcp)
{
	struct be_adapter *adapter = rxo->adapter;
	struct net_device *netdev = adapter->netdev;
	struct sk_buff *skb;

	skb = netdev_alloc_skb_ip_align(netdev, BE_RX_SKB_ALLOC_SIZE);
	if (unlikely(!skb)) {
		rx_stats(rxo)->rx_drops_no_skbs++;
		be_rx_compl_discard(rxo, rxcp);
		return;
	}

	skb_fill_rx_data(rxo, skb, rxcp);

	/* Trust the HW checksum only when the netdev has RXCSUM enabled and
	 * the completion flags say the csum verification passed.
	 */
	if (likely((netdev->features & NETIF_F_RXCSUM) && csum_passed(rxcp)))
		skb->ip_summed = CHECKSUM_UNNECESSARY;
	else
		skb_checksum_none_assert(skb);

	skb->protocol = eth_type_trans(skb, netdev);
	/* Record which RX ring this packet arrived on (index into rx_obj[]) */
	skb_record_rx_queue(skb, rxo - &adapter->rx_obj[0]);
	if (netdev->features & NETIF_F_RXHASH)
		skb->rxhash = rxcp->rss_hash;


	if (rxcp->vlanf)
		__vlan_hwaccel_put_tag(skb, rxcp->vlan_tag);

	netif_receive_skb(skb);
}
1378
/* Process the RX completion indicated by rxcp when GRO is enabled.
 * Builds a frag-only skb obtained from napi_get_frags() out of the posted
 * RX page fragments and feeds it to napi_gro_frags().  Fragments from the
 * same physical page are coalesced into a single frag slot.
 */
void be_rx_compl_process_gro(struct be_rx_obj *rxo, struct napi_struct *napi,
			     struct be_rx_compl_info *rxcp)
{
	struct be_adapter *adapter = rxo->adapter;
	struct be_rx_page_info *page_info;
	struct sk_buff *skb = NULL;
	struct be_queue_info *rxq = &rxo->q;
	u16 remaining, curr_frag_len;
	u16 i, j;

	skb = napi_get_frags(napi);
	if (!skb) {
		/* No skb available: drop the frags so the ring stays sane */
		be_rx_compl_discard(rxo, rxcp);
		return;
	}

	/* j starts at (u16)-1 so the "first frag" branch increments it to 0 */
	remaining = rxcp->pkt_size;
	for (i = 0, j = -1; i < rxcp->num_rcvd; i++) {
		page_info = get_rx_page_info(rxo, rxcp->rxq_idx);

		curr_frag_len = min(remaining, rx_frag_size);

		/* Coalesce all frags from the same physical page in one slot */
		if (i == 0 || page_info->page_offset == 0) {
			/* First frag or Fresh page */
			j++;
			skb_frag_set_page(skb, j, page_info->page);
			skb_shinfo(skb)->frags[j].page_offset =
							page_info->page_offset;
			skb_frag_size_set(&skb_shinfo(skb)->frags[j], 0);
		} else {
			/* Same page as previous frag: release the extra ref */
			put_page(page_info->page);
		}
		skb_frag_size_add(&skb_shinfo(skb)->frags[j], curr_frag_len);
		skb->truesize += rx_frag_size;
		remaining -= curr_frag_len;
		index_inc(&rxcp->rxq_idx, rxq->len);
		memset(page_info, 0, sizeof(*page_info));
	}
	BUG_ON(j > MAX_SKB_FRAGS);

	skb_shinfo(skb)->nr_frags = j + 1;
	skb->len = rxcp->pkt_size;
	skb->data_len = rxcp->pkt_size;
	/* GRO path is only taken for packets whose csum the HW verified */
	skb->ip_summed = CHECKSUM_UNNECESSARY;
	skb_record_rx_queue(skb, rxo - &adapter->rx_obj[0]);
	if (adapter->netdev->features & NETIF_F_RXHASH)
		skb->rxhash = rxcp->rss_hash;

	if (rxcp->vlanf)
		__vlan_hwaccel_put_tag(skb, rxcp->vlan_tag);

	napi_gro_frags(napi);
}
1434
/* Decode a v1 (BE3 native mode) RX completion entry into the sw-friendly
 * be_rx_compl_info.  Pure field extraction via the AMAP bit-field macros;
 * VLAN metadata is read only when the vtp (vlan-tagged) bit is set.
 */
static void be_parse_rx_compl_v1(struct be_eth_rx_compl *compl,
				 struct be_rx_compl_info *rxcp)
{
	rxcp->pkt_size =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v1, pktsize, compl);
	rxcp->vlanf = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, vtp, compl);
	rxcp->err = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, err, compl);
	rxcp->tcpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, tcpf, compl);
	rxcp->udpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, udpf, compl);
	rxcp->ip_csum =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v1, ipcksm, compl);
	rxcp->l4_csum =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v1, l4_cksm, compl);
	rxcp->ipv6 =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v1, ip_version, compl);
	rxcp->rxq_idx =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v1, fragndx, compl);
	rxcp->num_rcvd =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v1, numfrags, compl);
	rxcp->pkt_type =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v1, cast_enc, compl);
	rxcp->rss_hash =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v1, rsshash, compl);
	if (rxcp->vlanf) {
		rxcp->vtm = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, vtm,
					  compl);
		rxcp->vlan_tag = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, vlan_tag,
					       compl);
	}
	rxcp->port = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, port, compl);
}
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001466
/* Decode a v0 (legacy) RX completion entry into be_rx_compl_info.
 * Mirrors be_parse_rx_compl_v1() field-for-field, but uses the v0
 * completion layout.
 */
static void be_parse_rx_compl_v0(struct be_eth_rx_compl *compl,
				 struct be_rx_compl_info *rxcp)
{
	rxcp->pkt_size =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v0, pktsize, compl);
	rxcp->vlanf = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, vtp, compl);
	rxcp->err = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, err, compl);
	rxcp->tcpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, tcpf, compl);
	rxcp->udpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, udpf, compl);
	rxcp->ip_csum =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v0, ipcksm, compl);
	rxcp->l4_csum =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v0, l4_cksm, compl);
	rxcp->ipv6 =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v0, ip_version, compl);
	rxcp->rxq_idx =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v0, fragndx, compl);
	rxcp->num_rcvd =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v0, numfrags, compl);
	rxcp->pkt_type =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v0, cast_enc, compl);
	rxcp->rss_hash =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v0, rsshash, compl);
	if (rxcp->vlanf) {
		rxcp->vtm = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, vtm,
					  compl);
		rxcp->vlan_tag = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, vlan_tag,
					       compl);
	}
	rxcp->port = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, port, compl);
}
1498
/* Fetch and parse the next valid RX completion from rxo's CQ, or return
 * NULL if none is pending.  The parsed result lives in rxo->rxcp (one
 * in-flight completion per RX object).  The CQ entry is invalidated and
 * the tail advanced so it will not be processed twice.
 */
static struct be_rx_compl_info *be_rx_compl_get(struct be_rx_obj *rxo)
{
	struct be_eth_rx_compl *compl = queue_tail_node(&rxo->cq);
	struct be_rx_compl_info *rxcp = &rxo->rxcp;
	struct be_adapter *adapter = rxo->adapter;

	/* For checking the valid bit it is Ok to use either definition as the
	 * valid bit is at the same position in both v0 and v1 Rx compl */
	if (compl->dw[offsetof(struct amap_eth_rx_compl_v1, valid) / 32] == 0)
		return NULL;

	/* Read barrier: the rest of the completion must be read only after
	 * the valid bit has been observed set.
	 */
	rmb();
	be_dws_le_to_cpu(compl, sizeof(*compl));

	if (adapter->be3_native)
		be_parse_rx_compl_v1(compl, rxcp);
	else
		be_parse_rx_compl_v0(compl, rxcp);

	if (rxcp->vlanf) {
		/* vlanf could be wrongly set in some cards.
		 * ignore if vtm is not set */
		if ((adapter->function_mode & FLEX10_MODE) && !rxcp->vtm)
			rxcp->vlanf = 0;

		/* BE HW reports the tag byte-swapped; Lancer does not */
		if (!lancer_chip(adapter))
			rxcp->vlan_tag = swab16(rxcp->vlan_tag);

		/* Strip the pvid tag unless the vlan is explicitly configured */
		if (adapter->pvid == (rxcp->vlan_tag & VLAN_VID_MASK) &&
		    !adapter->vlan_tag[rxcp->vlan_tag])
			rxcp->vlanf = 0;
	}

	/* As the compl has been parsed, reset it; we wont touch it again */
	compl->dw[offsetof(struct amap_eth_rx_compl_v1, valid) / 32] = 0;

	queue_tail_inc(&rxo->cq);
	return rxcp;
}
1538
Eric Dumazet1829b082011-03-01 05:48:12 +00001539static inline struct page *be_alloc_pages(u32 size, gfp_t gfp)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001540{
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001541 u32 order = get_order(size);
Eric Dumazet1829b082011-03-01 05:48:12 +00001542
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001543 if (order > 0)
Eric Dumazet1829b082011-03-01 05:48:12 +00001544 gfp |= __GFP_COMP;
1545 return alloc_pages(gfp, order);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001546}
1547
1548/*
1549 * Allocate a page, split it to fragments of size rx_frag_size and post as
1550 * receive buffers to BE
1551 */
Eric Dumazet1829b082011-03-01 05:48:12 +00001552static void be_post_rx_frags(struct be_rx_obj *rxo, gfp_t gfp)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001553{
Sathya Perla3abcded2010-10-03 22:12:27 -07001554 struct be_adapter *adapter = rxo->adapter;
Sathya Perla26d92f92010-01-21 22:52:08 -08001555 struct be_rx_page_info *page_info = NULL, *prev_page_info = NULL;
Sathya Perla3abcded2010-10-03 22:12:27 -07001556 struct be_queue_info *rxq = &rxo->q;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001557 struct page *pagep = NULL;
1558 struct be_eth_rx_d *rxd;
1559 u64 page_dmaaddr = 0, frag_dmaaddr;
1560 u32 posted, page_offset = 0;
1561
Sathya Perla3abcded2010-10-03 22:12:27 -07001562 page_info = &rxo->page_info_tbl[rxq->head];
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001563 for (posted = 0; posted < MAX_RX_POST && !page_info->page; posted++) {
1564 if (!pagep) {
Eric Dumazet1829b082011-03-01 05:48:12 +00001565 pagep = be_alloc_pages(adapter->big_page_size, gfp);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001566 if (unlikely(!pagep)) {
Sathya Perlaac124ff2011-07-25 19:10:14 +00001567 rx_stats(rxo)->rx_post_fail++;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001568 break;
1569 }
Ivan Vecera2b7bceb2011-02-02 08:05:12 +00001570 page_dmaaddr = dma_map_page(&adapter->pdev->dev, pagep,
1571 0, adapter->big_page_size,
1572 DMA_FROM_DEVICE);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001573 page_info->page_offset = 0;
1574 } else {
1575 get_page(pagep);
1576 page_info->page_offset = page_offset + rx_frag_size;
1577 }
1578 page_offset = page_info->page_offset;
1579 page_info->page = pagep;
FUJITA Tomonorifac6da52010-04-01 16:53:22 +00001580 dma_unmap_addr_set(page_info, bus, page_dmaaddr);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001581 frag_dmaaddr = page_dmaaddr + page_info->page_offset;
1582
1583 rxd = queue_head_node(rxq);
1584 rxd->fragpa_lo = cpu_to_le32(frag_dmaaddr & 0xFFFFFFFF);
1585 rxd->fragpa_hi = cpu_to_le32(upper_32_bits(frag_dmaaddr));
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001586
1587 /* Any space left in the current big page for another frag? */
1588 if ((page_offset + rx_frag_size + rx_frag_size) >
1589 adapter->big_page_size) {
1590 pagep = NULL;
1591 page_info->last_page_user = true;
1592 }
Sathya Perla26d92f92010-01-21 22:52:08 -08001593
1594 prev_page_info = page_info;
1595 queue_head_inc(rxq);
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001596 page_info = &rxo->page_info_tbl[rxq->head];
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001597 }
1598 if (pagep)
Sathya Perla26d92f92010-01-21 22:52:08 -08001599 prev_page_info->last_page_user = true;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001600
1601 if (posted) {
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001602 atomic_add(posted, &rxq->used);
Sathya Perla8788fdc2009-07-27 22:52:03 +00001603 be_rxq_notify(adapter, rxq->id, posted);
Sathya Perlaea1dae12009-03-19 23:56:20 -07001604 } else if (atomic_read(&rxq->used) == 0) {
1605 /* Let be_worker replenish when memory is available */
Sathya Perla3abcded2010-10-03 22:12:27 -07001606 rxo->rx_post_starved = true;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001607 }
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001608}
1609
/* Fetch the next valid TX completion from @tx_cq, or NULL if none.
 * The entry is converted to CPU endianness in place, its valid bit cleared
 * so it is consumed exactly once, and the CQ tail advanced.
 */
static struct be_eth_tx_compl *be_tx_compl_get(struct be_queue_info *tx_cq)
{
	struct be_eth_tx_compl *txcp = queue_tail_node(tx_cq);

	if (txcp->dw[offsetof(struct amap_eth_tx_compl, valid) / 32] == 0)
		return NULL;

	/* Read barrier: body of the completion is read only after the
	 * valid bit has been seen set.
	 */
	rmb();
	be_dws_le_to_cpu(txcp, sizeof(*txcp));

	txcp->dw[offsetof(struct amap_eth_tx_compl, valid) / 32] = 0;

	queue_tail_inc(tx_cq);
	return txcp;
}
1625
/* Reclaim one completed TX request: walk the wrbs of the skb posted at
 * txq->tail up to @last_index, DMA-unmap each fragment and free the skb.
 * Returns the number of wrbs consumed (including the header wrb) so the
 * caller can adjust txq->used.
 */
static u16 be_tx_compl_process(struct be_adapter *adapter,
		struct be_tx_obj *txo, u16 last_index)
{
	struct be_queue_info *txq = &txo->q;
	struct be_eth_wrb *wrb;
	struct sk_buff **sent_skbs = txo->sent_skb_list;
	struct sk_buff *sent_skb;
	u16 cur_index, num_wrbs = 1; /* account for hdr wrb */
	bool unmap_skb_hdr = true;

	sent_skb = sent_skbs[txq->tail];
	BUG_ON(!sent_skb);
	sent_skbs[txq->tail] = NULL;

	/* skip header wrb */
	queue_tail_inc(txq);

	do {
		cur_index = txq->tail;
		wrb = queue_tail_node(txq);
		/* The first data wrb also unmaps the skb's linear header */
		unmap_tx_frag(&adapter->pdev->dev, wrb,
			      (unmap_skb_hdr && skb_headlen(sent_skb)));
		unmap_skb_hdr = false;

		num_wrbs++;
		queue_tail_inc(txq);
	} while (cur_index != last_index);

	kfree_skb(sent_skb);
	return num_wrbs;
}
1657
/* Return the number of events in the event queue.
 * Consumes every valid EQ entry: each is zeroed (so it is seen only once)
 * and the queue tail advanced.  The rmb() orders the validity check of an
 * entry against its subsequent clearing/consumption.
 */
static inline int events_get(struct be_eq_obj *eqo)
{
	struct be_eq_entry *eqe;
	int num = 0;

	do {
		eqe = queue_tail_node(&eqo->q);
		if (eqe->evt == 0)
			break;

		rmb();
		eqe->evt = 0;
		num++;
		queue_tail_inc(&eqo->q);
	} while (true);

	return num;
}
1677
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001678static int event_handle(struct be_eq_obj *eqo)
Sathya Perla859b1e42009-08-10 03:43:51 +00001679{
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001680 bool rearm = false;
1681 int num = events_get(eqo);
Sathya Perla859b1e42009-08-10 03:43:51 +00001682
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001683 /* Deal with any spurious interrupts that come without events */
1684 if (!num)
1685 rearm = true;
Sathya Perla859b1e42009-08-10 03:43:51 +00001686
Padmanabh Ratnakaraf311fe2012-04-25 01:46:39 +00001687 if (num || msix_enabled(eqo->adapter))
1688 be_eq_notify(eqo->adapter, eqo->q.id, rearm, true, num);
1689
Sathya Perla859b1e42009-08-10 03:43:51 +00001690 if (num)
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001691 napi_schedule(&eqo->napi);
1692
1693 return num;
Sathya Perla859b1e42009-08-10 03:43:51 +00001694}
1695
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001696/* Leaves the EQ is disarmed state */
1697static void be_eq_clean(struct be_eq_obj *eqo)
1698{
1699 int num = events_get(eqo);
1700
1701 be_eq_notify(eqo->adapter, eqo->q.id, false, true, num);
1702}
1703
/* Drain an RX object's completion queue and release every RX buffer that
 * was posted but never consumed, then reset the RX ring indices.
 * Called on queue teardown; afterwards rxq->used must be zero.
 */
static void be_rx_cq_clean(struct be_rx_obj *rxo)
{
	struct be_rx_page_info *page_info;
	struct be_queue_info *rxq = &rxo->q;
	struct be_queue_info *rx_cq = &rxo->cq;
	struct be_rx_compl_info *rxcp;
	u16 tail;

	/* First cleanup pending rx completions */
	while ((rxcp = be_rx_compl_get(rxo)) != NULL) {
		be_rx_compl_discard(rxo, rxcp);
		be_cq_notify(rxo->adapter, rx_cq->id, false, 1);
	}

	/* Then free posted rx buffer that were not used */
	/* Oldest still-posted buffer sits "used" entries behind the head */
	tail = (rxq->head + rxq->len - atomic_read(&rxq->used)) % rxq->len;
	for (; atomic_read(&rxq->used) > 0; index_inc(&tail, rxq->len)) {
		page_info = get_rx_page_info(rxo, tail);
		put_page(page_info->page);
		memset(page_info, 0, sizeof(*page_info));
	}
	BUG_ON(atomic_read(&rxq->used));
	rxq->tail = rxq->head = 0;
}
1728
/* Drain all TX completion queues on teardown.
 * Phase 1: poll every TX CQ for up to ~200ms, reclaiming completed wrbs.
 * Phase 2: for any skbs still posted (their completions will never arrive,
 * e.g. after an error), compute their wrb span from the skb itself and
 * force-reclaim them so no memory or DMA mappings leak.
 */
static void be_tx_compl_clean(struct be_adapter *adapter)
{
	struct be_tx_obj *txo;
	struct be_queue_info *txq;
	struct be_eth_tx_compl *txcp;
	u16 end_idx, cmpl = 0, timeo = 0, num_wrbs = 0;
	struct sk_buff *sent_skb;
	bool dummy_wrb;
	int i, pending_txqs;

	/* Wait for a max of 200ms for all the tx-completions to arrive. */
	do {
		pending_txqs = adapter->num_tx_qs;

		for_all_tx_queues(adapter, txo, i) {
			txq = &txo->q;
			while ((txcp = be_tx_compl_get(&txo->cq))) {
				end_idx =
					AMAP_GET_BITS(struct amap_eth_tx_compl,
						      wrb_index, txcp);
				num_wrbs += be_tx_compl_process(adapter, txo,
						end_idx);
				cmpl++;
			}
			if (cmpl) {
				/* Ack the reclaimed completions to the HW */
				be_cq_notify(adapter, txo->cq.id, false, cmpl);
				atomic_sub(num_wrbs, &txq->used);
				cmpl = 0;
				num_wrbs = 0;
			}
			if (atomic_read(&txq->used) == 0)
				pending_txqs--;
		}

		if (pending_txqs == 0 || ++timeo > 200)
			break;

		mdelay(1);
	} while (true);

	for_all_tx_queues(adapter, txo, i) {
		txq = &txo->q;
		if (atomic_read(&txq->used))
			dev_err(&adapter->pdev->dev, "%d pending tx-compls\n",
				atomic_read(&txq->used));

		/* free posted tx for which compls will never arrive */
		while (atomic_read(&txq->used)) {
			sent_skb = txo->sent_skb_list[txq->tail];
			end_idx = txq->tail;
			num_wrbs = wrb_cnt_for_skb(adapter, sent_skb,
						   &dummy_wrb);
			index_adv(&end_idx, num_wrbs - 1, txq->len);
			num_wrbs = be_tx_compl_process(adapter, txo, end_idx);
			atomic_sub(num_wrbs, &txq->used);
		}
	}
}
1787
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001788static void be_evt_queues_destroy(struct be_adapter *adapter)
1789{
1790 struct be_eq_obj *eqo;
1791 int i;
1792
1793 for_all_evt_queues(adapter, eqo, i) {
Padmanabh Ratnakar19d59aa2012-07-12 03:57:21 +00001794 if (eqo->q.created) {
1795 be_eq_clean(eqo);
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001796 be_cmd_q_destroy(adapter, &eqo->q, QTYPE_EQ);
Padmanabh Ratnakar19d59aa2012-07-12 03:57:21 +00001797 }
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001798 be_queue_free(adapter, &eqo->q);
1799 }
1800}
1801
/* Create one event queue per IRQ vector.  Initializes each EQ object's
 * sw state (budget, adaptive interrupt coalescing) before allocating and
 * creating the HW queue.  On failure returns the error; partially created
 * queues are presumably cleaned up by the caller via
 * be_evt_queues_destroy() -- TODO confirm against callers.
 */
static int be_evt_queues_create(struct be_adapter *adapter)
{
	struct be_queue_info *eq;
	struct be_eq_obj *eqo;
	int i, rc;

	adapter->num_evt_qs = num_irqs(adapter);

	for_all_evt_queues(adapter, eqo, i) {
		eqo->adapter = adapter;
		eqo->tx_budget = BE_TX_BUDGET;
		eqo->idx = i;
		eqo->max_eqd = BE_MAX_EQD;
		eqo->enable_aic = true;

		eq = &eqo->q;
		rc = be_queue_alloc(adapter, eq, EVNT_Q_LEN,
					sizeof(struct be_eq_entry));
		if (rc)
			return rc;

		rc = be_cmd_eq_create(adapter, eq, eqo->cur_eqd);
		if (rc)
			return rc;
	}
	return 0;
}
1829
Sathya Perla5fb379e2009-06-18 00:02:59 +00001830static void be_mcc_queues_destroy(struct be_adapter *adapter)
1831{
1832 struct be_queue_info *q;
Sathya Perla5fb379e2009-06-18 00:02:59 +00001833
Sathya Perla8788fdc2009-07-27 22:52:03 +00001834 q = &adapter->mcc_obj.q;
Sathya Perla5fb379e2009-06-18 00:02:59 +00001835 if (q->created)
Sathya Perla8788fdc2009-07-27 22:52:03 +00001836 be_cmd_q_destroy(adapter, q, QTYPE_MCCQ);
Sathya Perla5fb379e2009-06-18 00:02:59 +00001837 be_queue_free(adapter, q);
1838
Sathya Perla8788fdc2009-07-27 22:52:03 +00001839 q = &adapter->mcc_obj.cq;
Sathya Perla5fb379e2009-06-18 00:02:59 +00001840 if (q->created)
Sathya Perla8788fdc2009-07-27 22:52:03 +00001841 be_cmd_q_destroy(adapter, q, QTYPE_CQ);
Sathya Perla5fb379e2009-06-18 00:02:59 +00001842 be_queue_free(adapter, q);
1843}
1844
/* Must be called only after TX qs are created as MCC shares TX EQ */
/* Create the MCC completion queue and work queue; unwinds in reverse
 * creation order via the goto chain on any failure (returns -1).
 */
static int be_mcc_queues_create(struct be_adapter *adapter)
{
	struct be_queue_info *q, *cq;

	cq = &adapter->mcc_obj.cq;
	if (be_queue_alloc(adapter, cq, MCC_CQ_LEN,
			sizeof(struct be_mcc_compl)))
		goto err;

	/* Use the default EQ for MCC completions */
	if (be_cmd_cq_create(adapter, cq, &mcc_eqo(adapter)->q, true, 0))
		goto mcc_cq_free;

	q = &adapter->mcc_obj.q;
	if (be_queue_alloc(adapter, q, MCC_Q_LEN, sizeof(struct be_mcc_wrb)))
		goto mcc_cq_destroy;

	if (be_cmd_mccq_create(adapter, q, cq))
		goto mcc_q_free;

	return 0;

mcc_q_free:
	be_queue_free(adapter, q);
mcc_cq_destroy:
	be_cmd_q_destroy(adapter, cq, QTYPE_CQ);
mcc_cq_free:
	be_queue_free(adapter, cq);
err:
	return -1;
}
1877
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001878static void be_tx_queues_destroy(struct be_adapter *adapter)
1879{
1880 struct be_queue_info *q;
Sathya Perla3c8def92011-06-12 20:01:58 +00001881 struct be_tx_obj *txo;
1882 u8 i;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001883
Sathya Perla3c8def92011-06-12 20:01:58 +00001884 for_all_tx_queues(adapter, txo, i) {
1885 q = &txo->q;
1886 if (q->created)
1887 be_cmd_q_destroy(adapter, q, QTYPE_TXQ);
1888 be_queue_free(adapter, q);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001889
Sathya Perla3c8def92011-06-12 20:01:58 +00001890 q = &txo->cq;
1891 if (q->created)
1892 be_cmd_q_destroy(adapter, q, QTYPE_CQ);
1893 be_queue_free(adapter, q);
1894 }
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001895}
1896
Sathya Perladafc0fe2011-10-24 02:45:02 +00001897static int be_num_txqs_want(struct be_adapter *adapter)
1898{
Padmanabh Ratnakarabb93952012-10-20 06:01:41 +00001899 if ((!lancer_chip(adapter) && sriov_want(adapter)) ||
1900 be_is_mc(adapter) ||
1901 (!lancer_chip(adapter) && !be_physfn(adapter)) ||
Sathya Perlaca34fe32012-11-06 17:48:56 +00001902 BE2_chip(adapter))
Sathya Perladafc0fe2011-10-24 02:45:02 +00001903 return 1;
1904 else
Padmanabh Ratnakarabb93952012-10-20 06:01:41 +00001905 return adapter->max_tx_queues;
Sathya Perladafc0fe2011-10-24 02:45:02 +00001906}
1907
/* Create one completion queue per TX queue.  First settles the number of
 * TX queues wanted and, if it differs from MAX_TX_QS, publishes the real
 * count to the stack (under rtnl).  Each CQ is bound to an EQ round-robin
 * so TX queues may share EQs when there are fewer EQs than TX queues.
 */
static int be_tx_cqs_create(struct be_adapter *adapter)
{
	struct be_queue_info *cq, *eq;
	int status;
	struct be_tx_obj *txo;
	u8 i;

	adapter->num_tx_qs = be_num_txqs_want(adapter);
	if (adapter->num_tx_qs != MAX_TX_QS) {
		rtnl_lock();
		netif_set_real_num_tx_queues(adapter->netdev,
			adapter->num_tx_qs);
		rtnl_unlock();
	}

	for_all_tx_queues(adapter, txo, i) {
		cq = &txo->cq;
		status = be_queue_alloc(adapter, cq, TX_CQ_LEN,
					sizeof(struct be_eth_tx_compl));
		if (status)
			return status;

		/* If num_evt_qs is less than num_tx_qs, then more than
		 * one txq share an eq
		 */
		eq = &adapter->eq_obj[i % adapter->num_evt_qs].q;
		status = be_cmd_cq_create(adapter, cq, eq, false, 3);
		if (status)
			return status;
	}
	return 0;
}
1940
/* Allocates and creates a TX WRB queue for each TX object. The matching
 * CQs must already exist (created by be_tx_cqs_create()).
 * Returns 0 on success or a negative error code.
 */
static int be_tx_qs_create(struct be_adapter *adapter)
{
	struct be_tx_obj *txo;
	int i, status;

	for_all_tx_queues(adapter, txo, i) {
		status = be_queue_alloc(adapter, &txo->q, TX_Q_LEN,
					sizeof(struct be_eth_wrb));
		if (status)
			return status;

		status = be_cmd_txq_create(adapter, &txo->q, &txo->cq);
		if (status)
			return status;
	}

	dev_info(&adapter->pdev->dev, "created %d TX queue(s)\n",
		 adapter->num_tx_qs);
	return 0;
}
1961
1962static void be_rx_cqs_destroy(struct be_adapter *adapter)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001963{
1964 struct be_queue_info *q;
Sathya Perla3abcded2010-10-03 22:12:27 -07001965 struct be_rx_obj *rxo;
1966 int i;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001967
Sathya Perla3abcded2010-10-03 22:12:27 -07001968 for_all_rx_queues(adapter, rxo, i) {
Sathya Perla3abcded2010-10-03 22:12:27 -07001969 q = &rxo->cq;
1970 if (q->created)
1971 be_cmd_q_destroy(adapter, q, QTYPE_CQ);
1972 be_queue_free(adapter, q);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001973 }
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001974}
1975
/* Sizes the RX ring count (one RSS ring per irq plus the default RX
 * queue when more than one irq is available, otherwise a single queue),
 * then allocates and creates one completion queue per RX ring, spreading
 * CQs over the event queues round-robin.
 * Returns 0 on success or a negative error code.
 */
static int be_rx_cqs_create(struct be_adapter *adapter)
{
	struct be_queue_info *eq, *cq;
	struct be_rx_obj *rxo;
	int rc, i;

	/* We'll create as many RSS rings as there are irqs.
	 * But when there's only one irq there's no use creating RSS rings
	 */
	adapter->num_rx_qs = (num_irqs(adapter) > 1) ?
				num_irqs(adapter) + 1 : 1;
	if (adapter->num_rx_qs != MAX_RX_QS) {
		/* rtnl_lock is required by netif_set_real_num_rx_queues() */
		rtnl_lock();
		netif_set_real_num_rx_queues(adapter->netdev,
					     adapter->num_rx_qs);
		rtnl_unlock();
	}

	adapter->big_page_size = (1 << get_order(rx_frag_size)) * PAGE_SIZE;
	for_all_rx_queues(adapter, rxo, i) {
		rxo->adapter = adapter;
		cq = &rxo->cq;
		rc = be_queue_alloc(adapter, cq, RX_CQ_LEN,
				sizeof(struct be_eth_rx_compl));
		if (rc)
			return rc;

		eq = &adapter->eq_obj[i % adapter->num_evt_qs].q;
		rc = be_cmd_cq_create(adapter, cq, eq, false, 3);
		if (rc)
			return rc;
	}

	dev_info(&adapter->pdev->dev,
		 "created %d RSS queue(s) and 1 default RX queue\n",
		 adapter->num_rx_qs - 1);
	return 0;
}
2014
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002015static irqreturn_t be_intx(int irq, void *dev)
2016{
2017 struct be_adapter *adapter = dev;
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002018 int num_evts;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002019
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002020 /* With INTx only one EQ is used */
2021 num_evts = event_handle(&adapter->eq_obj[0]);
2022 if (num_evts)
2023 return IRQ_HANDLED;
2024 else
2025 return IRQ_NONE;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002026}
2027
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002028static irqreturn_t be_msix(int irq, void *dev)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002029{
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002030 struct be_eq_obj *eqo = dev;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002031
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002032 event_handle(eqo);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002033 return IRQ_HANDLED;
2034}
2035
Sathya Perla2e588f82011-03-11 02:49:26 +00002036static inline bool do_gro(struct be_rx_compl_info *rxcp)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002037{
Sathya Perla2e588f82011-03-11 02:49:26 +00002038 return (rxcp->tcpf && !rxcp->err) ? true : false;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002039}
2040
/* NAPI RX handler for one RX ring: consumes up to @budget completions,
 * skipping flush completions, discarding partial-DMA and mis-filtered
 * frames, and delivering good frames via GRO or the regular RX path.
 * Re-arms the CQ and replenishes the RX ring when it runs low.
 * Returns the number of completions processed (<= @budget).
 */
static int be_process_rx(struct be_rx_obj *rxo, struct napi_struct *napi,
			int budget)
{
	struct be_adapter *adapter = rxo->adapter;
	struct be_queue_info *rx_cq = &rxo->cq;
	struct be_rx_compl_info *rxcp;
	u32 work_done;

	for (work_done = 0; work_done < budget; work_done++) {
		rxcp = be_rx_compl_get(rxo);
		if (!rxcp)
			break;

		/* Is it a flush compl that has no data */
		if (unlikely(rxcp->num_rcvd == 0))
			goto loop_continue;

		/* Discard compl with partial DMA Lancer B0 */
		if (unlikely(!rxcp->pkt_size)) {
			be_rx_compl_discard(rxo, rxcp);
			goto loop_continue;
		}

		/* On BE drop pkts that arrive due to imperfect filtering in
		 * promiscuous mode on some skews
		 */
		if (unlikely(rxcp->port != adapter->port_num &&
				!lancer_chip(adapter))) {
			be_rx_compl_discard(rxo, rxcp);
			goto loop_continue;
		}

		if (do_gro(rxcp))
			be_rx_compl_process_gro(rxo, napi, rxcp);
		else
			be_rx_compl_process(rxo, rxcp);
loop_continue:
		be_rx_stats_update(rxo, rxcp);
	}

	if (work_done) {
		be_cq_notify(adapter, rx_cq->id, true, work_done);

		/* Refill the RX ring before it gets too empty */
		if (atomic_read(&rxo->q.used) < RX_FRAGS_REFILL_WM)
			be_post_rx_frags(rxo, GFP_ATOMIC);
	}

	return work_done;
}
2090
/* Reaps up to @budget TX completions from txo's CQ, frees the completed
 * WRBs, and wakes netdev subqueue @idx if it was stopped for lack of
 * WRBs and the queue has drained below half full.
 * Returns true when the CQ was fully drained within the budget.
 */
static bool be_process_tx(struct be_adapter *adapter, struct be_tx_obj *txo,
			int budget, int idx)
{
	struct be_eth_tx_compl *txcp;
	int num_wrbs = 0, work_done;

	for (work_done = 0; work_done < budget; work_done++) {
		txcp = be_tx_compl_get(&txo->cq);
		if (!txcp)
			break;
		/* the completion carries the index of the last wrb of the
		 * completed request; process_tx returns how many wrbs freed
		 */
		num_wrbs += be_tx_compl_process(adapter, txo,
				AMAP_GET_BITS(struct amap_eth_tx_compl,
					wrb_index, txcp));
	}

	if (work_done) {
		be_cq_notify(adapter, txo->cq.id, true, work_done);
		atomic_sub(num_wrbs, &txo->q.used);

		/* As Tx wrbs have been freed up, wake up netdev queue
		 * if it was stopped due to lack of tx wrbs. */
		if (__netif_subqueue_stopped(adapter->netdev, idx) &&
			atomic_read(&txo->q.used) < txo->q.len / 2) {
			netif_wake_subqueue(adapter->netdev, idx);
		}

		u64_stats_update_begin(&tx_stats(txo)->sync_compl);
		tx_stats(txo)->tx_compl += work_done;
		u64_stats_update_end(&tx_stats(txo)->sync_compl);
	}
	return (work_done < budget); /* Done */
}
Sathya Perla3c8def92011-06-12 20:01:58 +00002123
/* NAPI poll handler for one event queue: services every TX and RX queue
 * striped onto this EQ (queues i, i+num_evt_qs, ... for EQ index i),
 * processes MCC completions on the MCC EQ, and either completes NAPI
 * and re-arms the EQ or stays in polling mode.
 * Returns the amount of work done (0..@budget).
 */
int be_poll(struct napi_struct *napi, int budget)
{
	struct be_eq_obj *eqo = container_of(napi, struct be_eq_obj, napi);
	struct be_adapter *adapter = eqo->adapter;
	int max_work = 0, work, i;
	bool tx_done;

	/* Process all TXQs serviced by this EQ */
	for (i = eqo->idx; i < adapter->num_tx_qs; i += adapter->num_evt_qs) {
		tx_done = be_process_tx(adapter, &adapter->tx_obj[i],
					eqo->tx_budget, i);
		if (!tx_done)
			max_work = budget;	/* TX not drained; keep polling */
	}

	/* This loop will iterate twice for EQ0 in which
	 * completions of the last RXQ (default one) are also processed
	 * For other EQs the loop iterates only once
	 */
	for (i = eqo->idx; i < adapter->num_rx_qs; i += adapter->num_evt_qs) {
		work = be_process_rx(&adapter->rx_obj[i], napi, budget);
		max_work = max(work, max_work);
	}

	if (is_mcc_eqo(eqo))
		be_process_mcc(adapter);

	if (max_work < budget) {
		napi_complete(napi);
		be_eq_notify(adapter, eqo->q.id, true, false, 0);
	} else {
		/* As we'll continue in polling mode, count and clear events */
		be_eq_notify(adapter, eqo->q.id, false, false, events_get(eqo));
	}
	return max_work;
}
2160
/* Polls the hardware for unrecoverable errors: SLIPORT registers on
 * Lancer, UE (unrecoverable error) config-space registers on BE chips.
 * Latches adapter->hw_error and logs details when a SLIPORT error is
 * seen; UE bits are only logged (see comment below).
 * Bails out early once a critical error has already been latched.
 */
void be_detect_error(struct be_adapter *adapter)
{
	u32 ue_lo = 0, ue_hi = 0, ue_lo_mask = 0, ue_hi_mask = 0;
	u32 sliport_status = 0, sliport_err1 = 0, sliport_err2 = 0;
	u32 i;

	if (be_crit_error(adapter))
		return;

	if (lancer_chip(adapter)) {
		sliport_status = ioread32(adapter->db + SLIPORT_STATUS_OFFSET);
		if (sliport_status & SLIPORT_STATUS_ERR_MASK) {
			sliport_err1 = ioread32(adapter->db +
					SLIPORT_ERROR1_OFFSET);
			sliport_err2 = ioread32(adapter->db +
					SLIPORT_ERROR2_OFFSET);
		}
	} else {
		pci_read_config_dword(adapter->pdev,
				PCICFG_UE_STATUS_LOW, &ue_lo);
		pci_read_config_dword(adapter->pdev,
				PCICFG_UE_STATUS_HIGH, &ue_hi);
		pci_read_config_dword(adapter->pdev,
				PCICFG_UE_STATUS_LOW_MASK, &ue_lo_mask);
		pci_read_config_dword(adapter->pdev,
				PCICFG_UE_STATUS_HI_MASK, &ue_hi_mask);

		/* drop the UE bits that are masked off */
		ue_lo = (ue_lo & ~ue_lo_mask);
		ue_hi = (ue_hi & ~ue_hi_mask);
	}

	/* On certain platforms BE hardware can indicate spurious UEs.
	 * Allow the h/w to stop working completely in case of a real UE.
	 * Hence not setting the hw_error for UE detection.
	 */
	if (sliport_status & SLIPORT_STATUS_ERR_MASK) {
		adapter->hw_error = true;
		dev_err(&adapter->pdev->dev,
			"Error detected in the card\n");
	}

	/* NOTE(review): same condition as the block above; the two
	 * blocks could be merged into one.
	 */
	if (sliport_status & SLIPORT_STATUS_ERR_MASK) {
		dev_err(&adapter->pdev->dev,
			"ERR: sliport status 0x%x\n", sliport_status);
		dev_err(&adapter->pdev->dev,
			"ERR: sliport error1 0x%x\n", sliport_err1);
		dev_err(&adapter->pdev->dev,
			"ERR: sliport error2 0x%x\n", sliport_err2);
	}

	if (ue_lo) {
		for (i = 0; ue_lo; ue_lo >>= 1, i++) {
			if (ue_lo & 1)
				dev_err(&adapter->pdev->dev,
				"UE: %s bit set\n", ue_status_low_desc[i]);
		}
	}

	if (ue_hi) {
		for (i = 0; ue_hi; ue_hi >>= 1, i++) {
			if (ue_hi & 1)
				dev_err(&adapter->pdev->dev,
				"UE: %s bit set\n", ue_status_hi_desc[i]);
		}
	}

}
2228
Sathya Perla8d56ff12009-11-22 22:02:26 +00002229static void be_msix_disable(struct be_adapter *adapter)
2230{
Sathya Perlaac6a0c42011-03-21 20:49:25 +00002231 if (msix_enabled(adapter)) {
Sathya Perla8d56ff12009-11-22 22:02:26 +00002232 pci_disable_msix(adapter->pdev);
Sathya Perlaac6a0c42011-03-21 20:49:25 +00002233 adapter->num_msix_vec = 0;
Sathya Perla3abcded2010-10-03 22:12:27 -07002234 }
2235}
2236
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002237static uint be_num_rss_want(struct be_adapter *adapter)
2238{
Yuval Mintz30e80b52012-07-01 03:19:00 +00002239 u32 num = 0;
Padmanabh Ratnakarabb93952012-10-20 06:01:41 +00002240
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002241 if ((adapter->function_caps & BE_FUNCTION_CAPS_RSS) &&
Padmanabh Ratnakarabb93952012-10-20 06:01:41 +00002242 (lancer_chip(adapter) ||
2243 (!sriov_want(adapter) && be_physfn(adapter)))) {
2244 num = adapter->max_rss_queues;
Yuval Mintz30e80b52012-07-01 03:19:00 +00002245 num = min_t(u32, num, (u32)netif_get_num_default_rss_queues());
2246 }
2247 return num;
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002248}
2249
/* Tries to enable MSI-x. The vector count wanted is derived from the
 * RSS ring target (plus RoCE vectors when supported); if the PCI core
 * reports a smaller count available, retries with that count. On total
 * failure the adapter just stays on INTx (this function returns void).
 */
static void be_msix_enable(struct be_adapter *adapter)
{
#define BE_MIN_MSIX_VECTORS		1
	int i, status, num_vec, num_roce_vec = 0;
	struct device *dev = &adapter->pdev->dev;

	/* If RSS queues are not used, need a vec for default RX Q */
	num_vec = min(be_num_rss_want(adapter), num_online_cpus());
	if (be_roce_supported(adapter)) {
		num_roce_vec = min_t(u32, MAX_ROCE_MSIX_VECTORS,
					(num_online_cpus() + 1));
		num_roce_vec = min(num_roce_vec, MAX_ROCE_EQS);
		num_vec += num_roce_vec;
		num_vec = min(num_vec, MAX_MSIX_VECTORS);
	}
	num_vec = max(num_vec, BE_MIN_MSIX_VECTORS);

	for (i = 0; i < num_vec; i++)
		adapter->msix_entries[i].entry = i;

	status = pci_enable_msix(adapter->pdev, adapter->msix_entries, num_vec);
	if (status == 0) {
		goto done;
	} else if (status >= BE_MIN_MSIX_VECTORS) {
		/* a positive return is the number of vectors that could
		 * actually be allocated; retry with that smaller count
		 */
		num_vec = status;
		if (pci_enable_msix(adapter->pdev, adapter->msix_entries,
				num_vec) == 0)
			goto done;
	}

	dev_warn(dev, "MSIx enable failed\n");
	return;
done:
	/* Split the granted vectors between NIC and RoCE when both exist */
	if (be_roce_supported(adapter)) {
		if (num_vec > num_roce_vec) {
			adapter->num_msix_vec = num_vec - num_roce_vec;
			adapter->num_msix_roce_vec =
				num_vec - adapter->num_msix_vec;
		} else {
			adapter->num_msix_vec = num_vec;
			adapter->num_msix_roce_vec = 0;
		}
	} else
		adapter->num_msix_vec = num_vec;
	dev_info(dev, "enabled %d MSI-x vector(s)\n", adapter->num_msix_vec);
	return;
}
2297
Sathya Perlafe6d2a32010-11-21 23:25:50 +00002298static inline int be_msix_vec_get(struct be_adapter *adapter,
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002299 struct be_eq_obj *eqo)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002300{
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002301 return adapter->msix_entries[eqo->idx].vector;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002302}
2303
/* Requests one MSI-x irq per event queue, naming each after the netdev
 * and queue index. On failure, frees the irqs already requested (in
 * reverse order) and disables MSI-x.
 * Returns 0 or the request_irq() error.
 */
static int be_msix_register(struct be_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	struct be_eq_obj *eqo;
	int status, i, vec;

	for_all_evt_queues(adapter, eqo, i) {
		sprintf(eqo->desc, "%s-q%d", netdev->name, i);
		vec = be_msix_vec_get(adapter, eqo);
		status = request_irq(vec, be_msix, 0, eqo->desc, eqo);
		if (status)
			goto err_msix;
	}

	return 0;
err_msix:
	/* unwind only the irqs requested so far (i-1 down to 0) */
	for (i--, eqo = &adapter->eq_obj[i]; i >= 0; i--, eqo--)
		free_irq(be_msix_vec_get(adapter, eqo), eqo);
	dev_warn(&adapter->pdev->dev, "MSIX Request IRQ failed - err %d\n",
		status);
	be_msix_disable(adapter);
	return status;
}
2327
/* Registers interrupt handling: MSI-x when enabled, otherwise falls
 * back to legacy INTx (PFs only -- VFs have no INTx support).
 * Sets adapter->isr_registered on success.
 * Returns 0 or a negative error code.
 */
static int be_irq_register(struct be_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	int status;

	if (msix_enabled(adapter)) {
		status = be_msix_register(adapter);
		if (status == 0)
			goto done;
		/* INTx is not supported for VF */
		if (!be_physfn(adapter))
			return status;
	}

	/* INTx */
	netdev->irq = adapter->pdev->irq;
	status = request_irq(netdev->irq, be_intx, IRQF_SHARED, netdev->name,
			adapter);
	if (status) {
		dev_err(&adapter->pdev->dev,
			"INTx request IRQ failed - err %d\n", status);
		return status;
	}
done:
	adapter->isr_registered = true;
	return 0;
}
2355
2356static void be_irq_unregister(struct be_adapter *adapter)
2357{
2358 struct net_device *netdev = adapter->netdev;
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002359 struct be_eq_obj *eqo;
Sathya Perla3abcded2010-10-03 22:12:27 -07002360 int i;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002361
2362 if (!adapter->isr_registered)
2363 return;
2364
2365 /* INTx */
Sathya Perlaac6a0c42011-03-21 20:49:25 +00002366 if (!msix_enabled(adapter)) {
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002367 free_irq(netdev->irq, adapter);
2368 goto done;
2369 }
2370
2371 /* MSIx */
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002372 for_all_evt_queues(adapter, eqo, i)
2373 free_irq(be_msix_vec_get(adapter, eqo), eqo);
Sathya Perla3abcded2010-10-03 22:12:27 -07002374
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002375done:
2376 adapter->isr_registered = false;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002377}
2378
/* Destroys every RX WRB queue. After the FW destroy command, waits 1ms
 * for in-flight DMA to finish and the flush completion to arrive, then
 * drains the CQ before freeing the host-side queue memory.
 */
static void be_rx_qs_destroy(struct be_adapter *adapter)
{
	struct be_queue_info *q;
	struct be_rx_obj *rxo;
	int i;

	for_all_rx_queues(adapter, rxo, i) {
		q = &rxo->q;
		if (q->created) {
			be_cmd_rxq_destroy(adapter, q);
			/* After the rxq is invalidated, wait for a grace time
			 * of 1ms for all dma to end and the flush compl to
			 * arrive
			 */
			mdelay(1);
			be_rx_cq_clean(rxo);
		}
		be_queue_free(adapter, q);
	}
}
2399
/* ndo_stop handler. Quiesces the device in a strict order: RoCE close,
 * async MCC off, interrupts off, NAPI disabled with irqs synchronized,
 * irqs freed, pending TX drained, and finally RX queues destroyed.
 * Always returns 0.
 */
static int be_close(struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_eq_obj *eqo;
	int i;

	be_roce_dev_close(adapter);

	be_async_mcc_disable(adapter);

	/* Lancer has no host-side interrupt-enable control */
	if (!lancer_chip(adapter))
		be_intr_set(adapter, false);

	for_all_evt_queues(adapter, eqo, i) {
		napi_disable(&eqo->napi);
		/* wait for any in-flight handler before cleaning the EQ */
		if (msix_enabled(adapter))
			synchronize_irq(be_msix_vec_get(adapter, eqo));
		else
			synchronize_irq(netdev->irq);
		be_eq_clean(eqo);
	}

	be_irq_unregister(adapter);

	/* Wait for all pending tx completions to arrive so that
	 * all tx skbs are freed.
	 */
	be_tx_compl_clean(adapter);

	be_rx_qs_destroy(adapter);
	return 0;
}
2432
/* Allocates and creates the RX WRB queues: the default (non-RSS) queue
 * first as the FW requires, then the RSS queues. When multiple RX rings
 * exist, fills the 128-entry RSS indirection table round-robin with the
 * RSS queue ids and programs it, then posts the initial RX buffers.
 * Returns 0 on success or a negative error code.
 */
static int be_rx_qs_create(struct be_adapter *adapter)
{
	struct be_rx_obj *rxo;
	int rc, i, j;
	u8 rsstable[128];

	for_all_rx_queues(adapter, rxo, i) {
		rc = be_queue_alloc(adapter, &rxo->q, RX_Q_LEN,
				sizeof(struct be_eth_rx_d));
		if (rc)
			return rc;
	}

	/* The FW would like the default RXQ to be created first */
	rxo = default_rxo(adapter);
	rc = be_cmd_rxq_create(adapter, &rxo->q, rxo->cq.id, rx_frag_size,
			adapter->if_handle, false, &rxo->rss_id);
	if (rc)
		return rc;

	for_all_rss_queues(adapter, rxo, i) {
		rc = be_cmd_rxq_create(adapter, &rxo->q, rxo->cq.id,
				rx_frag_size, adapter->if_handle,
				true, &rxo->rss_id);
		if (rc)
			return rc;
	}

	if (be_multi_rxq(adapter)) {
		/* stripe the rss_ids of the num_rx_qs-1 RSS rings across
		 * all 128 indirection-table slots
		 */
		for (j = 0; j < 128; j += adapter->num_rx_qs - 1) {
			for_all_rss_queues(adapter, rxo, i) {
				if ((j + i) >= 128)
					break;
				rsstable[j + i] = rxo->rss_id;
			}
		}
		rc = be_cmd_rss_config(adapter, rsstable, 128);
		if (rc)
			return rc;
	}

	/* First time posting */
	for_all_rx_queues(adapter, rxo, i)
		be_post_rx_frags(rxo, GFP_KERNEL);
	return 0;
}
2479
/* ndo_open handler: creates RX queues, registers irqs, enables
 * interrupts, arms all RX/TX CQs, enables async MCC, enables NAPI and
 * arms the EQs, reports the initial link state, and opens RoCE.
 * Returns 0 on success, -EIO after tearing down via be_close() on error.
 */
static int be_open(struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_eq_obj *eqo;
	struct be_rx_obj *rxo;
	struct be_tx_obj *txo;
	u8 link_status;
	int status, i;

	status = be_rx_qs_create(adapter);
	if (status)
		goto err;

	be_irq_register(adapter);

	/* Lancer has no host-side interrupt-enable control */
	if (!lancer_chip(adapter))
		be_intr_set(adapter, true);

	for_all_rx_queues(adapter, rxo, i)
		be_cq_notify(adapter, rxo->cq.id, true, 0);

	for_all_tx_queues(adapter, txo, i)
		be_cq_notify(adapter, txo->cq.id, true, 0);

	be_async_mcc_enable(adapter);

	for_all_evt_queues(adapter, eqo, i) {
		napi_enable(&eqo->napi);
		be_eq_notify(adapter, eqo->q.id, true, false, 0);
	}

	status = be_cmd_link_status_query(adapter, NULL, &link_status, 0);
	if (!status)
		be_link_status_update(adapter, link_status);

	be_roce_dev_open(adapter);
	return 0;
err:
	be_close(adapter->netdev);
	return -EIO;
}
2521
Ajit Khaparde71d8d1b2009-12-03 06:16:59 +00002522static int be_setup_wol(struct be_adapter *adapter, bool enable)
2523{
2524 struct be_dma_mem cmd;
2525 int status = 0;
2526 u8 mac[ETH_ALEN];
2527
2528 memset(mac, 0, ETH_ALEN);
2529
2530 cmd.size = sizeof(struct be_cmd_req_acpi_wol_magic_config);
Ivan Vecera2b7bceb2011-02-02 08:05:12 +00002531 cmd.va = dma_alloc_coherent(&adapter->pdev->dev, cmd.size, &cmd.dma,
2532 GFP_KERNEL);
Ajit Khaparde71d8d1b2009-12-03 06:16:59 +00002533 if (cmd.va == NULL)
2534 return -1;
2535 memset(cmd.va, 0, cmd.size);
2536
2537 if (enable) {
2538 status = pci_write_config_dword(adapter->pdev,
2539 PCICFG_PM_CONTROL_OFFSET, PCICFG_PM_CONTROL_MASK);
2540 if (status) {
2541 dev_err(&adapter->pdev->dev,
Frans Pop2381a552010-03-24 07:57:36 +00002542 "Could not enable Wake-on-lan\n");
Ivan Vecera2b7bceb2011-02-02 08:05:12 +00002543 dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va,
2544 cmd.dma);
Ajit Khaparde71d8d1b2009-12-03 06:16:59 +00002545 return status;
2546 }
2547 status = be_cmd_enable_magic_wol(adapter,
2548 adapter->netdev->dev_addr, &cmd);
2549 pci_enable_wake(adapter->pdev, PCI_D3hot, 1);
2550 pci_enable_wake(adapter->pdev, PCI_D3cold, 1);
2551 } else {
2552 status = be_cmd_enable_magic_wol(adapter, mac, &cmd);
2553 pci_enable_wake(adapter->pdev, PCI_D3hot, 0);
2554 pci_enable_wake(adapter->pdev, PCI_D3cold, 0);
2555 }
2556
Ivan Vecera2b7bceb2011-02-02 08:05:12 +00002557 dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va, cmd.dma);
Ajit Khaparde71d8d1b2009-12-03 06:16:59 +00002558 return status;
2559}
2560
/*
 * Generate a seed MAC address from the PF MAC Address using jhash.
 * MAC Address for VFs are assigned incrementally starting from the seed.
 * These addresses are programmed in the ASIC by the PF and the VF driver
 * queries for the MAC address during its probe.
 *
 * Returns the status of the last VF attempted; failures on earlier VFs
 * are only logged (status is overwritten each iteration).
 */
static inline int be_vf_eth_addr_config(struct be_adapter *adapter)
{
	u32 vf;
	int status = 0;
	u8 mac[ETH_ALEN];
	struct be_vf_cfg *vf_cfg;

	be_vf_eth_addr_generate(adapter, mac);

	for_all_vfs(adapter, vf_cfg, vf) {
		/* Lancer programs via the MAC list; BE adds a pmac entry */
		if (lancer_chip(adapter)) {
			status = be_cmd_set_mac_list(adapter, mac, 1, vf + 1);
		} else {
			status = be_cmd_pmac_add(adapter, mac,
					vf_cfg->if_handle,
					&vf_cfg->pmac_id, vf + 1);
		}

		if (status)
			dev_err(&adapter->pdev->dev,
			"Mac address assignment failed for VF %d\n", vf);
		else
			memcpy(vf_cfg->mac_addr, mac, ETH_ALEN);

		/* next VF gets seed MAC with last byte incremented */
		mac[5] += 1;
	}
	return status;
}
2595
/* Tears down per-VF state. If any VF is still assigned to a VM, skips
 * the FW cleanup and SR-IOV disable (only warns); otherwise removes each
 * VF's MAC and interface and disables SR-IOV. In all cases frees the
 * vf_cfg array and zeroes num_vfs.
 */
static void be_vf_clear(struct be_adapter *adapter)
{
	struct be_vf_cfg *vf_cfg;
	u32 vf;

	if (be_find_vfs(adapter, ASSIGNED)) {
		dev_warn(&adapter->pdev->dev, "VFs are assigned to VMs\n");
		goto done;
	}

	for_all_vfs(adapter, vf_cfg, vf) {
		if (lancer_chip(adapter))
			be_cmd_set_mac_list(adapter, NULL, 0, vf + 1);
		else
			be_cmd_pmac_del(adapter, vf_cfg->if_handle,
					vf_cfg->pmac_id, vf + 1);

		be_cmd_if_destroy(adapter, vf_cfg->if_handle, vf + 1);
	}
	pci_disable_sriov(adapter->pdev);
done:
	kfree(adapter->vf_cfg);
	adapter->num_vfs = 0;
}
2620
/* Undoes be_setup(): cancels the worker, clears VFs, deletes the extra
 * unicast MACs, destroys the interface and all queue sets, frees the
 * pmac_id array, and disables MSI-x. Always returns 0.
 */
static int be_clear(struct be_adapter *adapter)
{
	/* extra unicast MACs start at pmac_id[1]; index 0 is presumably
	 * the primary MAC -- TODO confirm against be_setup()
	 */
	int i = 1;

	if (adapter->flags & BE_FLAGS_WORKER_SCHEDULED) {
		cancel_delayed_work_sync(&adapter->work);
		adapter->flags &= ~BE_FLAGS_WORKER_SCHEDULED;
	}

	if (sriov_enabled(adapter))
		be_vf_clear(adapter);

	for (; adapter->uc_macs > 0; adapter->uc_macs--, i++)
		be_cmd_pmac_del(adapter, adapter->if_handle,
			adapter->pmac_id[i], 0);

	be_cmd_if_destroy(adapter, adapter->if_handle, 0);

	be_mcc_queues_destroy(adapter);
	be_rx_cqs_destroy(adapter);
	be_tx_queues_destroy(adapter);
	be_evt_queues_destroy(adapter);

	kfree(adapter->pmac_id);
	adapter->pmac_id = NULL;

	be_msix_disable(adapter);
	return 0;
}
2650
Padmanabh Ratnakarabb93952012-10-20 06:01:41 +00002651static void be_get_vf_if_cap_flags(struct be_adapter *adapter,
2652 u32 *cap_flags, u8 domain)
2653{
2654 bool profile_present = false;
2655 int status;
2656
2657 if (lancer_chip(adapter)) {
2658 status = be_cmd_get_profile_config(adapter, cap_flags, domain);
2659 if (!status)
2660 profile_present = true;
2661 }
2662
2663 if (!profile_present)
2664 *cap_flags = BE_IF_FLAGS_UNTAGGED | BE_IF_FLAGS_BROADCAST |
2665 BE_IF_FLAGS_MULTICAST;
2666}
2667
Sathya Perla39f1d942012-05-08 19:41:24 +00002668static int be_vf_setup_init(struct be_adapter *adapter)
Sathya Perla30128032011-11-10 19:17:57 +00002669{
Sathya Perla11ac75e2011-12-13 00:58:50 +00002670 struct be_vf_cfg *vf_cfg;
Sathya Perla30128032011-11-10 19:17:57 +00002671 int vf;
2672
Sathya Perla39f1d942012-05-08 19:41:24 +00002673 adapter->vf_cfg = kcalloc(adapter->num_vfs, sizeof(*vf_cfg),
2674 GFP_KERNEL);
2675 if (!adapter->vf_cfg)
2676 return -ENOMEM;
2677
Sathya Perla11ac75e2011-12-13 00:58:50 +00002678 for_all_vfs(adapter, vf_cfg, vf) {
2679 vf_cfg->if_handle = -1;
2680 vf_cfg->pmac_id = -1;
Sathya Perla30128032011-11-10 19:17:57 +00002681 }
Sathya Perla39f1d942012-05-08 19:41:24 +00002682 return 0;
Sathya Perla30128032011-11-10 19:17:57 +00002683}
2684
/* Enable SR-IOV and provision each VF in firmware: create its interface,
 * assign a MAC, cap its TX rate, record its default VLAN and finally
 * enable it.
 *
 * Returns 0 on success — including the "nothing to do" cases where VFs
 * are already enabled or the platform rejects SR-IOV — or an error code;
 * on error the caller is expected to clean up via be_clear().
 */
static int be_vf_setup(struct be_adapter *adapter)
{
	struct be_vf_cfg *vf_cfg;
	struct device *dev = &adapter->pdev->dev;
	u32 cap_flags, en_flags, vf;
	u16 def_vlan, lnk_speed;
	int status, enabled_vfs;

	/* If VFs were left enabled (e.g. by a previous driver load), keep
	 * them as-is and ignore the num_vfs module parameter */
	enabled_vfs = be_find_vfs(adapter, ENABLED);
	if (enabled_vfs) {
		dev_warn(dev, "%d VFs are already enabled\n", enabled_vfs);
		dev_warn(dev, "Ignoring num_vfs=%d setting\n", num_vfs);
		return 0;
	}

	/* clamp the request to what the device reports it supports */
	if (num_vfs > adapter->dev_num_vfs) {
		dev_warn(dev, "Device supports %d VFs and not %d\n",
			 adapter->dev_num_vfs, num_vfs);
		num_vfs = adapter->dev_num_vfs;
	}

	status = pci_enable_sriov(adapter->pdev, num_vfs);
	if (!status) {
		adapter->num_vfs = num_vfs;
	} else {
		/* Platform doesn't support SRIOV though device supports it */
		dev_warn(dev, "SRIOV enable failed\n");
		return 0;
	}

	status = be_vf_setup_init(adapter);
	if (status)
		goto err;

	/* create one FW interface per VF, enabling only the basic flags
	 * out of what the VF's profile permits */
	for_all_vfs(adapter, vf_cfg, vf) {
		be_get_vf_if_cap_flags(adapter, &cap_flags, vf + 1);

		en_flags = cap_flags & (BE_IF_FLAGS_UNTAGGED |
					BE_IF_FLAGS_BROADCAST |
					BE_IF_FLAGS_MULTICAST);

		status = be_cmd_if_create(adapter, cap_flags, en_flags,
					  &vf_cfg->if_handle, vf + 1);
		if (status)
			goto err;
	}

	/* assign MACs only when the VFs were enabled here, not inherited */
	if (!enabled_vfs) {
		status = be_vf_eth_addr_config(adapter);
		if (status)
			goto err;
	}

	for_all_vfs(adapter, vf_cfg, vf) {
		/* NOTE(review): link speed is assumed to be 1Gbps here and
		 * tx_rate derived as lnk_speed * 10 — confirm the unit the
		 * rest of the driver expects for tx_rate */
		lnk_speed = 1000;
		status = be_cmd_set_qos(adapter, lnk_speed, vf + 1);
		if (status)
			goto err;
		vf_cfg->tx_rate = lnk_speed * 10;

		/* cache the VF's default VLAN from the hardware switch */
		status = be_cmd_get_hsw_config(adapter, &def_vlan,
				vf + 1, vf_cfg->if_handle);
		if (status)
			goto err;
		vf_cfg->def_vid = def_vlan;

		be_cmd_enable_vf(adapter, vf + 1);
	}
	return 0;
err:
	return status;
}
2757
Sathya Perla30128032011-11-10 19:17:57 +00002758static void be_setup_init(struct be_adapter *adapter)
2759{
2760 adapter->vlan_prio_bmap = 0xff;
Ajit Khaparde42f11cf2012-04-21 18:53:22 +00002761 adapter->phy.link_speed = -1;
Sathya Perla30128032011-11-10 19:17:57 +00002762 adapter->if_handle = -1;
2763 adapter->be3_native = false;
2764 adapter->promiscuous = false;
2765 adapter->eq_next_idx = 0;
Padmanabh Ratnakarf25b1192012-10-20 06:02:52 +00002766
2767 if (be_physfn(adapter))
2768 adapter->cmd_privileges = MAX_PRIVILEGES;
2769 else
2770 adapter->cmd_privileges = MIN_PRIVILEGES;
Sathya Perla30128032011-11-10 19:17:57 +00002771}
2772
/* Determine the MAC address this function should use.
 *
 * If a permanent MAC is already recorded on the netdev, the current
 * dev_addr is reused.  Otherwise firmware is queried:
 *  - Lancer: read the provisioned MAC list (and re-query the address for
 *    the returned pmac_id when one is already active);
 *  - BE3 PF: read the permanent (factory) MAC;
 *  - BE3 VF: read the soft MAC assigned by the PF for this interface.
 *
 * *active_mac tells the caller whether the MAC is already programmed in
 * FW (true) or still needs a be_cmd_pmac_add() (false).  *pmac_id is
 * only written on the Lancer MAC-list path.
 */
static int be_get_mac_addr(struct be_adapter *adapter, u8 *mac, u32 if_handle,
			   bool *active_mac, u32 *pmac_id)
{
	int status = 0;

	if (!is_zero_ether_addr(adapter->netdev->perm_addr)) {
		memcpy(mac, adapter->netdev->dev_addr, ETH_ALEN);
		/* on BE3 VFs the current MAC is already active in FW */
		if (!lancer_chip(adapter) && !be_physfn(adapter))
			*active_mac = true;
		else
			*active_mac = false;

		return status;
	}

	if (lancer_chip(adapter)) {
		status = be_cmd_get_mac_from_list(adapter, mac,
						  active_mac, pmac_id, 0);
		if (*active_mac) {
			status = be_cmd_mac_addr_query(adapter, mac, false,
						       if_handle, *pmac_id);
		}
	} else if (be_physfn(adapter)) {
		/* For BE3, for PF get permanent MAC */
		status = be_cmd_mac_addr_query(adapter, mac, true, 0, 0);
		*active_mac = false;
	} else {
		/* For BE3, for VF get soft MAC assigned by PF*/
		status = be_cmd_mac_addr_query(adapter, mac, false,
					       if_handle, 0);
		*active_mac = true;
	}
	return status;
}
2807
/* Populate the adapter's resource limits (MAC/VLAN/multicast counts,
 * queue counts, interface capability flags).
 *
 * On Lancer the limits come from the FW per-function profile, clamped to
 * the driver's own maxima.  On other chips, or when the profile query
 * fails, fixed per-chip defaults are used.
 */
static void be_get_resources(struct be_adapter *adapter)
{
	int status;
	bool profile_present = false;

	if (lancer_chip(adapter)) {
		status = be_cmd_get_func_config(adapter);

		if (!status)
			profile_present = true;
	}

	if (profile_present) {
		/* Sanity fixes for Lancer */
		adapter->max_pmac_cnt = min_t(u16, adapter->max_pmac_cnt,
					      BE_UC_PMAC_COUNT);
		adapter->max_vlans = min_t(u16, adapter->max_vlans,
					   BE_NUM_VLANS_SUPPORTED);
		adapter->max_mcast_mac = min_t(u16, adapter->max_mcast_mac,
					       BE_MAX_MC);
		adapter->max_tx_queues = min_t(u16, adapter->max_tx_queues,
					       MAX_TX_QS);
		adapter->max_rss_queues = min_t(u16, adapter->max_rss_queues,
						BE3_MAX_RSS_QS);
		adapter->max_event_queues = min_t(u16,
						  adapter->max_event_queues,
						  BE3_MAX_RSS_QS);

		/* leave one RX queue out of the RSS set when all RX queues
		 * would otherwise be RSS-capable (presumably to reserve a
		 * default queue — confirm against queue-creation code) */
		if (adapter->max_rss_queues &&
		    adapter->max_rss_queues == adapter->max_rx_queues)
			adapter->max_rss_queues -= 1;

		/* an RSS queue cannot exist without a matching event queue */
		if (adapter->max_event_queues < adapter->max_rss_queues)
			adapter->max_rss_queues = adapter->max_event_queues;

	} else {
		if (be_physfn(adapter))
			adapter->max_pmac_cnt = BE_UC_PMAC_COUNT;
		else
			adapter->max_pmac_cnt = BE_VF_UC_PMAC_COUNT;

		/* FLEX10 mode: only 1/8th of the VLAN range is usable */
		if (adapter->function_mode & FLEX10_MODE)
			adapter->max_vlans = BE_NUM_VLANS_SUPPORTED/8;
		else
			adapter->max_vlans = BE_NUM_VLANS_SUPPORTED;

		adapter->max_mcast_mac = BE_MAX_MC;
		adapter->max_tx_queues = MAX_TX_QS;
		adapter->max_rss_queues = (adapter->be3_native) ?
					   BE3_MAX_RSS_QS : BE2_MAX_RSS_QS;
		adapter->max_event_queues = BE3_MAX_RSS_QS;

		adapter->if_cap_flags = BE_IF_FLAGS_UNTAGGED |
					BE_IF_FLAGS_BROADCAST |
					BE_IF_FLAGS_MULTICAST |
					BE_IF_FLAGS_PASS_L3L4_ERRORS |
					BE_IF_FLAGS_MCAST_PROMISCUOUS |
					BE_IF_FLAGS_VLAN_PROMISCUOUS |
					BE_IF_FLAGS_PROMISCUOUS;

		if (adapter->function_caps & BE_FUNCTION_CAPS_RSS)
			adapter->if_cap_flags |= BE_IF_FLAGS_RSS;
	}
}
2872
Sathya Perla39f1d942012-05-08 19:41:24 +00002873/* Routine to query per function resource limits */
2874static int be_get_config(struct be_adapter *adapter)
2875{
Padmanabh Ratnakarabb93952012-10-20 06:01:41 +00002876 int pos, status;
Sathya Perla39f1d942012-05-08 19:41:24 +00002877 u16 dev_num_vfs;
2878
Padmanabh Ratnakarabb93952012-10-20 06:01:41 +00002879 status = be_cmd_query_fw_cfg(adapter, &adapter->port_num,
2880 &adapter->function_mode,
2881 &adapter->function_caps);
2882 if (status)
2883 goto err;
2884
2885 be_get_resources(adapter);
2886
2887 /* primary mac needs 1 pmac entry */
2888 adapter->pmac_id = kcalloc(adapter->max_pmac_cnt + 1,
2889 sizeof(u32), GFP_KERNEL);
2890 if (!adapter->pmac_id) {
2891 status = -ENOMEM;
2892 goto err;
2893 }
2894
Sathya Perla39f1d942012-05-08 19:41:24 +00002895 pos = pci_find_ext_capability(adapter->pdev, PCI_EXT_CAP_ID_SRIOV);
2896 if (pos) {
2897 pci_read_config_word(adapter->pdev, pos + PCI_SRIOV_TOTAL_VF,
2898 &dev_num_vfs);
Vasundhara Volam7c5a5242012-08-28 20:37:41 +00002899 if (!lancer_chip(adapter))
2900 dev_num_vfs = min_t(u16, dev_num_vfs, MAX_VFS);
Sathya Perla39f1d942012-05-08 19:41:24 +00002901 adapter->dev_num_vfs = dev_num_vfs;
2902 }
Padmanabh Ratnakarabb93952012-10-20 06:01:41 +00002903err:
2904 return status;
Sathya Perla39f1d942012-05-08 19:41:24 +00002905}
2906
/* Bring up the adapter.  Order matters: query limits, enable MSI-X,
 * create EQs -> CQs -> MCC queues, create the FW interface, program the
 * MAC, create TX queues, then restore VLAN/RX-mode/flow-control state,
 * set up SR-IOV VFs and finally start the worker.  On any failure,
 * everything done so far is undone via be_clear().
 */
static int be_setup(struct be_adapter *adapter)
{
	struct device *dev = &adapter->pdev->dev;
	u32 en_flags;
	u32 tx_fc, rx_fc;
	int status;
	u8 mac[ETH_ALEN];
	bool active_mac;

	be_setup_init(adapter);

	if (!lancer_chip(adapter))
		be_cmd_req_native_mode(adapter);

	status = be_get_config(adapter);
	if (status)
		goto err;

	be_msix_enable(adapter);

	status = be_evt_queues_create(adapter);
	if (status)
		goto err;

	status = be_tx_cqs_create(adapter);
	if (status)
		goto err;

	status = be_rx_cqs_create(adapter);
	if (status)
		goto err;

	/* MCC queues require the event queues created above */
	status = be_mcc_queues_create(adapter);
	if (status)
		goto err;

	be_cmd_get_fn_privileges(adapter, &adapter->cmd_privileges, 0);
	/* In UMC mode FW does not return right privileges.
	 * Override with correct privilege equivalent to PF.
	 */
	if (be_is_mc(adapter))
		adapter->cmd_privileges = MAX_PRIVILEGES;

	en_flags = BE_IF_FLAGS_UNTAGGED | BE_IF_FLAGS_BROADCAST |
			BE_IF_FLAGS_MULTICAST | BE_IF_FLAGS_PASS_L3L4_ERRORS;

	if (adapter->function_caps & BE_FUNCTION_CAPS_RSS)
		en_flags |= BE_IF_FLAGS_RSS;

	/* never enable flags beyond what this function is capable of */
	en_flags = en_flags & adapter->if_cap_flags;

	status = be_cmd_if_create(adapter, adapter->if_cap_flags, en_flags,
				  &adapter->if_handle, 0);
	if (status != 0)
		goto err;

	memset(mac, 0, ETH_ALEN);
	active_mac = false;
	status = be_get_mac_addr(adapter, mac, adapter->if_handle,
				 &active_mac, &adapter->pmac_id[0]);
	if (status != 0)
		goto err;

	/* program the MAC into FW unless it is already active there */
	if (!active_mac) {
		status = be_cmd_pmac_add(adapter, mac, adapter->if_handle,
					 &adapter->pmac_id[0], 0);
		if (status != 0)
			goto err;
	}

	if (is_zero_ether_addr(adapter->netdev->dev_addr)) {
		memcpy(adapter->netdev->dev_addr, mac, ETH_ALEN);
		memcpy(adapter->netdev->perm_addr, mac, ETH_ALEN);
	}

	status = be_tx_qs_create(adapter);
	if (status)
		goto err;

	be_cmd_get_fw_ver(adapter, adapter->fw_ver, NULL);

	/* replay VLAN filters that were configured before this setup */
	if (adapter->vlans_added)
		be_vid_config(adapter);

	be_set_rx_mode(adapter->netdev);

	be_cmd_get_flow_control(adapter, &tx_fc, &rx_fc);

	/* reprogram flow control only when it differs from the FW state */
	if (rx_fc != adapter->rx_fc || tx_fc != adapter->tx_fc)
		be_cmd_set_flow_control(adapter, adapter->tx_fc,
					adapter->rx_fc);

	if (be_physfn(adapter) && num_vfs) {
		if (adapter->dev_num_vfs)
			be_vf_setup(adapter);
		else
			dev_warn(dev, "device doesn't support SRIOV\n");
	}

	status = be_cmd_get_phy_info(adapter);
	if (!status && be_pause_supported(adapter))
		adapter->phy.fc_autoneg = 1;

	/* kick off the periodic worker (stats, error polling, ...) */
	schedule_delayed_work(&adapter->work, msecs_to_jiffies(1000));
	adapter->flags |= BE_FLAGS_WORKER_SCHEDULED;
	return 0;
err:
	be_clear(adapter);
	return status;
}
3017
#ifdef CONFIG_NET_POLL_CONTROLLER
/* Service every event queue once; used by netconsole/netpoll when
 * normal interrupt delivery is unavailable.
 */
static void be_netpoll(struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_eq_obj *eqo;
	int idx;

	for_all_evt_queues(adapter, eqo, idx)
		event_handle(eqo);
}
#endif
3031
#define FW_FILE_HDR_SIGN 	"ServerEngines Corp. "
/* Marker identifying the flash section directory inside a UFI image;
 * get_fsec_info() memcmp()s all sizeof(flash_cookie) bytes (including
 * the NUL padding of the first half) against the on-image cookie.
 */
char flash_cookie[2][16] =	{"*** SE FLAS", "H DIRECTORY *** "};
3034
Sarveshwar Bandifa9a6fe2009-11-20 14:23:47 -08003035static bool be_flash_redboot(struct be_adapter *adapter,
Ajit Khaparde3f0d4562010-02-09 01:30:35 +00003036 const u8 *p, u32 img_start, int image_size,
3037 int hdr_size)
Sarveshwar Bandifa9a6fe2009-11-20 14:23:47 -08003038{
3039 u32 crc_offset;
3040 u8 flashed_crc[4];
3041 int status;
Ajit Khaparde3f0d4562010-02-09 01:30:35 +00003042
3043 crc_offset = hdr_size + img_start + image_size - 4;
3044
Sarveshwar Bandifa9a6fe2009-11-20 14:23:47 -08003045 p += crc_offset;
Ajit Khaparde3f0d4562010-02-09 01:30:35 +00003046
3047 status = be_cmd_get_flash_crc(adapter, flashed_crc,
Ajit Khapardef510fc62010-03-31 01:47:45 +00003048 (image_size - 4));
Sarveshwar Bandifa9a6fe2009-11-20 14:23:47 -08003049 if (status) {
3050 dev_err(&adapter->pdev->dev,
3051 "could not get crc from flash, not flashing redboot\n");
3052 return false;
3053 }
3054
3055 /*update redboot only if crc does not match*/
3056 if (!memcmp(flashed_crc, p, 4))
3057 return false;
3058 else
3059 return true;
Sarveshwar Bandifa9a6fe2009-11-20 14:23:47 -08003060}
3061
Sathya Perla306f1342011-08-02 19:57:45 +00003062static bool phy_flashing_required(struct be_adapter *adapter)
3063{
Ajit Khaparde42f11cf2012-04-21 18:53:22 +00003064 return (adapter->phy.phy_type == TN_8022 &&
3065 adapter->phy.interface_type == PHY_TYPE_BASET_10GB);
Sathya Perla306f1342011-08-02 19:57:45 +00003066}
3067
Padmanabh Ratnakarc165541e2012-04-25 01:47:15 +00003068static bool is_comp_in_ufi(struct be_adapter *adapter,
3069 struct flash_section_info *fsec, int type)
3070{
3071 int i = 0, img_type = 0;
3072 struct flash_section_info_g2 *fsec_g2 = NULL;
3073
Sathya Perlaca34fe32012-11-06 17:48:56 +00003074 if (BE2_chip(adapter))
Padmanabh Ratnakarc165541e2012-04-25 01:47:15 +00003075 fsec_g2 = (struct flash_section_info_g2 *)fsec;
3076
3077 for (i = 0; i < MAX_FLASH_COMP; i++) {
3078 if (fsec_g2)
3079 img_type = le32_to_cpu(fsec_g2->fsec_entry[i].type);
3080 else
3081 img_type = le32_to_cpu(fsec->fsec_entry[i].type);
3082
3083 if (img_type == type)
3084 return true;
3085 }
3086 return false;
3087
3088}
3089
3090struct flash_section_info *get_fsec_info(struct be_adapter *adapter,
3091 int header_size,
3092 const struct firmware *fw)
3093{
3094 struct flash_section_info *fsec = NULL;
3095 const u8 *p = fw->data;
3096
3097 p += header_size;
3098 while (p < (fw->data + fw->size)) {
3099 fsec = (struct flash_section_info *)p;
3100 if (!memcmp(flash_cookie, fsec->cookie, sizeof(flash_cookie)))
3101 return fsec;
3102 p += 32;
3103 }
3104 return NULL;
3105}
3106
Padmanabh Ratnakar773a2d72012-10-20 06:04:16 +00003107static int be_flash(struct be_adapter *adapter, const u8 *img,
3108 struct be_dma_mem *flash_cmd, int optype, int img_size)
3109{
3110 u32 total_bytes = 0, flash_op, num_bytes = 0;
3111 int status = 0;
3112 struct be_cmd_write_flashrom *req = flash_cmd->va;
3113
3114 total_bytes = img_size;
3115 while (total_bytes) {
3116 num_bytes = min_t(u32, 32*1024, total_bytes);
3117
3118 total_bytes -= num_bytes;
3119
3120 if (!total_bytes) {
3121 if (optype == OPTYPE_PHY_FW)
3122 flash_op = FLASHROM_OPER_PHY_FLASH;
3123 else
3124 flash_op = FLASHROM_OPER_FLASH;
3125 } else {
3126 if (optype == OPTYPE_PHY_FW)
3127 flash_op = FLASHROM_OPER_PHY_SAVE;
3128 else
3129 flash_op = FLASHROM_OPER_SAVE;
3130 }
3131
Padmanabh Ratnakarbe716442012-10-22 23:02:44 +00003132 memcpy(req->data_buf, img, num_bytes);
Padmanabh Ratnakar773a2d72012-10-20 06:04:16 +00003133 img += num_bytes;
3134 status = be_cmd_write_flashrom(adapter, flash_cmd, optype,
3135 flash_op, num_bytes);
3136 if (status) {
3137 if (status == ILLEGAL_IOCTL_REQ &&
3138 optype == OPTYPE_PHY_FW)
3139 break;
3140 dev_err(&adapter->pdev->dev,
3141 "cmd to write to flash rom failed.\n");
3142 return status;
3143 }
3144 }
3145 return 0;
3146}
3147
/* For BE2 and BE3: flash every applicable component of a UFI image.
 * The per-chip tables below map each image type to its flash offset,
 * flashrom op-type and maximum size; components absent from the UFI's
 * section directory (or not applicable, e.g. PHY FW on non-BASET boards)
 * are skipped.  Returns 0 on success, -1/FW status on failure.
 */
static int be_flash_BEx(struct be_adapter *adapter,
			 const struct firmware *fw,
			 struct be_dma_mem *flash_cmd,
			 int num_of_images)

{
	int status = 0, i, filehdr_size = 0;
	int img_hdrs_size = (num_of_images * sizeof(struct image_hdr));
	const u8 *p = fw->data;
	const struct flash_comp *pflashcomp;
	int num_comp, redboot;
	struct flash_section_info *fsec = NULL;

	/* gen3 (BE3) component layout: {offset, optype, max size, type} */
	struct flash_comp gen3_flash_types[] = {
		{ FLASH_iSCSI_PRIMARY_IMAGE_START_g3, OPTYPE_ISCSI_ACTIVE,
			FLASH_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_iSCSI},
		{ FLASH_REDBOOT_START_g3, OPTYPE_REDBOOT,
			FLASH_REDBOOT_IMAGE_MAX_SIZE_g3, IMAGE_BOOT_CODE},
		{ FLASH_iSCSI_BIOS_START_g3, OPTYPE_BIOS,
			FLASH_BIOS_IMAGE_MAX_SIZE_g3, IMAGE_OPTION_ROM_ISCSI},
		{ FLASH_PXE_BIOS_START_g3, OPTYPE_PXE_BIOS,
			FLASH_BIOS_IMAGE_MAX_SIZE_g3, IMAGE_OPTION_ROM_PXE},
		{ FLASH_FCoE_BIOS_START_g3, OPTYPE_FCOE_BIOS,
			FLASH_BIOS_IMAGE_MAX_SIZE_g3, IMAGE_OPTION_ROM_FCoE},
		{ FLASH_iSCSI_BACKUP_IMAGE_START_g3, OPTYPE_ISCSI_BACKUP,
			FLASH_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_BACKUP_iSCSI},
		{ FLASH_FCoE_PRIMARY_IMAGE_START_g3, OPTYPE_FCOE_FW_ACTIVE,
			FLASH_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_FCoE},
		{ FLASH_FCoE_BACKUP_IMAGE_START_g3, OPTYPE_FCOE_FW_BACKUP,
			FLASH_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_BACKUP_FCoE},
		{ FLASH_NCSI_START_g3, OPTYPE_NCSI_FW,
			FLASH_NCSI_IMAGE_MAX_SIZE_g3, IMAGE_NCSI},
		{ FLASH_PHY_FW_START_g3, OPTYPE_PHY_FW,
			FLASH_PHY_FW_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_PHY}
	};

	/* gen2 (BE2) component layout; no NCSI/PHY FW entries here */
	struct flash_comp gen2_flash_types[] = {
		{ FLASH_iSCSI_PRIMARY_IMAGE_START_g2, OPTYPE_ISCSI_ACTIVE,
			FLASH_IMAGE_MAX_SIZE_g2, IMAGE_FIRMWARE_iSCSI},
		{ FLASH_REDBOOT_START_g2, OPTYPE_REDBOOT,
			FLASH_REDBOOT_IMAGE_MAX_SIZE_g2, IMAGE_BOOT_CODE},
		{ FLASH_iSCSI_BIOS_START_g2, OPTYPE_BIOS,
			FLASH_BIOS_IMAGE_MAX_SIZE_g2, IMAGE_OPTION_ROM_ISCSI},
		{ FLASH_PXE_BIOS_START_g2, OPTYPE_PXE_BIOS,
			FLASH_BIOS_IMAGE_MAX_SIZE_g2, IMAGE_OPTION_ROM_PXE},
		{ FLASH_FCoE_BIOS_START_g2, OPTYPE_FCOE_BIOS,
			FLASH_BIOS_IMAGE_MAX_SIZE_g2, IMAGE_OPTION_ROM_FCoE},
		{ FLASH_iSCSI_BACKUP_IMAGE_START_g2, OPTYPE_ISCSI_BACKUP,
			FLASH_IMAGE_MAX_SIZE_g2, IMAGE_FIRMWARE_BACKUP_iSCSI},
		{ FLASH_FCoE_PRIMARY_IMAGE_START_g2, OPTYPE_FCOE_FW_ACTIVE,
			FLASH_IMAGE_MAX_SIZE_g2, IMAGE_FIRMWARE_FCoE},
		{ FLASH_FCoE_BACKUP_IMAGE_START_g2, OPTYPE_FCOE_FW_BACKUP,
			 FLASH_IMAGE_MAX_SIZE_g2, IMAGE_FIRMWARE_BACKUP_FCoE}
	};

	if (BE3_chip(adapter)) {
		pflashcomp = gen3_flash_types;
		filehdr_size = sizeof(struct flash_file_hdr_g3);
		num_comp = ARRAY_SIZE(gen3_flash_types);
	} else {
		pflashcomp = gen2_flash_types;
		filehdr_size = sizeof(struct flash_file_hdr_g2);
		num_comp = ARRAY_SIZE(gen2_flash_types);
	}

	/* Get flash section info*/
	fsec = get_fsec_info(adapter, filehdr_size + img_hdrs_size, fw);
	if (!fsec) {
		dev_err(&adapter->pdev->dev,
			"Invalid Cookie. UFI corrupted ?\n");
		return -1;
	}
	for (i = 0; i < num_comp; i++) {
		/* skip components that the UFI does not carry */
		if (!is_comp_in_ufi(adapter, fsec, pflashcomp[i].img_type))
			continue;

		/* NCSI FW is flashed only when the running FW version is at
		 * least 3.102.148.0 (string comparison on fw_ver) */
		if ((pflashcomp[i].optype == OPTYPE_NCSI_FW) &&
		    memcmp(adapter->fw_ver, "3.102.148.0", 11) < 0)
			continue;

		if (pflashcomp[i].optype == OPTYPE_PHY_FW &&
		    !phy_flashing_required(adapter))
			continue;

		/* redboot is flashed only when its CRC changed */
		if (pflashcomp[i].optype == OPTYPE_REDBOOT) {
			redboot = be_flash_redboot(adapter, fw->data,
				pflashcomp[i].offset, pflashcomp[i].size,
				filehdr_size + img_hdrs_size);
			if (!redboot)
				continue;
		}

		p = fw->data;
		p += filehdr_size + pflashcomp[i].offset + img_hdrs_size;
		/* bounds check against the FW file before flashing */
		if (p + pflashcomp[i].size > fw->data + fw->size)
			return -1;

		status = be_flash(adapter, p, flash_cmd, pflashcomp[i].optype,
					pflashcomp[i].size);
		if (status) {
			dev_err(&adapter->pdev->dev,
				"Flashing section type %d failed.\n",
				pflashcomp[i].img_type);
			return status;
		}
	}
	return 0;
}
3257
Padmanabh Ratnakar773a2d72012-10-20 06:04:16 +00003258static int be_flash_skyhawk(struct be_adapter *adapter,
3259 const struct firmware *fw,
3260 struct be_dma_mem *flash_cmd, int num_of_images)
Ajit Khaparde3f0d4562010-02-09 01:30:35 +00003261{
Padmanabh Ratnakar773a2d72012-10-20 06:04:16 +00003262 int status = 0, i, filehdr_size = 0;
3263 int img_offset, img_size, img_optype, redboot;
3264 int img_hdrs_size = num_of_images * sizeof(struct image_hdr);
3265 const u8 *p = fw->data;
3266 struct flash_section_info *fsec = NULL;
3267
3268 filehdr_size = sizeof(struct flash_file_hdr_g3);
3269 fsec = get_fsec_info(adapter, filehdr_size + img_hdrs_size, fw);
3270 if (!fsec) {
3271 dev_err(&adapter->pdev->dev,
3272 "Invalid Cookie. UFI corrupted ?\n");
3273 return -1;
3274 }
3275
3276 for (i = 0; i < le32_to_cpu(fsec->fsec_hdr.num_images); i++) {
3277 img_offset = le32_to_cpu(fsec->fsec_entry[i].offset);
3278 img_size = le32_to_cpu(fsec->fsec_entry[i].pad_size);
3279
3280 switch (le32_to_cpu(fsec->fsec_entry[i].type)) {
3281 case IMAGE_FIRMWARE_iSCSI:
3282 img_optype = OPTYPE_ISCSI_ACTIVE;
3283 break;
3284 case IMAGE_BOOT_CODE:
3285 img_optype = OPTYPE_REDBOOT;
3286 break;
3287 case IMAGE_OPTION_ROM_ISCSI:
3288 img_optype = OPTYPE_BIOS;
3289 break;
3290 case IMAGE_OPTION_ROM_PXE:
3291 img_optype = OPTYPE_PXE_BIOS;
3292 break;
3293 case IMAGE_OPTION_ROM_FCoE:
3294 img_optype = OPTYPE_FCOE_BIOS;
3295 break;
3296 case IMAGE_FIRMWARE_BACKUP_iSCSI:
3297 img_optype = OPTYPE_ISCSI_BACKUP;
3298 break;
3299 case IMAGE_NCSI:
3300 img_optype = OPTYPE_NCSI_FW;
3301 break;
3302 default:
3303 continue;
3304 }
3305
3306 if (img_optype == OPTYPE_REDBOOT) {
3307 redboot = be_flash_redboot(adapter, fw->data,
3308 img_offset, img_size,
3309 filehdr_size + img_hdrs_size);
3310 if (!redboot)
3311 continue;
3312 }
3313
3314 p = fw->data;
3315 p += filehdr_size + img_offset + img_hdrs_size;
3316 if (p + img_size > fw->data + fw->size)
3317 return -1;
3318
3319 status = be_flash(adapter, p, flash_cmd, img_optype, img_size);
3320 if (status) {
3321 dev_err(&adapter->pdev->dev,
3322 "Flashing section type %d failed.\n",
3323 fsec->fsec_entry[i].type);
3324 return status;
3325 }
3326 }
3327 return 0;
Ajit Khaparde3f0d4562010-02-09 01:30:35 +00003328}
3329
Padmanabh Ratnakarf67ef7b2012-07-12 03:57:09 +00003330static int lancer_wait_idle(struct be_adapter *adapter)
3331{
3332#define SLIPORT_IDLE_TIMEOUT 30
3333 u32 reg_val;
3334 int status = 0, i;
3335
3336 for (i = 0; i < SLIPORT_IDLE_TIMEOUT; i++) {
3337 reg_val = ioread32(adapter->db + PHYSDEV_CONTROL_OFFSET);
3338 if ((reg_val & PHYSDEV_CONTROL_INP_MASK) == 0)
3339 break;
3340
3341 ssleep(1);
3342 }
3343
3344 if (i == SLIPORT_IDLE_TIMEOUT)
3345 status = -1;
3346
3347 return status;
3348}
3349
3350static int lancer_fw_reset(struct be_adapter *adapter)
3351{
3352 int status = 0;
3353
3354 status = lancer_wait_idle(adapter);
3355 if (status)
3356 return status;
3357
3358 iowrite32(PHYSDEV_CONTROL_FW_RESET_MASK, adapter->db +
3359 PHYSDEV_CONTROL_OFFSET);
3360
3361 return status;
3362}
3363
/* Download a Lancer firmware image via the write_object FW command.
 * The image is streamed in 32KB chunks to the "/prg" object; a final
 * zero-length write commits it.  Depending on the FW's change_status,
 * either an immediate FW reset is issued or a reboot notice is logged.
 * Returns 0 on success or a negative/FW error code.
 */
static int lancer_fw_download(struct be_adapter *adapter,
				const struct firmware *fw)
{
#define LANCER_FW_DOWNLOAD_CHUNK      (32 * 1024)
#define LANCER_FW_DOWNLOAD_LOCATION   "/prg"
	struct be_dma_mem flash_cmd;
	const u8 *data_ptr = NULL;
	u8 *dest_image_ptr = NULL;
	size_t image_size = 0;
	u32 chunk_size = 0;
	u32 data_written = 0;
	u32 offset = 0;
	int status = 0;
	u8 add_status = 0;
	u8 change_status;

	/* FW object writes operate on 32-bit words */
	if (!IS_ALIGNED(fw->size, sizeof(u32))) {
		dev_err(&adapter->pdev->dev,
			"FW Image not properly aligned. "
			"Length must be 4 byte aligned.\n");
		status = -EINVAL;
		goto lancer_fw_exit;
	}

	/* one DMA buffer holds the cmd header plus a full data chunk */
	flash_cmd.size = sizeof(struct lancer_cmd_req_write_object)
				+ LANCER_FW_DOWNLOAD_CHUNK;
	flash_cmd.va = dma_alloc_coherent(&adapter->pdev->dev, flash_cmd.size,
					  &flash_cmd.dma, GFP_KERNEL);
	if (!flash_cmd.va) {
		status = -ENOMEM;
		dev_err(&adapter->pdev->dev,
			"Memory allocation failure while flashing\n");
		goto lancer_fw_exit;
	}

	dest_image_ptr = flash_cmd.va +
				sizeof(struct lancer_cmd_req_write_object);
	image_size = fw->size;
	data_ptr = fw->data;

	/* stream the image chunk by chunk; FW reports how much of each
	 * chunk it actually consumed via data_written */
	while (image_size) {
		chunk_size = min_t(u32, image_size, LANCER_FW_DOWNLOAD_CHUNK);

		/* Copy the image chunk content. */
		memcpy(dest_image_ptr, data_ptr, chunk_size);

		status = lancer_cmd_write_object(adapter, &flash_cmd,
						 chunk_size, offset,
						 LANCER_FW_DOWNLOAD_LOCATION,
						 &data_written, &change_status,
						 &add_status);
		if (status)
			break;

		offset += data_written;
		data_ptr += data_written;
		image_size -= data_written;
	}

	if (!status) {
		/* Commit the FW written */
		status = lancer_cmd_write_object(adapter, &flash_cmd,
						 0, offset,
						 LANCER_FW_DOWNLOAD_LOCATION,
						 &data_written, &change_status,
						 &add_status);
	}

	dma_free_coherent(&adapter->pdev->dev, flash_cmd.size, flash_cmd.va,
				flash_cmd.dma);
	if (status) {
		dev_err(&adapter->pdev->dev,
			"Firmware load error. "
			"Status code: 0x%x Additional Status: 0x%x\n",
			status, add_status);
		goto lancer_fw_exit;
	}

	/* activate the new FW: reset now if FW says so, else tell the
	 * user a reboot is required */
	if (change_status == LANCER_FW_RESET_NEEDED) {
		status = lancer_fw_reset(adapter);
		if (status) {
			dev_err(&adapter->pdev->dev,
				"Adapter busy for FW reset.\n"
				"New FW will not be active.\n");
			goto lancer_fw_exit;
		}
	} else if (change_status != LANCER_NO_RESET_NEEDED) {
			dev_err(&adapter->pdev->dev,
				"System reboot required for new FW"
				" to be active\n");
	}

	dev_info(&adapter->pdev->dev, "Firmware flashed successfully\n");
lancer_fw_exit:
	return status;
}
3460
Sathya Perlaca34fe32012-11-06 17:48:56 +00003461#define UFI_TYPE2 2
3462#define UFI_TYPE3 3
3463#define UFI_TYPE4 4
3464static int be_get_ufi_type(struct be_adapter *adapter,
3465 struct flash_file_hdr_g2 *fhdr)
Padmanabh Ratnakar773a2d72012-10-20 06:04:16 +00003466{
3467 if (fhdr == NULL)
3468 goto be_get_ufi_exit;
3469
Sathya Perlaca34fe32012-11-06 17:48:56 +00003470 if (skyhawk_chip(adapter) && fhdr->build[0] == '4')
3471 return UFI_TYPE4;
3472 else if (BE3_chip(adapter) && fhdr->build[0] == '3')
3473 return UFI_TYPE3;
3474 else if (BE2_chip(adapter) && fhdr->build[0] == '2')
3475 return UFI_TYPE2;
Padmanabh Ratnakar773a2d72012-10-20 06:04:16 +00003476
3477be_get_ufi_exit:
3478 dev_err(&adapter->pdev->dev,
3479 "UFI and Interface are not compatible for flashing\n");
3480 return -1;
3481}
3482
/* Flash a UFI firmware image to a BE2/BE3/Skyhawk adapter.
 *
 * Determines the image generation via be_get_ufi_type() and dispatches
 * to the matching flash routine.  For gen-3/gen-4 images the per-image
 * headers are walked and flashing is triggered on imageid 1; gen-2
 * images are flashed directly with no image count.
 *
 * Returns 0 on success, -ENOMEM on allocation failure, -1 on an
 * incompatible image, or the flash routine's status.
 */
static int be_fw_download(struct be_adapter *adapter, const struct firmware* fw)
{
	struct flash_file_hdr_g2 *fhdr;
	struct flash_file_hdr_g3 *fhdr3;
	struct image_hdr *img_hdr_ptr = NULL;
	struct be_dma_mem flash_cmd;
	const u8 *p;
	int status = 0, i = 0, num_imgs = 0, ufi_type = 0;

	/* Single DMA buffer reused for every write_flashrom command */
	flash_cmd.size = sizeof(struct be_cmd_write_flashrom);
	flash_cmd.va = dma_alloc_coherent(&adapter->pdev->dev, flash_cmd.size,
					  &flash_cmd.dma, GFP_KERNEL);
	if (!flash_cmd.va) {
		status = -ENOMEM;
		dev_err(&adapter->pdev->dev,
			"Memory allocation failure while flashing\n");
		goto be_fw_exit;
	}

	p = fw->data;
	fhdr = (struct flash_file_hdr_g2 *)p;

	ufi_type = be_get_ufi_type(adapter, fhdr);

	/* NOTE(review): the g3 header is read unconditionally here, even
	 * when the image later turns out to be gen-2; for gen-2 images the
	 * imageid==1 match is what keeps the loop body from flashing.
	 */
	fhdr3 = (struct flash_file_hdr_g3 *)fw->data;
	num_imgs = le32_to_cpu(fhdr3->num_imgs);
	for (i = 0; i < num_imgs; i++) {
		img_hdr_ptr = (struct image_hdr *)(fw->data +
				(sizeof(struct flash_file_hdr_g3) +
				 i * sizeof(struct image_hdr)));
		if (le32_to_cpu(img_hdr_ptr->imageid) == 1) {
			if (ufi_type == UFI_TYPE4)
				status = be_flash_skyhawk(adapter, fw,
							&flash_cmd, num_imgs);
			else if (ufi_type == UFI_TYPE3)
				status = be_flash_BEx(adapter, fw, &flash_cmd,
						      num_imgs);
		}
	}

	/* Gen-2 images carry no per-image headers; flash them whole */
	if (ufi_type == UFI_TYPE2)
		status = be_flash_BEx(adapter, fw, &flash_cmd, 0);
	else if (ufi_type == -1)
		status = -1;

	dma_free_coherent(&adapter->pdev->dev, flash_cmd.size, flash_cmd.va,
			  flash_cmd.dma);
	if (status) {
		dev_err(&adapter->pdev->dev, "Firmware load error\n");
		goto be_fw_exit;
	}

	dev_info(&adapter->pdev->dev, "Firmware flashed successfully\n");

be_fw_exit:
	return status;
}
3540
3541int be_load_fw(struct be_adapter *adapter, u8 *fw_file)
3542{
3543 const struct firmware *fw;
3544 int status;
3545
3546 if (!netif_running(adapter->netdev)) {
3547 dev_err(&adapter->pdev->dev,
3548 "Firmware load not allowed (interface is down)\n");
3549 return -1;
3550 }
3551
3552 status = request_firmware(&fw, fw_file, &adapter->pdev->dev);
3553 if (status)
3554 goto fw_exit;
3555
3556 dev_info(&adapter->pdev->dev, "Flashing firmware file %s\n", fw_file);
3557
3558 if (lancer_chip(adapter))
3559 status = lancer_fw_download(adapter, fw);
3560 else
3561 status = be_fw_download(adapter, fw);
3562
Ajit Khaparde84517482009-09-04 03:12:16 +00003563fw_exit:
3564 release_firmware(fw);
3565 return status;
3566}
3567
/* netdev callbacks exposed to the network stack for every be2net
 * interface, including the SR-IOV ndo_set_vf_* hooks and (when
 * configured) netpoll support.
 */
static const struct net_device_ops be_netdev_ops = {
	.ndo_open = be_open,
	.ndo_stop = be_close,
	.ndo_start_xmit = be_xmit,
	.ndo_set_rx_mode = be_set_rx_mode,
	.ndo_set_mac_address = be_mac_addr_set,
	.ndo_change_mtu = be_change_mtu,
	.ndo_get_stats64 = be_get_stats64,
	.ndo_validate_addr = eth_validate_addr,
	.ndo_vlan_rx_add_vid = be_vlan_add_vid,
	.ndo_vlan_rx_kill_vid = be_vlan_rem_vid,
	.ndo_set_vf_mac = be_set_vf_mac,
	.ndo_set_vf_vlan = be_set_vf_vlan,
	.ndo_set_vf_tx_rate = be_set_vf_tx_rate,
	.ndo_get_vf_config = be_get_vf_config,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller = be_netpoll,
#endif
};
3587
/* One-time netdev setup: advertise offload features, install the
 * netdev/ethtool ops, and register one NAPI context per event queue.
 * Called from be_probe() before register_netdev().
 */
static void be_netdev_init(struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_eq_obj *eqo;
	int i;

	/* User-toggleable offloads: scatter-gather, TSO, checksum
	 * offload, RX checksum and VLAN tag insertion.
	 */
	netdev->hw_features |= NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6 |
		NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_RXCSUM |
		NETIF_F_HW_VLAN_TX;
	if (be_multi_rxq(adapter))
		netdev->hw_features |= NETIF_F_RXHASH;

	/* Everything above is enabled by default; VLAN stripping and
	 * filtering are always on and not user-toggleable.
	 */
	netdev->features |= netdev->hw_features |
		NETIF_F_HW_VLAN_RX | NETIF_F_HW_VLAN_FILTER;

	netdev->vlan_features |= NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6 |
		NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;

	netdev->priv_flags |= IFF_UNICAST_FLT;

	netdev->flags |= IFF_MULTICAST;

	netif_set_gso_max_size(netdev, 65535 - ETH_HLEN);

	netdev->netdev_ops = &be_netdev_ops;

	SET_ETHTOOL_OPS(netdev, &be_ethtool_ops);

	/* One NAPI instance per event queue */
	for_all_evt_queues(adapter, eqo, i)
		netif_napi_add(netdev, &eqo->napi, be_poll, BE_NAPI_WEIGHT);
}
3619
/* Undo be_map_pci_bars()/lancer_roce_map_pci_bars(): unmap whichever of
 * the CSR, doorbell and RoCE doorbell BARs were mapped.  Unmapped
 * pointers are left NULL by the mapping paths, hence the guards.
 */
static void be_unmap_pci_bars(struct be_adapter *adapter)
{
	if (adapter->csr)
		iounmap(adapter->csr);
	if (adapter->db)
		iounmap(adapter->db);
	if (adapter->roce_db.base)
		pci_iounmap(adapter->pdev, adapter->roce_db.base);
}
3629
3630static int lancer_roce_map_pci_bars(struct be_adapter *adapter)
3631{
3632 struct pci_dev *pdev = adapter->pdev;
3633 u8 __iomem *addr;
3634
3635 addr = pci_iomap(pdev, 2, 0);
3636 if (addr == NULL)
3637 return -ENOMEM;
3638
3639 adapter->roce_db.base = addr;
3640 adapter->roce_db.io_addr = pci_resource_start(pdev, 2);
3641 adapter->roce_db.size = 8192;
3642 adapter->roce_db.total_size = pci_resource_len(pdev, 2);
3643 return 0;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003644}
3645
/* Map the PCI BARs this adapter generation needs.
 *
 * Lancer: BAR 0 is the doorbell region (type 2/3 only) and BAR 2 the
 * RoCE doorbells (SLI interface type 3 only).  Other chips: BAR 2 is
 * the CSR region (PF only) and the doorbell BAR number depends on
 * chip/function (BAR 4 for BE2 and for PFs, BAR 0 for BE3+ VFs).
 * Returns 0 on success, -ENOMEM on any mapping failure (with all
 * partial mappings torn down).
 */
static int be_map_pci_bars(struct be_adapter *adapter)
{
	u8 __iomem *addr;
	int db_reg;

	if (lancer_chip(adapter)) {
		if (be_type_2_3(adapter)) {
			addr = ioremap_nocache(
					pci_resource_start(adapter->pdev, 0),
					pci_resource_len(adapter->pdev, 0));
			if (addr == NULL)
				return -ENOMEM;
			adapter->db = addr;
		}
		if (adapter->if_type == SLI_INTF_TYPE_3) {
			if (lancer_roce_map_pci_bars(adapter))
				goto pci_map_err;
		}
		return 0;
	}

	if (be_physfn(adapter)) {
		addr = ioremap_nocache(pci_resource_start(adapter->pdev, 2),
				pci_resource_len(adapter->pdev, 2));
		if (addr == NULL)
			return -ENOMEM;
		adapter->csr = addr;
	}

	/* Select the doorbell BAR: BE2 always uses BAR 4; later chips use
	 * BAR 4 for the PF and BAR 0 for VFs.
	 */
	if (BE2_chip(adapter)) {
		db_reg = 4;
	} else {
		if (be_physfn(adapter))
			db_reg = 4;
		else
			db_reg = 0;
	}
	addr = ioremap_nocache(pci_resource_start(adapter->pdev, db_reg),
				pci_resource_len(adapter->pdev, db_reg));
	if (addr == NULL)
		goto pci_map_err;
	adapter->db = addr;

	/* Skyhawk shares the doorbell BAR with RoCE; record its location */
	if (skyhawk_chip(adapter)) {
		adapter->roce_db.size = 4096;
		adapter->roce_db.io_addr =
				pci_resource_start(adapter->pdev, db_reg);
		adapter->roce_db.total_size =
				pci_resource_len(adapter->pdev, db_reg);
	}
	return 0;
pci_map_err:
	be_unmap_pci_bars(adapter);
	return -ENOMEM;
}
3700
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003701static void be_ctrl_cleanup(struct be_adapter *adapter)
3702{
Sathya Perla8788fdc2009-07-27 22:52:03 +00003703 struct be_dma_mem *mem = &adapter->mbox_mem_alloced;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003704
3705 be_unmap_pci_bars(adapter);
3706
3707 if (mem->va)
Ivan Vecera2b7bceb2011-02-02 08:05:12 +00003708 dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
3709 mem->dma);
Sathya Perlae7b909a2009-11-22 22:01:10 +00003710
Sathya Perla5b8821b2011-08-02 19:57:44 +00003711 mem = &adapter->rx_filter;
Sathya Perlae7b909a2009-11-22 22:01:10 +00003712 if (mem->va)
Ivan Vecera2b7bceb2011-02-02 08:05:12 +00003713 dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
3714 mem->dma);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003715}
3716
/* Initialize the adapter control path: map PCI BARs, allocate the
 * 16-byte-aligned mailbox and the rx-filter command DMA buffers, and
 * set up the mailbox/MCC locks.  On any failure all previously
 * acquired resources are released via the goto chain.
 * Returns 0 on success or a negative errno.
 */
static int be_ctrl_init(struct be_adapter *adapter)
{
	struct be_dma_mem *mbox_mem_alloc = &adapter->mbox_mem_alloced;
	struct be_dma_mem *mbox_mem_align = &adapter->mbox_mem;
	struct be_dma_mem *rx_filter = &adapter->rx_filter;
	int status;

	status = be_map_pci_bars(adapter);
	if (status)
		goto done;

	/* Over-allocate by 16 bytes so the mailbox used by hardware can
	 * be aligned to a 16-byte boundary within the allocation.
	 */
	mbox_mem_alloc->size = sizeof(struct be_mcc_mailbox) + 16;
	mbox_mem_alloc->va = dma_alloc_coherent(&adapter->pdev->dev,
						mbox_mem_alloc->size,
						&mbox_mem_alloc->dma,
						GFP_KERNEL);
	if (!mbox_mem_alloc->va) {
		status = -ENOMEM;
		goto unmap_pci_bars;
	}
	mbox_mem_align->size = sizeof(struct be_mcc_mailbox);
	mbox_mem_align->va = PTR_ALIGN(mbox_mem_alloc->va, 16);
	mbox_mem_align->dma = PTR_ALIGN(mbox_mem_alloc->dma, 16);
	memset(mbox_mem_align->va, 0, sizeof(struct be_mcc_mailbox));

	rx_filter->size = sizeof(struct be_cmd_req_rx_filter);
	rx_filter->va = dma_alloc_coherent(&adapter->pdev->dev, rx_filter->size,
					&rx_filter->dma, GFP_KERNEL);
	if (rx_filter->va == NULL) {
		status = -ENOMEM;
		goto free_mbox;
	}
	memset(rx_filter->va, 0, rx_filter->size);

	mutex_init(&adapter->mbox_lock);
	spin_lock_init(&adapter->mcc_lock);
	spin_lock_init(&adapter->mcc_cq_lock);

	init_completion(&adapter->flash_compl);
	/* Saved config space is restored on EEH/error recovery */
	pci_save_state(adapter->pdev);
	return 0;

free_mbox:
	dma_free_coherent(&adapter->pdev->dev, mbox_mem_alloc->size,
			  mbox_mem_alloc->va, mbox_mem_alloc->dma);

unmap_pci_bars:
	be_unmap_pci_bars(adapter);

done:
	return status;
}
3768
3769static void be_stats_cleanup(struct be_adapter *adapter)
3770{
Sathya Perla3abcded2010-10-03 22:12:27 -07003771 struct be_dma_mem *cmd = &adapter->stats_cmd;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003772
3773 if (cmd->va)
Ivan Vecera2b7bceb2011-02-02 08:05:12 +00003774 dma_free_coherent(&adapter->pdev->dev, cmd->size,
3775 cmd->va, cmd->dma);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003776}
3777
3778static int be_stats_init(struct be_adapter *adapter)
3779{
Sathya Perla3abcded2010-10-03 22:12:27 -07003780 struct be_dma_mem *cmd = &adapter->stats_cmd;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003781
Sathya Perlaca34fe32012-11-06 17:48:56 +00003782 if (lancer_chip(adapter))
3783 cmd->size = sizeof(struct lancer_cmd_req_pport_stats);
3784 else if (BE2_chip(adapter))
Ajit Khaparde89a88ab2011-05-16 07:36:18 +00003785 cmd->size = sizeof(struct be_cmd_req_get_stats_v0);
Sathya Perlaca34fe32012-11-06 17:48:56 +00003786 else
3787 /* BE3 and Skyhawk */
3788 cmd->size = sizeof(struct be_cmd_req_get_stats_v1);
3789
Ivan Vecera2b7bceb2011-02-02 08:05:12 +00003790 cmd->va = dma_alloc_coherent(&adapter->pdev->dev, cmd->size, &cmd->dma,
3791 GFP_KERNEL);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003792 if (cmd->va == NULL)
3793 return -1;
David S. Millerd291b9a2010-01-28 21:36:21 -08003794 memset(cmd->va, 0, cmd->size);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003795 return 0;
3796}
3797
/* PCI remove callback: tear down in the reverse order of be_probe().
 * The ordering matters — the recovery worker is cancelled before the
 * netdev is unregistered, and the firmware is told we are done only
 * after be_clear() has released its resources.
 */
static void __devexit be_remove(struct pci_dev *pdev)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);

	if (!adapter)
		return;

	be_roce_dev_remove(adapter);

	/* Stop the SLIPORT recovery worker before the netdev goes away */
	cancel_delayed_work_sync(&adapter->func_recovery_work);

	unregister_netdev(adapter->netdev);

	be_clear(adapter);

	/* tell fw we're done with firing cmds */
	be_cmd_fw_clean(adapter);

	be_stats_cleanup(adapter);

	be_ctrl_cleanup(adapter);

	pci_disable_pcie_error_reporting(pdev);

	pci_set_drvdata(pdev, NULL);
	pci_release_regions(pdev);
	pci_disable_device(pdev);

	free_netdev(adapter->netdev);
}
3828
Ajit Khaparde4762f6c2012-03-18 06:23:11 +00003829bool be_is_wol_supported(struct be_adapter *adapter)
3830{
3831 return ((adapter->wol_cap & BE_WOL_CAP) &&
3832 !be_is_wol_excluded(adapter)) ? true : false;
3833}
3834
/* Query the firmware's UART trace level via the extended-FAT
 * capabilities command.  Lancer does not support this query and always
 * reports level 0.  On any failure (allocation or command) the level
 * defaults to 0.
 */
u32 be_get_fw_log_level(struct be_adapter *adapter)
{
	struct be_dma_mem extfat_cmd;
	struct be_fat_conf_params *cfgs;
	int status;
	u32 level = 0;
	int j;

	if (lancer_chip(adapter))
		return 0;

	memset(&extfat_cmd, 0, sizeof(struct be_dma_mem));
	extfat_cmd.size = sizeof(struct be_cmd_resp_get_ext_fat_caps);
	extfat_cmd.va = pci_alloc_consistent(adapter->pdev, extfat_cmd.size,
					     &extfat_cmd.dma);

	if (!extfat_cmd.va) {
		dev_err(&adapter->pdev->dev, "%s: Memory allocation failure\n",
			__func__);
		goto err;
	}

	status = be_cmd_get_ext_fat_capabilites(adapter, &extfat_cmd);
	if (!status) {
		/* Config params follow the generic command response header */
		cfgs = (struct be_fat_conf_params *)(extfat_cmd.va +
						sizeof(struct be_cmd_resp_hdr));
		/* Scan module 0's trace modes for the UART entry; the last
		 * matching entry wins.
		 */
		for (j = 0; j < le32_to_cpu(cfgs->module[0].num_modes); j++) {
			if (cfgs->module[0].trace_lvl[j].mode == MODE_UART)
				level = cfgs->module[0].trace_lvl[j].dbg_lvl;
		}
	}
	pci_free_consistent(adapter->pdev, extfat_cmd.size, extfat_cmd.va,
			    extfat_cmd.dma);
err:
	return level;
}
Padmanabh Ratnakarabb93952012-10-20 06:01:41 +00003871
/* Fetch one-time configuration from the firmware at probe time:
 * controller attributes, Wake-on-LAN capability, temperature-poll
 * frequency and the message level derived from the fw log level.
 * Returns 0 on success or the attribute-query status.
 */
static int be_get_initial_config(struct be_adapter *adapter)
{
	int status;
	u32 level;

	status = be_cmd_get_cntl_attributes(adapter);
	if (status)
		return status;

	status = be_cmd_get_acpi_wol_cap(adapter);
	if (status) {
		/* in case of a failure to get wol capabillities
		 * check the exclusion list to determine WOL capability */
		if (!be_is_wol_excluded(adapter))
			adapter->wol_cap |= BE_WOL_CAP;
	}

	if (be_is_wol_supported(adapter))
		adapter->wol = true;

	/* Must be a power of 2 or else MODULO will BUG_ON */
	adapter->be_get_temp_freq = 64;

	/* Enable HW-level messages only when fw logging is at or below
	 * the default level.
	 */
	level = be_get_fw_log_level(adapter);
	adapter->msg_enable = level <= FW_LOG_LEVEL_DEFAULT ? NETIF_MSG_HW : 0;

	return 0;
}
3900
Sathya Perla39f1d942012-05-08 19:41:24 +00003901static int be_dev_type_check(struct be_adapter *adapter)
Sathya Perlafe6d2a32010-11-21 23:25:50 +00003902{
3903 struct pci_dev *pdev = adapter->pdev;
3904 u32 sli_intf = 0, if_type;
3905
3906 switch (pdev->device) {
3907 case BE_DEVICE_ID1:
3908 case OC_DEVICE_ID1:
Sathya Perlafe6d2a32010-11-21 23:25:50 +00003909 break;
3910 case BE_DEVICE_ID2:
3911 case OC_DEVICE_ID2:
Sathya Perlafe6d2a32010-11-21 23:25:50 +00003912 break;
3913 case OC_DEVICE_ID3:
Mammatha Edhala12f4d0a2011-05-18 03:26:22 +00003914 case OC_DEVICE_ID4:
Sathya Perlafe6d2a32010-11-21 23:25:50 +00003915 pci_read_config_dword(pdev, SLI_INTF_REG_OFFSET, &sli_intf);
Parav Pandit045508a2012-03-26 14:27:13 +00003916 adapter->if_type = (sli_intf & SLI_INTF_IF_TYPE_MASK) >>
3917 SLI_INTF_IF_TYPE_SHIFT;
Sathya Perlafe6d2a32010-11-21 23:25:50 +00003918 if_type = (sli_intf & SLI_INTF_IF_TYPE_MASK) >>
3919 SLI_INTF_IF_TYPE_SHIFT;
Sathya Perlafe6d2a32010-11-21 23:25:50 +00003920 if (((sli_intf & SLI_INTF_VALID_MASK) != SLI_INTF_VALID) ||
Parav Pandit045508a2012-03-26 14:27:13 +00003921 !be_type_2_3(adapter)) {
3922 dev_err(&pdev->dev, "SLI_INTF reg val is not valid\n");
3923 return -EINVAL;
3924 }
3925 adapter->sli_family = ((sli_intf & SLI_INTF_FAMILY_MASK) >>
3926 SLI_INTF_FAMILY_SHIFT);
Parav Pandit045508a2012-03-26 14:27:13 +00003927 break;
3928 case OC_DEVICE_ID5:
Padmanabh Ratnakar76b73532012-10-20 06:04:40 +00003929 case OC_DEVICE_ID6:
Parav Pandit045508a2012-03-26 14:27:13 +00003930 pci_read_config_dword(pdev, SLI_INTF_REG_OFFSET, &sli_intf);
3931 if ((sli_intf & SLI_INTF_VALID_MASK) != SLI_INTF_VALID) {
Sathya Perlafe6d2a32010-11-21 23:25:50 +00003932 dev_err(&pdev->dev, "SLI_INTF reg val is not valid\n");
3933 return -EINVAL;
3934 }
Sathya Perlafe6d2a32010-11-21 23:25:50 +00003935 adapter->sli_family = ((sli_intf & SLI_INTF_FAMILY_MASK) >>
3936 SLI_INTF_FAMILY_SHIFT);
Sathya Perlafe6d2a32010-11-21 23:25:50 +00003937 break;
Sathya Perlafe6d2a32010-11-21 23:25:50 +00003938 }
Sathya Perla39f1d942012-05-08 19:41:24 +00003939
3940 pci_read_config_dword(adapter->pdev, SLI_INTF_REG_OFFSET, &sli_intf);
3941 adapter->virtfn = (sli_intf & SLI_INTF_FT_MASK) ? 1 : 0;
Sathya Perlafe6d2a32010-11-21 23:25:50 +00003942 return 0;
3943}
3944
/* Recover a Lancer adapter after a SLIPORT error: wait for the port to
 * become ready again, tear down and rebuild the adapter state, and
 * re-open the interface if it was running.  Called from the recovery
 * worker with the netdev detached.
 * Returns 0 on success or the failing step's status.
 */
static int lancer_recover_func(struct be_adapter *adapter)
{
	int status;

	status = lancer_test_and_set_rdy_state(adapter);
	if (status)
		goto err;

	if (netif_running(adapter->netdev))
		be_close(adapter->netdev);

	be_clear(adapter);

	/* Clear the error flags before re-initializing so be_setup()'s
	 * fw commands are not short-circuited.
	 */
	adapter->hw_error = false;
	adapter->fw_timeout = false;

	status = be_setup(adapter);
	if (status)
		goto err;

	if (netif_running(adapter->netdev)) {
		status = be_open(adapter->netdev);
		if (status)
			goto err;
	}

	dev_err(&adapter->pdev->dev,
		"Adapter SLIPORT recovery succeeded\n");
	return 0;
err:
	/* During EEH-driven recovery the EEH handlers own the retry; only
	 * log the failure here.
	 */
	if (adapter->eeh_error)
		dev_err(&adapter->pdev->dev,
			"Adapter SLIPORT recovery failed\n");

	return status;
}
3981
/* Periodic (1 s) worker that polls for adapter errors and, on Lancer,
 * attempts SLIPORT recovery.  The netdev is detached (under rtnl) for
 * the duration of the recovery and re-attached only on success.
 * EEH-detected errors are left to the EEH handlers.
 */
static void be_func_recovery_task(struct work_struct *work)
{
	struct be_adapter *adapter =
		container_of(work, struct be_adapter, func_recovery_work.work);
	int status;

	be_detect_error(adapter);

	if (adapter->hw_error && lancer_chip(adapter)) {

		if (adapter->eeh_error)
			goto out;

		rtnl_lock();
		netif_device_detach(adapter->netdev);
		rtnl_unlock();

		status = lancer_recover_func(adapter);

		if (!status)
			netif_device_attach(adapter->netdev);
	}

out:
	/* Re-arm ourselves regardless of the outcome */
	schedule_delayed_work(&adapter->func_recovery_work,
			      msecs_to_jiffies(1000));
}
4009
/* Periodic (1 s) housekeeping worker: reaps MCC completions while the
 * interface is down, refreshes statistics, polls die temperature every
 * be_get_temp_freq iterations, replenishes starved RX queues and
 * updates event-queue interrupt delays.
 */
static void be_worker(struct work_struct *work)
{
	struct be_adapter *adapter =
		container_of(work, struct be_adapter, work.work);
	struct be_rx_obj *rxo;
	struct be_eq_obj *eqo;
	int i;

	/* when interrupts are not yet enabled, just reap any pending
	 * mcc completions */
	if (!netif_running(adapter->netdev)) {
		local_bh_disable();
		be_process_mcc(adapter);
		local_bh_enable();
		goto reschedule;
	}

	/* Only issue a new stats request once the previous one completed */
	if (!adapter->stats_cmd_sent) {
		if (lancer_chip(adapter))
			lancer_cmd_get_pport_stats(adapter,
						&adapter->stats_cmd);
		else
			be_cmd_get_stats(adapter, &adapter->stats_cmd);
	}

	if (MODULO(adapter->work_counter, adapter->be_get_temp_freq) == 0)
		be_cmd_get_die_temperature(adapter);

	/* Repost buffers on RX queues that ran dry */
	for_all_rx_queues(adapter, rxo, i) {
		if (rxo->rx_post_starved) {
			rxo->rx_post_starved = false;
			be_post_rx_frags(rxo, GFP_KERNEL);
		}
	}

	for_all_evt_queues(adapter, eqo, i)
		be_eqd_update(adapter, eqo);

reschedule:
	adapter->work_counter++;
	schedule_delayed_work(&adapter->work, msecs_to_jiffies(1000));
}
4052
Sathya Perla39f1d942012-05-08 19:41:24 +00004053static bool be_reset_required(struct be_adapter *adapter)
4054{
Sathya Perlad79c0a22012-06-05 19:37:22 +00004055 return be_find_vfs(adapter, ENABLED) > 0 ? false : true;
Sathya Perla39f1d942012-05-08 19:41:24 +00004056}
4057
Sathya Perlad3791422012-09-28 04:39:44 +00004058static char *mc_name(struct be_adapter *adapter)
4059{
4060 if (adapter->function_mode & FLEX10_MODE)
4061 return "FLEX10";
4062 else if (adapter->function_mode & VNIC_MODE)
4063 return "vNIC";
4064 else if (adapter->function_mode & UMC_ENABLED)
4065 return "UMC";
4066 else
4067 return "";
4068}
4069
/* "PF" for the physical function, "VF" for a virtual function. */
static inline char *func_name(struct be_adapter *adapter)
{
	if (be_physfn(adapter))
		return "PF";
	return "VF";
}
4074
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004075static int __devinit be_probe(struct pci_dev *pdev,
4076 const struct pci_device_id *pdev_id)
4077{
4078 int status = 0;
4079 struct be_adapter *adapter;
4080 struct net_device *netdev;
Padmanabh Ratnakarb4e32a72012-07-12 03:57:35 +00004081 char port_name;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004082
4083 status = pci_enable_device(pdev);
4084 if (status)
4085 goto do_none;
4086
4087 status = pci_request_regions(pdev, DRV_NAME);
4088 if (status)
4089 goto disable_dev;
4090 pci_set_master(pdev);
4091
Sathya Perla7f640062012-06-05 19:37:20 +00004092 netdev = alloc_etherdev_mqs(sizeof(*adapter), MAX_TX_QS, MAX_RX_QS);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004093 if (netdev == NULL) {
4094 status = -ENOMEM;
4095 goto rel_reg;
4096 }
4097 adapter = netdev_priv(netdev);
4098 adapter->pdev = pdev;
4099 pci_set_drvdata(pdev, adapter);
Sathya Perlafe6d2a32010-11-21 23:25:50 +00004100
Sathya Perla39f1d942012-05-08 19:41:24 +00004101 status = be_dev_type_check(adapter);
Sathya Perla63657b92010-12-01 01:02:28 +00004102 if (status)
Sathya Perlafe6d2a32010-11-21 23:25:50 +00004103 goto free_netdev;
4104
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004105 adapter->netdev = netdev;
Sathya Perla2243e2e2009-11-22 22:02:03 +00004106 SET_NETDEV_DEV(netdev, &pdev->dev);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004107
Ivan Vecera2b7bceb2011-02-02 08:05:12 +00004108 status = dma_set_mask(&pdev->dev, DMA_BIT_MASK(64));
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004109 if (!status) {
4110 netdev->features |= NETIF_F_HIGHDMA;
4111 } else {
Ivan Vecera2b7bceb2011-02-02 08:05:12 +00004112 status = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004113 if (status) {
4114 dev_err(&pdev->dev, "Could not set PCI DMA Mask\n");
4115 goto free_netdev;
4116 }
4117 }
4118
Sathya Perlad6b6d982012-09-05 01:56:48 +00004119 status = pci_enable_pcie_error_reporting(pdev);
4120 if (status)
4121 dev_err(&pdev->dev, "Could not use PCIe error reporting\n");
4122
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004123 status = be_ctrl_init(adapter);
4124 if (status)
Sathya Perla39f1d942012-05-08 19:41:24 +00004125 goto free_netdev;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004126
Sathya Perla2243e2e2009-11-22 22:02:03 +00004127 /* sync up with fw's ready state */
Sarveshwar Bandiba343c72010-03-31 02:56:12 +00004128 if (be_physfn(adapter)) {
Padmanabh Ratnakarbf99e502012-07-12 03:56:58 +00004129 status = be_fw_wait_ready(adapter);
Sarveshwar Bandiba343c72010-03-31 02:56:12 +00004130 if (status)
4131 goto ctrl_clean;
Sarveshwar Bandiba343c72010-03-31 02:56:12 +00004132 }
Sathya Perla2243e2e2009-11-22 22:02:03 +00004133
4134 /* tell fw we're ready to fire cmds */
4135 status = be_cmd_fw_init(adapter);
4136 if (status)
4137 goto ctrl_clean;
4138
Sathya Perla39f1d942012-05-08 19:41:24 +00004139 if (be_reset_required(adapter)) {
4140 status = be_cmd_reset_function(adapter);
4141 if (status)
4142 goto ctrl_clean;
4143 }
Sarveshwar Bandi556ae192010-05-24 18:38:25 -07004144
Sathya Perla10ef9ab2012-02-09 18:05:27 +00004145 /* The INTR bit may be set in the card when probed by a kdump kernel
4146 * after a crash.
4147 */
4148 if (!lancer_chip(adapter))
4149 be_intr_set(adapter, false);
4150
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004151 status = be_stats_init(adapter);
4152 if (status)
4153 goto ctrl_clean;
4154
Sathya Perla39f1d942012-05-08 19:41:24 +00004155 status = be_get_initial_config(adapter);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004156 if (status)
4157 goto stats_clean;
4158
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004159 INIT_DELAYED_WORK(&adapter->work, be_worker);
Padmanabh Ratnakarf67ef7b2012-07-12 03:57:09 +00004160 INIT_DELAYED_WORK(&adapter->func_recovery_work, be_func_recovery_task);
Sathya Perlaa54769f2011-10-24 02:45:00 +00004161 adapter->rx_fc = adapter->tx_fc = true;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004162
Sathya Perla5fb379e2009-06-18 00:02:59 +00004163 status = be_setup(adapter);
4164 if (status)
Sathya Perla55f5c3c2012-09-28 04:39:42 +00004165 goto stats_clean;
Sathya Perla2243e2e2009-11-22 22:02:03 +00004166
Sathya Perla3abcded2010-10-03 22:12:27 -07004167 be_netdev_init(netdev);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004168 status = register_netdev(netdev);
4169 if (status != 0)
Sathya Perla5fb379e2009-06-18 00:02:59 +00004170 goto unsetup;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004171
Parav Pandit045508a2012-03-26 14:27:13 +00004172 be_roce_dev_add(adapter);
4173
Padmanabh Ratnakarf67ef7b2012-07-12 03:57:09 +00004174 schedule_delayed_work(&adapter->func_recovery_work,
4175 msecs_to_jiffies(1000));
Padmanabh Ratnakarb4e32a72012-07-12 03:57:35 +00004176
4177 be_cmd_query_port_name(adapter, &port_name);
4178
Sathya Perlad3791422012-09-28 04:39:44 +00004179 dev_info(&pdev->dev, "%s: %s %s port %c\n", nic_name(pdev),
4180 func_name(adapter), mc_name(adapter), port_name);
Somnath Kotur34b1ef02011-06-01 00:33:22 +00004181
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004182 return 0;
4183
Sathya Perla5fb379e2009-06-18 00:02:59 +00004184unsetup:
4185 be_clear(adapter);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004186stats_clean:
4187 be_stats_cleanup(adapter);
4188ctrl_clean:
4189 be_ctrl_cleanup(adapter);
Sathya Perlaf9449ab2011-10-24 02:45:01 +00004190free_netdev:
Sathya Perlafe6d2a32010-11-21 23:25:50 +00004191 free_netdev(netdev);
Sathya Perla8d56ff12009-11-22 22:02:26 +00004192 pci_set_drvdata(pdev, NULL);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004193rel_reg:
4194 pci_release_regions(pdev);
4195disable_dev:
4196 pci_disable_device(pdev);
4197do_none:
Ajit Khapardec4ca2372009-05-18 15:38:55 -07004198 dev_err(&pdev->dev, "%s initialization failed\n", nic_name(pdev));
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004199 return status;
4200}
4201
4202static int be_suspend(struct pci_dev *pdev, pm_message_t state)
4203{
4204 struct be_adapter *adapter = pci_get_drvdata(pdev);
4205 struct net_device *netdev = adapter->netdev;
4206
Ajit Khaparde71d8d1b2009-12-03 06:16:59 +00004207 if (adapter->wol)
4208 be_setup_wol(adapter, true);
4209
Padmanabh Ratnakarf67ef7b2012-07-12 03:57:09 +00004210 cancel_delayed_work_sync(&adapter->func_recovery_work);
4211
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004212 netif_device_detach(netdev);
4213 if (netif_running(netdev)) {
4214 rtnl_lock();
4215 be_close(netdev);
4216 rtnl_unlock();
4217 }
Sarveshwar Bandi9b0365f2009-08-12 21:01:29 +00004218 be_clear(adapter);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004219
4220 pci_save_state(pdev);
4221 pci_disable_device(pdev);
4222 pci_set_power_state(pdev, pci_choose_state(pdev, state));
4223 return 0;
4224}
4225
4226static int be_resume(struct pci_dev *pdev)
4227{
4228 int status = 0;
4229 struct be_adapter *adapter = pci_get_drvdata(pdev);
4230 struct net_device *netdev = adapter->netdev;
4231
4232 netif_device_detach(netdev);
4233
4234 status = pci_enable_device(pdev);
4235 if (status)
4236 return status;
4237
4238 pci_set_power_state(pdev, 0);
4239 pci_restore_state(pdev);
4240
Sathya Perla2243e2e2009-11-22 22:02:03 +00004241 /* tell fw we're ready to fire cmds */
4242 status = be_cmd_fw_init(adapter);
4243 if (status)
4244 return status;
4245
Sarveshwar Bandi9b0365f2009-08-12 21:01:29 +00004246 be_setup(adapter);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004247 if (netif_running(netdev)) {
4248 rtnl_lock();
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004249 be_open(netdev);
4250 rtnl_unlock();
4251 }
Padmanabh Ratnakarf67ef7b2012-07-12 03:57:09 +00004252
4253 schedule_delayed_work(&adapter->func_recovery_work,
4254 msecs_to_jiffies(1000));
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004255 netif_device_attach(netdev);
Ajit Khaparde71d8d1b2009-12-03 06:16:59 +00004256
4257 if (adapter->wol)
4258 be_setup_wol(adapter, false);
Ajit Khapardea4ca0552011-02-11 13:38:03 +00004259
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004260 return 0;
4261}
4262
Sathya Perla82456b02010-02-17 01:35:37 +00004263/*
4264 * An FLR will stop BE from DMAing any data.
4265 */
4266static void be_shutdown(struct pci_dev *pdev)
4267{
4268 struct be_adapter *adapter = pci_get_drvdata(pdev);
Sathya Perla82456b02010-02-17 01:35:37 +00004269
Ajit Khaparde2d5d4152011-04-06 05:53:13 +00004270 if (!adapter)
4271 return;
Sathya Perla82456b02010-02-17 01:35:37 +00004272
Sathya Perla0f4a6822011-03-21 20:49:28 +00004273 cancel_delayed_work_sync(&adapter->work);
Padmanabh Ratnakarf67ef7b2012-07-12 03:57:09 +00004274 cancel_delayed_work_sync(&adapter->func_recovery_work);
Ajit Khapardea4ca0552011-02-11 13:38:03 +00004275
Ajit Khaparde2d5d4152011-04-06 05:53:13 +00004276 netif_device_detach(adapter->netdev);
Sathya Perla82456b02010-02-17 01:35:37 +00004277
Ajit Khaparde57841862011-04-06 18:08:43 +00004278 be_cmd_reset_function(adapter);
4279
Sathya Perla82456b02010-02-17 01:35:37 +00004280 pci_disable_device(pdev);
Sathya Perla82456b02010-02-17 01:35:37 +00004281}
4282
/* EEH/AER "error detected" callback: quiesce the adapter so a
 * subsequent slot reset can attempt recovery.
 *
 * Returns PCI_ERS_RESULT_DISCONNECT on a permanent channel failure,
 * otherwise PCI_ERS_RESULT_NEED_RESET to request a slot reset.
 */
static pci_ers_result_t be_eeh_err_detected(struct pci_dev *pdev,
	pci_channel_state_t state)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);
	struct net_device *netdev = adapter->netdev;

	dev_err(&adapter->pdev->dev, "EEH error detected\n");

	/* Flag the error first so other paths stop touching the HW */
	adapter->eeh_error = true;

	/* Stop the recovery worker before tearing anything down so it
	 * cannot race with the cleanup below.
	 */
	cancel_delayed_work_sync(&adapter->func_recovery_work);

	rtnl_lock();
	netif_device_detach(netdev);
	rtnl_unlock();

	if (netif_running(netdev)) {
		rtnl_lock();
		be_close(netdev);
		rtnl_unlock();
	}
	be_clear(adapter);

	/* Permanent failure: give up without touching the device again */
	if (state == pci_channel_io_perm_failure)
		return PCI_ERS_RESULT_DISCONNECT;

	pci_disable_device(pdev);

	/* The error could cause the FW to trigger a flash debug dump.
	 * Resetting the card while flash dump is in progress
	 * can cause it not to recover; wait for it to finish.
	 * Wait only for first function as it is needed only once per
	 * adapter.
	 */
	if (pdev->devfn == 0)
		ssleep(30);

	return PCI_ERS_RESULT_NEED_RESET;
}
4322
4323static pci_ers_result_t be_eeh_reset(struct pci_dev *pdev)
4324{
4325 struct be_adapter *adapter = pci_get_drvdata(pdev);
4326 int status;
4327
4328 dev_info(&adapter->pdev->dev, "EEH reset\n");
Padmanabh Ratnakarf67ef7b2012-07-12 03:57:09 +00004329 be_clear_all_error(adapter);
Sathya Perlacf588472010-02-14 21:22:01 +00004330
4331 status = pci_enable_device(pdev);
4332 if (status)
4333 return PCI_ERS_RESULT_DISCONNECT;
4334
4335 pci_set_master(pdev);
4336 pci_set_power_state(pdev, 0);
4337 pci_restore_state(pdev);
4338
4339 /* Check if card is ok and fw is ready */
Padmanabh Ratnakarbf99e502012-07-12 03:56:58 +00004340 status = be_fw_wait_ready(adapter);
Sathya Perlacf588472010-02-14 21:22:01 +00004341 if (status)
4342 return PCI_ERS_RESULT_DISCONNECT;
4343
Sathya Perlad6b6d982012-09-05 01:56:48 +00004344 pci_cleanup_aer_uncorrect_error_status(pdev);
Sathya Perlacf588472010-02-14 21:22:01 +00004345 return PCI_ERS_RESULT_RECOVERED;
4346}
4347
4348static void be_eeh_resume(struct pci_dev *pdev)
4349{
4350 int status = 0;
4351 struct be_adapter *adapter = pci_get_drvdata(pdev);
4352 struct net_device *netdev = adapter->netdev;
4353
4354 dev_info(&adapter->pdev->dev, "EEH resume\n");
4355
4356 pci_save_state(pdev);
4357
4358 /* tell fw we're ready to fire cmds */
4359 status = be_cmd_fw_init(adapter);
4360 if (status)
4361 goto err;
4362
Padmanabh Ratnakarbf99e502012-07-12 03:56:58 +00004363 status = be_cmd_reset_function(adapter);
4364 if (status)
4365 goto err;
4366
Sathya Perlacf588472010-02-14 21:22:01 +00004367 status = be_setup(adapter);
4368 if (status)
4369 goto err;
4370
4371 if (netif_running(netdev)) {
4372 status = be_open(netdev);
4373 if (status)
4374 goto err;
4375 }
Padmanabh Ratnakarf67ef7b2012-07-12 03:57:09 +00004376
4377 schedule_delayed_work(&adapter->func_recovery_work,
4378 msecs_to_jiffies(1000));
Sathya Perlacf588472010-02-14 21:22:01 +00004379 netif_device_attach(netdev);
4380 return;
4381err:
4382 dev_err(&adapter->pdev->dev, "EEH resume failed\n");
Sathya Perlacf588472010-02-14 21:22:01 +00004383}
4384
/* PCI error-recovery callbacks wired into be_driver.err_handler:
 * error_detected -> slot_reset -> resume.
 */
static const struct pci_error_handlers be_eeh_handlers = {
	.error_detected = be_eeh_err_detected,
	.slot_reset = be_eeh_reset,
	.resume = be_eeh_resume,
};
4390
/* PCI driver registration record: binds be_dev_ids to the probe/remove,
 * legacy suspend/resume, shutdown and EEH error-recovery entry points.
 */
static struct pci_driver be_driver = {
	.name = DRV_NAME,
	.id_table = be_dev_ids,
	.probe = be_probe,
	.remove = be_remove,
	.suspend = be_suspend,
	.resume = be_resume,
	.shutdown = be_shutdown,
	.err_handler = &be_eeh_handlers
};
4401
4402static int __init be_init_module(void)
4403{
Joe Perches8e95a202009-12-03 07:58:21 +00004404 if (rx_frag_size != 8192 && rx_frag_size != 4096 &&
4405 rx_frag_size != 2048) {
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004406 printk(KERN_WARNING DRV_NAME
4407 " : Module param rx_frag_size must be 2048/4096/8192."
4408 " Using 2048\n");
4409 rx_frag_size = 2048;
4410 }
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004411
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004412 return pci_register_driver(&be_driver);
4413}
4414module_init(be_init_module);
4415
/* Module exit point: unregister the PCI driver, which detaches all
 * bound devices via be_remove().
 */
static void __exit be_exit_module(void)
{
	pci_unregister_driver(&be_driver);
}
module_exit(be_exit_module);