blob: 6accb0c0273ae6266aada898242dcfcf8d9aa61d [file] [log] [blame]
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001/*
Ajit Khaparded2145cd2011-03-16 08:20:46 +00002 * Copyright (C) 2005 - 2011 Emulex
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003 * All rights reserved.
4 *
5 * This program is free software; you can redistribute it and/or
6 * modify it under the terms of the GNU General Public License version 2
7 * as published by the Free Software Foundation. The full GNU General
8 * Public License is included in this distribution in the file called COPYING.
9 *
10 * Contact Information:
Ajit Khaparded2145cd2011-03-16 08:20:46 +000011 * linux-drivers@emulex.com
Sathya Perla6b7c5b92009-03-11 23:32:03 -070012 *
Ajit Khaparded2145cd2011-03-16 08:20:46 +000013 * Emulex
14 * 3333 Susan Street
15 * Costa Mesa, CA 92626
Sathya Perla6b7c5b92009-03-11 23:32:03 -070016 */
17
Paul Gortmaker70c71602011-05-22 16:47:17 -040018#include <linux/prefetch.h>
Paul Gortmaker9d9779e2011-07-03 15:21:01 -040019#include <linux/module.h>
Sathya Perla6b7c5b92009-03-11 23:32:03 -070020#include "be.h"
Sathya Perla8788fdc2009-07-27 22:52:03 +000021#include "be_cmds.h"
Stephen Hemminger65f71b82009-03-27 00:25:24 -070022#include <asm/div64.h>
Sathya Perlad6b6d982012-09-05 01:56:48 +000023#include <linux/aer.h>
Sathya Perla6b7c5b92009-03-11 23:32:03 -070024
25MODULE_VERSION(DRV_VER);
26MODULE_DEVICE_TABLE(pci, be_dev_ids);
27MODULE_DESCRIPTION(DRV_DESC " " DRV_VER);
28MODULE_AUTHOR("ServerEngines Corporation");
29MODULE_LICENSE("GPL");
30
Sarveshwar Bandiba343c72010-03-31 02:56:12 +000031static unsigned int num_vfs;
Sarveshwar Bandiba343c72010-03-31 02:56:12 +000032module_param(num_vfs, uint, S_IRUGO);
Sarveshwar Bandiba343c72010-03-31 02:56:12 +000033MODULE_PARM_DESC(num_vfs, "Number of PCI VFs to initialize");
Sathya Perla6b7c5b92009-03-11 23:32:03 -070034
Sathya Perla11ac75e2011-12-13 00:58:50 +000035static ushort rx_frag_size = 2048;
36module_param(rx_frag_size, ushort, S_IRUGO);
37MODULE_PARM_DESC(rx_frag_size, "Size of a fragment that holds rcvd data.");
38
/* PCI device ids handled by this driver: BladeEngine2/3 and the
 * OneConnect family, under both the ServerEngines and Emulex vendor ids.
 */
static DEFINE_PCI_DEVICE_TABLE(be_dev_ids) = {
	{ PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID1) },
	{ PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID2) },
	{ PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID1) },
	{ PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID2) },
	{ PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID3)},
	{ PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID4)},
	{ PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID5)},
	{ 0 }	/* terminator */
};
MODULE_DEVICE_TABLE(pci, be_dev_ids);
/* UE Status Low CSR */
/* Human-readable names for the UE (unrecoverable error) status-low
 * register; index appears to correspond to bit position — the decode
 * logic is elsewhere in this file (not in view here).  Trailing spaces
 * in some entries are preserved as-is since these are runtime strings.
 */
static const char * const ue_status_low_desc[] = {
	"CEV",
	"CTX",
	"DBUF",
	"ERX",
	"Host",
	"MPU",
	"NDMA",
	"PTC ",
	"RDMA ",
	"RXF ",
	"RXIPS ",
	"RXULP0 ",
	"RXULP1 ",
	"RXULP2 ",
	"TIM ",
	"TPOST ",
	"TPRE ",
	"TXIPS ",
	"TXULP0 ",
	"TXULP1 ",
	"UC ",
	"WDMA ",
	"TXULP2 ",
	"HOST1 ",
	"P0_OB_LINK ",
	"P1_OB_LINK ",
	"HOST_GPIO ",
	"MBOX ",
	"AXGMAC0",
	"AXGMAC1",
	"JTAG",
	"MPU_INTPEND"
};
/* UE Status High CSR */
/* Names for the UE status-high register bits; companion table to
 * ue_status_low_desc above.  "Unknown" entries pad the table to 32 bits.
 */
static const char * const ue_status_hi_desc[] = {
	"LPCMEMHOST",
	"MGMT_MAC",
	"PCS0ONLINE",
	"MPU_IRAM",
	"PCS1ONLINE",
	"PCTL0",
	"PCTL1",
	"PMEM",
	"RR",
	"TXPB",
	"RXPP",
	"XAUI",
	"TXP",
	"ARM",
	"IPC",
	"HOST2",
	"HOST3",
	"HOST4",
	"HOST5",
	"HOST6",
	"HOST7",
	"HOST8",
	"HOST9",
	"NETC",
	"Unknown",
	"Unknown",
	"Unknown",
	"Unknown",
	"Unknown",
	"Unknown",
	"Unknown",
	"Unknown"
};
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700120
Sathya Perla752961a2011-10-24 02:45:03 +0000121/* Is BE in a multi-channel mode */
122static inline bool be_is_mc(struct be_adapter *adapter) {
123 return (adapter->function_mode & FLEX10_MODE ||
124 adapter->function_mode & VNIC_MODE ||
125 adapter->function_mode & UMC_ENABLED);
126}
127
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700128static void be_queue_free(struct be_adapter *adapter, struct be_queue_info *q)
129{
130 struct be_dma_mem *mem = &q->dma_mem;
Sathya Perla1cfafab2012-02-23 18:50:15 +0000131 if (mem->va) {
Ivan Vecera2b7bceb2011-02-02 08:05:12 +0000132 dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
133 mem->dma);
Sathya Perla1cfafab2012-02-23 18:50:15 +0000134 mem->va = NULL;
135 }
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700136}
137
138static int be_queue_alloc(struct be_adapter *adapter, struct be_queue_info *q,
139 u16 len, u16 entry_size)
140{
141 struct be_dma_mem *mem = &q->dma_mem;
142
143 memset(q, 0, sizeof(*q));
144 q->len = len;
145 q->entry_size = entry_size;
146 mem->size = len * entry_size;
Ivan Vecera2b7bceb2011-02-02 08:05:12 +0000147 mem->va = dma_alloc_coherent(&adapter->pdev->dev, mem->size, &mem->dma,
148 GFP_KERNEL);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700149 if (!mem->va)
Sathya Perla10ef9ab2012-02-09 18:05:27 +0000150 return -ENOMEM;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700151 memset(mem->va, 0, mem->size);
152 return 0;
153}
154
Sathya Perla8788fdc2009-07-27 22:52:03 +0000155static void be_intr_set(struct be_adapter *adapter, bool enable)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700156{
Sathya Perladb3ea782011-08-22 19:41:52 +0000157 u32 reg, enabled;
Sathya Perla5f0b8492009-07-27 22:52:56 +0000158
Padmanabh Ratnakarf67ef7b2012-07-12 03:57:09 +0000159 if (adapter->eeh_error)
Sathya Perlacf588472010-02-14 21:22:01 +0000160 return;
161
Sathya Perladb3ea782011-08-22 19:41:52 +0000162 pci_read_config_dword(adapter->pdev, PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET,
163 &reg);
164 enabled = reg & MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
165
Sathya Perla5f0b8492009-07-27 22:52:56 +0000166 if (!enabled && enable)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700167 reg |= MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
Sathya Perla5f0b8492009-07-27 22:52:56 +0000168 else if (enabled && !enable)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700169 reg &= ~MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
Sathya Perla5f0b8492009-07-27 22:52:56 +0000170 else
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700171 return;
Sathya Perla5f0b8492009-07-27 22:52:56 +0000172
Sathya Perladb3ea782011-08-22 19:41:52 +0000173 pci_write_config_dword(adapter->pdev,
174 PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET, reg);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700175}
176
Sathya Perla8788fdc2009-07-27 22:52:03 +0000177static void be_rxq_notify(struct be_adapter *adapter, u16 qid, u16 posted)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700178{
179 u32 val = 0;
180 val |= qid & DB_RQ_RING_ID_MASK;
181 val |= posted << DB_RQ_NUM_POSTED_SHIFT;
Sathya Perlaf3eb62d2010-06-29 00:11:17 +0000182
183 wmb();
Sathya Perla8788fdc2009-07-27 22:52:03 +0000184 iowrite32(val, adapter->db + DB_RQ_OFFSET);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700185}
186
Sathya Perla8788fdc2009-07-27 22:52:03 +0000187static void be_txq_notify(struct be_adapter *adapter, u16 qid, u16 posted)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700188{
189 u32 val = 0;
190 val |= qid & DB_TXULP_RING_ID_MASK;
191 val |= (posted & DB_TXULP_NUM_POSTED_MASK) << DB_TXULP_NUM_POSTED_SHIFT;
Sathya Perlaf3eb62d2010-06-29 00:11:17 +0000192
193 wmb();
Sathya Perla8788fdc2009-07-27 22:52:03 +0000194 iowrite32(val, adapter->db + DB_TXULP1_OFFSET);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700195}
196
Sathya Perla8788fdc2009-07-27 22:52:03 +0000197static void be_eq_notify(struct be_adapter *adapter, u16 qid,
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700198 bool arm, bool clear_int, u16 num_popped)
199{
200 u32 val = 0;
201 val |= qid & DB_EQ_RING_ID_MASK;
Sathya Perlafe6d2a32010-11-21 23:25:50 +0000202 val |= ((qid & DB_EQ_RING_ID_EXT_MASK) <<
203 DB_EQ_RING_ID_EXT_MASK_SHIFT);
Sathya Perlacf588472010-02-14 21:22:01 +0000204
Padmanabh Ratnakarf67ef7b2012-07-12 03:57:09 +0000205 if (adapter->eeh_error)
Sathya Perlacf588472010-02-14 21:22:01 +0000206 return;
207
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700208 if (arm)
209 val |= 1 << DB_EQ_REARM_SHIFT;
210 if (clear_int)
211 val |= 1 << DB_EQ_CLR_SHIFT;
212 val |= 1 << DB_EQ_EVNT_SHIFT;
213 val |= num_popped << DB_EQ_NUM_POPPED_SHIFT;
Sathya Perla8788fdc2009-07-27 22:52:03 +0000214 iowrite32(val, adapter->db + DB_EQ_OFFSET);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700215}
216
Sathya Perla8788fdc2009-07-27 22:52:03 +0000217void be_cq_notify(struct be_adapter *adapter, u16 qid, bool arm, u16 num_popped)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700218{
219 u32 val = 0;
220 val |= qid & DB_CQ_RING_ID_MASK;
Sathya Perlafe6d2a32010-11-21 23:25:50 +0000221 val |= ((qid & DB_CQ_RING_ID_EXT_MASK) <<
222 DB_CQ_RING_ID_EXT_MASK_SHIFT);
Sathya Perlacf588472010-02-14 21:22:01 +0000223
Padmanabh Ratnakarf67ef7b2012-07-12 03:57:09 +0000224 if (adapter->eeh_error)
Sathya Perlacf588472010-02-14 21:22:01 +0000225 return;
226
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700227 if (arm)
228 val |= 1 << DB_CQ_REARM_SHIFT;
229 val |= num_popped << DB_CQ_NUM_POPPED_SHIFT;
Sathya Perla8788fdc2009-07-27 22:52:03 +0000230 iowrite32(val, adapter->db + DB_CQ_OFFSET);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700231}
232
/* ndo_set_mac_address handler: program a new unicast MAC on the
 * interface.  The new pmac entry is added before the old one is deleted
 * so the port is never left without a valid MAC.
 */
static int be_mac_addr_set(struct net_device *netdev, void *p)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct sockaddr *addr = p;
	int status = 0;
	u8 current_mac[ETH_ALEN];
	/* remember the old pmac-id: be_cmd_pmac_add() below overwrites
	 * adapter->pmac_id[0]
	 */
	u32 pmac_id = adapter->pmac_id[0];

	if (!is_valid_ether_addr(addr->sa_data))
		return -EADDRNOTAVAIL;

	/* query the MAC currently programmed in HW for this interface */
	status = be_cmd_mac_addr_query(adapter, current_mac, false,
				       adapter->if_handle, 0);
	if (status)
		goto err;

	/* reprogram only if the requested MAC actually differs */
	if (memcmp(addr->sa_data, current_mac, ETH_ALEN)) {
		status = be_cmd_pmac_add(adapter, (u8 *)addr->sa_data,
				adapter->if_handle, &adapter->pmac_id[0], 0);
		if (status)
			goto err;

		/* new MAC is active; retire the previous pmac entry */
		be_cmd_pmac_del(adapter, adapter->if_handle, pmac_id, 0);
	}
	memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
	return 0;
err:
	dev_err(&adapter->pdev->dev, "MAC %pM set Failed\n", addr->sa_data);
	return status;
}
263
/* Copy the v0 (BE2) firmware stats layout into the chip-independent
 * driver stats structure.  The firmware response is byte-swapped in
 * place first; per-port counters come from this function's port slot.
 */
static void populate_be2_stats(struct be_adapter *adapter)
{
	struct be_hw_stats_v0 *hw_stats = hw_stats_from_cmd(adapter);
	struct be_pmem_stats *pmem_sts = &hw_stats->pmem;
	struct be_rxf_stats_v0 *rxf_stats = &hw_stats->rxf;
	struct be_port_rxf_stats_v0 *port_stats =
					&rxf_stats->port[adapter->port_num];
	struct be_drv_stats *drvs = &adapter->drv_stats;

	/* FW returns little-endian dwords; convert in place */
	be_dws_le_to_cpu(hw_stats, sizeof(*hw_stats));
	drvs->rx_pause_frames = port_stats->rx_pause_frames;
	drvs->rx_crc_errors = port_stats->rx_crc_errors;
	drvs->rx_control_frames = port_stats->rx_control_frames;
	drvs->rx_in_range_errors = port_stats->rx_in_range_errors;
	drvs->rx_frame_too_long = port_stats->rx_frame_too_long;
	drvs->rx_dropped_runt = port_stats->rx_dropped_runt;
	drvs->rx_ip_checksum_errs = port_stats->rx_ip_checksum_errs;
	drvs->rx_tcp_checksum_errs = port_stats->rx_tcp_checksum_errs;
	drvs->rx_udp_checksum_errs = port_stats->rx_udp_checksum_errs;
	drvs->rxpp_fifo_overflow_drop = port_stats->rx_fifo_overflow;
	drvs->rx_dropped_tcp_length = port_stats->rx_dropped_tcp_length;
	drvs->rx_dropped_too_small = port_stats->rx_dropped_too_small;
	drvs->rx_dropped_too_short = port_stats->rx_dropped_too_short;
	drvs->rx_out_range_errors = port_stats->rx_out_range_errors;
	drvs->rx_input_fifo_overflow_drop = port_stats->rx_input_fifo_overflow;
	drvs->rx_dropped_header_too_small =
		port_stats->rx_dropped_header_too_small;
	/* v0 keeps address and vlan mismatch drops separate; fold both
	 * into the single driver counter
	 */
	drvs->rx_address_mismatch_drops =
					port_stats->rx_address_mismatch_drops +
					port_stats->rx_vlan_mismatch_drops;
	drvs->rx_alignment_symbol_errors =
		port_stats->rx_alignment_symbol_errors;

	drvs->tx_pauseframes = port_stats->tx_pauseframes;
	drvs->tx_controlframes = port_stats->tx_controlframes;

	/* jabber counters are kept per physical port in the rxf block */
	if (adapter->port_num)
		drvs->jabber_events = rxf_stats->port1_jabber_events;
	else
		drvs->jabber_events = rxf_stats->port0_jabber_events;
	drvs->rx_drops_no_pbuf = rxf_stats->rx_drops_no_pbuf;
	drvs->rx_drops_no_erx_descr = rxf_stats->rx_drops_no_erx_descr;
	drvs->forwarded_packets = rxf_stats->forwarded_packets;
	drvs->rx_drops_mtu = rxf_stats->rx_drops_mtu;
	drvs->rx_drops_no_tpre_descr = rxf_stats->rx_drops_no_tpre_descr;
	drvs->rx_drops_too_many_frags = rxf_stats->rx_drops_too_many_frags;
	adapter->drv_stats.eth_red_drops = pmem_sts->eth_red_drops;
}
312
/* Copy the v1 (BE3) firmware stats layout into the chip-independent
 * driver stats structure; v1 adds pmem-fifo, priority-pause and
 * per-port jabber counters over v0.
 */
static void populate_be3_stats(struct be_adapter *adapter)
{
	struct be_hw_stats_v1 *hw_stats = hw_stats_from_cmd(adapter);
	struct be_pmem_stats *pmem_sts = &hw_stats->pmem;
	struct be_rxf_stats_v1 *rxf_stats = &hw_stats->rxf;
	struct be_port_rxf_stats_v1 *port_stats =
					&rxf_stats->port[adapter->port_num];
	struct be_drv_stats *drvs = &adapter->drv_stats;

	/* FW returns little-endian dwords; convert in place */
	be_dws_le_to_cpu(hw_stats, sizeof(*hw_stats));
	drvs->pmem_fifo_overflow_drop = port_stats->pmem_fifo_overflow_drop;
	drvs->rx_priority_pause_frames = port_stats->rx_priority_pause_frames;
	drvs->rx_pause_frames = port_stats->rx_pause_frames;
	drvs->rx_crc_errors = port_stats->rx_crc_errors;
	drvs->rx_control_frames = port_stats->rx_control_frames;
	drvs->rx_in_range_errors = port_stats->rx_in_range_errors;
	drvs->rx_frame_too_long = port_stats->rx_frame_too_long;
	drvs->rx_dropped_runt = port_stats->rx_dropped_runt;
	drvs->rx_ip_checksum_errs = port_stats->rx_ip_checksum_errs;
	drvs->rx_tcp_checksum_errs = port_stats->rx_tcp_checksum_errs;
	drvs->rx_udp_checksum_errs = port_stats->rx_udp_checksum_errs;
	drvs->rx_dropped_tcp_length = port_stats->rx_dropped_tcp_length;
	drvs->rx_dropped_too_small = port_stats->rx_dropped_too_small;
	drvs->rx_dropped_too_short = port_stats->rx_dropped_too_short;
	drvs->rx_out_range_errors = port_stats->rx_out_range_errors;
	drvs->rx_dropped_header_too_small =
		port_stats->rx_dropped_header_too_small;
	drvs->rx_input_fifo_overflow_drop =
		port_stats->rx_input_fifo_overflow_drop;
	/* v1 already combines address/vlan mismatch drops in HW */
	drvs->rx_address_mismatch_drops = port_stats->rx_address_mismatch_drops;
	drvs->rx_alignment_symbol_errors =
		port_stats->rx_alignment_symbol_errors;
	drvs->rxpp_fifo_overflow_drop = port_stats->rxpp_fifo_overflow_drop;
	drvs->tx_pauseframes = port_stats->tx_pauseframes;
	drvs->tx_controlframes = port_stats->tx_controlframes;
	drvs->jabber_events = port_stats->jabber_events;
	drvs->rx_drops_no_pbuf = rxf_stats->rx_drops_no_pbuf;
	drvs->rx_drops_no_erx_descr = rxf_stats->rx_drops_no_erx_descr;
	drvs->forwarded_packets = rxf_stats->forwarded_packets;
	drvs->rx_drops_mtu = rxf_stats->rx_drops_mtu;
	drvs->rx_drops_no_tpre_descr = rxf_stats->rx_drops_no_tpre_descr;
	drvs->rx_drops_too_many_frags = rxf_stats->rx_drops_too_many_frags;
	adapter->drv_stats.eth_red_drops = pmem_sts->eth_red_drops;
}
357
/* Copy the Lancer pport stats layout into the chip-independent driver
 * stats structure.  Lancer exposes 64-bit counters; only the low dwords
 * (_lo) of those are folded into the 32/64-bit driver counters here.
 */
static void populate_lancer_stats(struct be_adapter *adapter)
{

	struct be_drv_stats *drvs = &adapter->drv_stats;
	struct lancer_pport_stats *pport_stats =
					pport_stats_from_cmd(adapter);

	/* FW returns little-endian dwords; convert in place */
	be_dws_le_to_cpu(pport_stats, sizeof(*pport_stats));
	drvs->rx_pause_frames = pport_stats->rx_pause_frames_lo;
	drvs->rx_crc_errors = pport_stats->rx_crc_errors_lo;
	drvs->rx_control_frames = pport_stats->rx_control_frames_lo;
	drvs->rx_in_range_errors = pport_stats->rx_in_range_errors;
	drvs->rx_frame_too_long = pport_stats->rx_frames_too_long_lo;
	drvs->rx_dropped_runt = pport_stats->rx_dropped_runt;
	drvs->rx_ip_checksum_errs = pport_stats->rx_ip_checksum_errors;
	drvs->rx_tcp_checksum_errs = pport_stats->rx_tcp_checksum_errors;
	drvs->rx_udp_checksum_errs = pport_stats->rx_udp_checksum_errors;
	drvs->rx_dropped_tcp_length =
				pport_stats->rx_dropped_invalid_tcp_length;
	drvs->rx_dropped_too_small = pport_stats->rx_dropped_too_small;
	drvs->rx_dropped_too_short = pport_stats->rx_dropped_too_short;
	drvs->rx_out_range_errors = pport_stats->rx_out_of_range_errors;
	drvs->rx_dropped_header_too_small =
				pport_stats->rx_dropped_header_too_small;
	/* NOTE: rx_fifo_overflow feeds both fifo-drop counters here —
	 * Lancer has a single overflow counter for both meanings
	 * (presumably; confirm against the Lancer stats spec)
	 */
	drvs->rx_input_fifo_overflow_drop = pport_stats->rx_fifo_overflow;
	drvs->rx_address_mismatch_drops =
					pport_stats->rx_address_mismatch_drops +
					pport_stats->rx_vlan_mismatch_drops;
	drvs->rx_alignment_symbol_errors = pport_stats->rx_symbol_errors_lo;
	drvs->rxpp_fifo_overflow_drop = pport_stats->rx_fifo_overflow;
	drvs->tx_pauseframes = pport_stats->tx_pause_frames_lo;
	drvs->tx_controlframes = pport_stats->tx_control_frames_lo;
	drvs->jabber_events = pport_stats->rx_jabbers;
	drvs->forwarded_packets = pport_stats->num_forwards_lo;
	drvs->rx_drops_mtu = pport_stats->rx_drops_mtu_lo;
	drvs->rx_drops_too_many_frags =
				pport_stats->rx_drops_too_many_frags_lo;
}
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000396
Sathya Perla09c1c682011-08-22 19:41:53 +0000397static void accumulate_16bit_val(u32 *acc, u16 val)
398{
399#define lo(x) (x & 0xFFFF)
400#define hi(x) (x & 0xFFFF0000)
401 bool wrapped = val < lo(*acc);
402 u32 newacc = hi(*acc) + val;
403
404 if (wrapped)
405 newacc += 65536;
406 ACCESS_ONCE(*acc) = newacc;
407}
408
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000409void be_parse_stats(struct be_adapter *adapter)
410{
Sathya Perlaac124ff2011-07-25 19:10:14 +0000411 struct be_erx_stats_v1 *erx = be_erx_stats_from_cmd(adapter);
412 struct be_rx_obj *rxo;
413 int i;
414
Selvin Xavier005d5692011-05-16 07:36:35 +0000415 if (adapter->generation == BE_GEN3) {
416 if (lancer_chip(adapter))
417 populate_lancer_stats(adapter);
418 else
419 populate_be3_stats(adapter);
420 } else {
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000421 populate_be2_stats(adapter);
Selvin Xavier005d5692011-05-16 07:36:35 +0000422 }
Sathya Perlaac124ff2011-07-25 19:10:14 +0000423
Padmanabh Ratnakard51ebd32012-04-25 01:46:52 +0000424 if (lancer_chip(adapter))
425 goto done;
426
Sathya Perlaac124ff2011-07-25 19:10:14 +0000427 /* as erx_v1 is longer than v0, ok to use v1 defn for v0 access */
Sathya Perla09c1c682011-08-22 19:41:53 +0000428 for_all_rx_queues(adapter, rxo, i) {
429 /* below erx HW counter can actually wrap around after
430 * 65535. Driver accumulates a 32-bit value
431 */
432 accumulate_16bit_val(&rx_stats(rxo)->rx_drops_no_frags,
433 (u16)erx->rx_drops_no_fragments[rxo->q.id]);
434 }
Padmanabh Ratnakard51ebd32012-04-25 01:46:52 +0000435done:
436 return;
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000437}
438
/* ndo_get_stats64 handler: aggregate per-queue packet/byte counters
 * (read consistently under the u64_stats sequence counters) plus the
 * firmware-derived error counters into @stats.
 * Note: the local `rx_stats`/`tx_stats` pointers shadow the
 * `rx_stats()`/`tx_stats()` function-like macros of the same name; the
 * `rx_stats(rxo)` calls inside the loops still expand the macro.
 */
static struct rtnl_link_stats64 *be_get_stats64(struct net_device *netdev,
					struct rtnl_link_stats64 *stats)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_drv_stats *drvs = &adapter->drv_stats;
	struct be_rx_obj *rxo;
	struct be_tx_obj *txo;
	u64 pkts, bytes;
	unsigned int start;
	int i;

	for_all_rx_queues(adapter, rxo, i) {
		const struct be_rx_stats *rx_stats = rx_stats(rxo);
		/* retry loop: re-read if a writer updated mid-read */
		do {
			start = u64_stats_fetch_begin_bh(&rx_stats->sync);
			pkts = rx_stats(rxo)->rx_pkts;
			bytes = rx_stats(rxo)->rx_bytes;
		} while (u64_stats_fetch_retry_bh(&rx_stats->sync, start));
		stats->rx_packets += pkts;
		stats->rx_bytes += bytes;
		stats->multicast += rx_stats(rxo)->rx_mcast_pkts;
		stats->rx_dropped += rx_stats(rxo)->rx_drops_no_skbs +
					rx_stats(rxo)->rx_drops_no_frags;
	}

	for_all_tx_queues(adapter, txo, i) {
		const struct be_tx_stats *tx_stats = tx_stats(txo);
		do {
			start = u64_stats_fetch_begin_bh(&tx_stats->sync);
			pkts = tx_stats(txo)->tx_pkts;
			bytes = tx_stats(txo)->tx_bytes;
		} while (u64_stats_fetch_retry_bh(&tx_stats->sync, start));
		stats->tx_packets += pkts;
		stats->tx_bytes += bytes;
	}

	/* bad pkts received */
	stats->rx_errors = drvs->rx_crc_errors +
		drvs->rx_alignment_symbol_errors +
		drvs->rx_in_range_errors +
		drvs->rx_out_range_errors +
		drvs->rx_frame_too_long +
		drvs->rx_dropped_too_small +
		drvs->rx_dropped_too_short +
		drvs->rx_dropped_header_too_small +
		drvs->rx_dropped_tcp_length +
		drvs->rx_dropped_runt;

	/* detailed rx errors */
	stats->rx_length_errors = drvs->rx_in_range_errors +
		drvs->rx_out_range_errors +
		drvs->rx_frame_too_long;

	stats->rx_crc_errors = drvs->rx_crc_errors;

	/* frame alignment errors */
	stats->rx_frame_errors = drvs->rx_alignment_symbol_errors;

	/* receiver fifo overrun */
	/* drops_no_pbuf is no per i/f, it's per BE card */
	stats->rx_fifo_errors = drvs->rxpp_fifo_overflow_drop +
				drvs->rx_input_fifo_overflow_drop +
				drvs->rx_drops_no_pbuf;
	return stats;
}
504
Ajit Khapardeb236916a2011-12-30 12:15:40 +0000505void be_link_status_update(struct be_adapter *adapter, u8 link_status)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700506{
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700507 struct net_device *netdev = adapter->netdev;
508
Ajit Khapardeb236916a2011-12-30 12:15:40 +0000509 if (!(adapter->flags & BE_FLAGS_LINK_STATUS_INIT)) {
Sathya Perlaea172a02011-08-02 19:57:42 +0000510 netif_carrier_off(netdev);
Ajit Khapardeb236916a2011-12-30 12:15:40 +0000511 adapter->flags |= BE_FLAGS_LINK_STATUS_INIT;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700512 }
Ajit Khapardeb236916a2011-12-30 12:15:40 +0000513
514 if ((link_status & LINK_STATUS_MASK) == LINK_UP)
515 netif_carrier_on(netdev);
516 else
517 netif_carrier_off(netdev);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700518}
519
Sathya Perla3c8def92011-06-12 20:01:58 +0000520static void be_tx_stats_update(struct be_tx_obj *txo,
Ajit Khaparde91992e42010-02-19 13:57:12 +0000521 u32 wrb_cnt, u32 copied, u32 gso_segs, bool stopped)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700522{
Sathya Perla3c8def92011-06-12 20:01:58 +0000523 struct be_tx_stats *stats = tx_stats(txo);
524
Sathya Perlaab1594e2011-07-25 19:10:15 +0000525 u64_stats_update_begin(&stats->sync);
Sathya Perlaac124ff2011-07-25 19:10:14 +0000526 stats->tx_reqs++;
527 stats->tx_wrbs += wrb_cnt;
528 stats->tx_bytes += copied;
529 stats->tx_pkts += (gso_segs ? gso_segs : 1);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700530 if (stopped)
Sathya Perlaac124ff2011-07-25 19:10:14 +0000531 stats->tx_stops++;
Sathya Perlaab1594e2011-07-25 19:10:15 +0000532 u64_stats_update_end(&stats->sync);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700533}
534
535/* Determine number of WRB entries needed to xmit data in an skb */
Sathya Perlafe6d2a32010-11-21 23:25:50 +0000536static u32 wrb_cnt_for_skb(struct be_adapter *adapter, struct sk_buff *skb,
537 bool *dummy)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700538{
David S. Millerebc8d2a2009-06-09 01:01:31 -0700539 int cnt = (skb->len > skb->data_len);
540
541 cnt += skb_shinfo(skb)->nr_frags;
542
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700543 /* to account for hdr wrb */
544 cnt++;
Sathya Perlafe6d2a32010-11-21 23:25:50 +0000545 if (lancer_chip(adapter) || !(cnt & 1)) {
546 *dummy = false;
547 } else {
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700548 /* add a dummy to make it an even num */
549 cnt++;
550 *dummy = true;
Sathya Perlafe6d2a32010-11-21 23:25:50 +0000551 }
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700552 BUG_ON(cnt > BE_MAX_TX_FRAG_COUNT);
553 return cnt;
554}
555
556static inline void wrb_fill(struct be_eth_wrb *wrb, u64 addr, int len)
557{
558 wrb->frag_pa_hi = upper_32_bits(addr);
559 wrb->frag_pa_lo = addr & 0xFFFFFFFF;
560 wrb->frag_len = len & ETH_WRB_FRAG_LEN_MASK;
Somnath Kotur89b1f492012-06-24 19:40:55 +0000561 wrb->rsvd0 = 0;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700562}
563
Ajit Khaparde1ded1322011-12-09 13:53:17 +0000564static inline u16 be_get_tx_vlan_tag(struct be_adapter *adapter,
565 struct sk_buff *skb)
566{
567 u8 vlan_prio;
568 u16 vlan_tag;
569
570 vlan_tag = vlan_tx_tag_get(skb);
571 vlan_prio = (vlan_tag & VLAN_PRIO_MASK) >> VLAN_PRIO_SHIFT;
572 /* If vlan priority provided by OS is NOT in available bmap */
573 if (!(adapter->vlan_prio_bmap & (1 << vlan_prio)))
574 vlan_tag = (vlan_tag & ~VLAN_PRIO_MASK) |
575 adapter->recommended_prio;
576
577 return vlan_tag;
578}
579
Somnath Kotur93040ae2012-06-26 22:32:10 +0000580static int be_vlan_tag_chk(struct be_adapter *adapter, struct sk_buff *skb)
581{
582 return vlan_tx_tag_present(skb) || adapter->pvid;
583}
584
/* Build the TX header WRB for @skb: sets CRC/LSO/checksum-offload/VLAN
 * bits via the AMAP bit-field macros and records the total WRB count and
 * frame length for the hardware.
 */
static void wrb_fill_hdr(struct be_adapter *adapter, struct be_eth_hdr_wrb *hdr,
		struct sk_buff *skb, u32 wrb_cnt, u32 len)
{
	u16 vlan_tag;

	memset(hdr, 0, sizeof(*hdr));

	AMAP_SET_BITS(struct amap_eth_hdr_wrb, crc, hdr, 1);

	if (skb_is_gso(skb)) {
		AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso, hdr, 1);
		AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso_mss,
			hdr, skb_shinfo(skb)->gso_size);
		/* lso6 flag not used on Lancer */
		if (skb_is_gso_v6(skb) && !lancer_chip(adapter))
			AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso6, hdr, 1);
		/* Lancer A0 workaround: explicitly request ip/l4 checksum
		 * insertion alongside LSO
		 */
		if (lancer_chip(adapter) && adapter->sli_family ==
			LANCER_A0_SLI_FAMILY) {
			AMAP_SET_BITS(struct amap_eth_hdr_wrb, ipcs, hdr, 1);
			if (is_tcp_pkt(skb))
				AMAP_SET_BITS(struct amap_eth_hdr_wrb,
								tcpcs, hdr, 1);
			else if (is_udp_pkt(skb))
				AMAP_SET_BITS(struct amap_eth_hdr_wrb,
								udpcs, hdr, 1);
		}
	} else if (skb->ip_summed == CHECKSUM_PARTIAL) {
		/* non-GSO: offload the L4 checksum the stack left partial */
		if (is_tcp_pkt(skb))
			AMAP_SET_BITS(struct amap_eth_hdr_wrb, tcpcs, hdr, 1);
		else if (is_udp_pkt(skb))
			AMAP_SET_BITS(struct amap_eth_hdr_wrb, udpcs, hdr, 1);
	}

	if (vlan_tx_tag_present(skb)) {
		AMAP_SET_BITS(struct amap_eth_hdr_wrb, vlan, hdr, 1);
		vlan_tag = be_get_tx_vlan_tag(adapter, skb);
		AMAP_SET_BITS(struct amap_eth_hdr_wrb, vlan_tag, hdr, vlan_tag);
	}

	AMAP_SET_BITS(struct amap_eth_hdr_wrb, event, hdr, 1);
	AMAP_SET_BITS(struct amap_eth_hdr_wrb, complete, hdr, 1);
	AMAP_SET_BITS(struct amap_eth_hdr_wrb, num_wrb, hdr, wrb_cnt);
	AMAP_SET_BITS(struct amap_eth_hdr_wrb, len, hdr, len);
}
628
Ivan Vecera2b7bceb2011-02-02 08:05:12 +0000629static void unmap_tx_frag(struct device *dev, struct be_eth_wrb *wrb,
Sathya Perla7101e112010-03-22 20:41:12 +0000630 bool unmap_single)
631{
632 dma_addr_t dma;
633
634 be_dws_le_to_cpu(wrb, sizeof(*wrb));
635
636 dma = (u64)wrb->frag_pa_hi << 32 | (u64)wrb->frag_pa_lo;
FUJITA Tomonorib681ee72010-04-04 21:40:18 +0000637 if (wrb->frag_len) {
Sathya Perla7101e112010-03-22 20:41:12 +0000638 if (unmap_single)
Ivan Vecera2b7bceb2011-02-02 08:05:12 +0000639 dma_unmap_single(dev, dma, wrb->frag_len,
640 DMA_TO_DEVICE);
Sathya Perla7101e112010-03-22 20:41:12 +0000641 else
Ivan Vecera2b7bceb2011-02-02 08:05:12 +0000642 dma_unmap_page(dev, dma, wrb->frag_len, DMA_TO_DEVICE);
Sathya Perla7101e112010-03-22 20:41:12 +0000643 }
644}
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700645
/* Map the skb's linear data and page fragments for DMA and post one TX
 * WRB per fragment (plus an optional dummy WRB) to @txq, preceded by a
 * header WRB describing the whole packet.
 *
 * Returns the number of data bytes described by the posted WRBs, or 0 if
 * any DMA mapping failed (in which case all mappings made so far are
 * undone and txq->head is restored).
 */
static int make_tx_wrbs(struct be_adapter *adapter, struct be_queue_info *txq,
		struct sk_buff *skb, u32 wrb_cnt, bool dummy_wrb)
{
	dma_addr_t busaddr;
	int i, copied = 0;
	struct device *dev = &adapter->pdev->dev;
	struct sk_buff *first_skb = skb;
	struct be_eth_wrb *wrb;
	struct be_eth_hdr_wrb *hdr;
	bool map_single = false;
	u16 map_head;

	/* Reserve the first slot for the header WRB; it is filled in last,
	 * once the total copied length is known.
	 */
	hdr = queue_head_node(txq);
	queue_head_inc(txq);
	/* Remember where the data WRBs start so the error path can rewind */
	map_head = txq->head;

	/* Linear (non-paged) part of the skb, if any */
	if (skb->len > skb->data_len) {
		int len = skb_headlen(skb);
		busaddr = dma_map_single(dev, skb->data, len, DMA_TO_DEVICE);
		if (dma_mapping_error(dev, busaddr))
			goto dma_err;
		map_single = true;
		wrb = queue_head_node(txq);
		wrb_fill(wrb, busaddr, len);
		be_dws_cpu_to_le(wrb, sizeof(*wrb));
		queue_head_inc(txq);
		copied += len;
	}

	/* One WRB per page fragment */
	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		const struct skb_frag_struct *frag =
			&skb_shinfo(skb)->frags[i];
		busaddr = skb_frag_dma_map(dev, frag, 0,
					skb_frag_size(frag), DMA_TO_DEVICE);
		if (dma_mapping_error(dev, busaddr))
			goto dma_err;
		wrb = queue_head_node(txq);
		wrb_fill(wrb, busaddr, skb_frag_size(frag));
		be_dws_cpu_to_le(wrb, sizeof(*wrb));
		queue_head_inc(txq);
		copied += skb_frag_size(frag);
	}

	/* Caller requested an extra zero-length WRB (HW alignment quirk) */
	if (dummy_wrb) {
		wrb = queue_head_node(txq);
		wrb_fill(wrb, 0, 0);
		be_dws_cpu_to_le(wrb, sizeof(*wrb));
		queue_head_inc(txq);
	}

	wrb_fill_hdr(adapter, hdr, first_skb, wrb_cnt, copied);
	be_dws_cpu_to_le(hdr, sizeof(*hdr));

	return copied;
dma_err:
	/* Rewind to the first data WRB and unmap everything posted so far.
	 * Only the first WRB could have been a dma_map_single() mapping,
	 * hence map_single is cleared after the first iteration.
	 */
	txq->head = map_head;
	while (copied) {
		wrb = queue_head_node(txq);
		unmap_tx_frag(dev, wrb, map_single);
		map_single = false;
		copied -= wrb->frag_len;
		queue_head_inc(txq);
	}
	return 0;
}
711
Somnath Kotur93040ae2012-06-26 22:32:10 +0000712static struct sk_buff *be_insert_vlan_in_pkt(struct be_adapter *adapter,
713 struct sk_buff *skb)
714{
715 u16 vlan_tag = 0;
716
717 skb = skb_share_check(skb, GFP_ATOMIC);
718 if (unlikely(!skb))
719 return skb;
720
721 if (vlan_tx_tag_present(skb)) {
722 vlan_tag = be_get_tx_vlan_tag(adapter, skb);
723 __vlan_put_tag(skb, vlan_tag);
724 skb->vlan_tci = 0;
725 }
726
727 return skb;
728}
729
/* ndo_start_xmit handler: apply HW-bug workarounds, build the WRBs for
 * the skb and ring the TX doorbell. Always returns NETDEV_TX_OK; on any
 * failure the skb is dropped (freed) rather than requeued.
 */
static netdev_tx_t be_xmit(struct sk_buff *skb,
			struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_tx_obj *txo = &adapter->tx_obj[skb_get_queue_mapping(skb)];
	struct be_queue_info *txq = &txo->q;
	struct iphdr *ip = NULL;
	u32 wrb_cnt = 0, copied = 0;
	u32 start = txq->head, eth_hdr_len;
	bool dummy_wrb, stopped = false;

	eth_hdr_len = ntohs(skb->protocol) == ETH_P_8021Q ?
		VLAN_ETH_HLEN : ETH_HLEN;

	/* HW has a bug which considers padding bytes as legal
	 * and modifies the IPv4 hdr's 'tot_len' field;
	 * trim the frame back to the length the IP header claims.
	 */
	if (skb->len <= 60 && be_vlan_tag_chk(adapter, skb) &&
			is_ipv4_pkt(skb)) {
		ip = (struct iphdr *)ip_hdr(skb);
		pskb_trim(skb, eth_hdr_len + ntohs(ip->tot_len));
	}

	/* HW has a bug wherein it will calculate CSUM for VLAN
	 * pkts even though it is disabled.
	 * Manually insert VLAN in pkt.
	 * NOTE: be_insert_vlan_in_pkt() may replace or free the skb.
	 */
	if (skb->ip_summed != CHECKSUM_PARTIAL &&
			be_vlan_tag_chk(adapter, skb)) {
		skb = be_insert_vlan_in_pkt(adapter, skb);
		if (unlikely(!skb))
			goto tx_drop;
	}

	wrb_cnt = wrb_cnt_for_skb(adapter, skb, &dummy_wrb);

	/* copied == 0 means a DMA mapping failed inside make_tx_wrbs() */
	copied = make_tx_wrbs(adapter, txq, skb, wrb_cnt, dummy_wrb);
	if (copied) {
		int gso_segs = skb_shinfo(skb)->gso_segs;

		/* record the sent skb in the sent_skb table */
		BUG_ON(txo->sent_skb_list[start]);
		txo->sent_skb_list[start] = skb;

		/* Ensure txq has space for the next skb; Else stop the queue
		 * *BEFORE* ringing the tx doorbell, so that we serialze the
		 * tx compls of the current transmit which'll wake up the queue
		 */
		atomic_add(wrb_cnt, &txq->used);
		if ((BE_MAX_TX_FRAG_COUNT + atomic_read(&txq->used)) >=
								txq->len) {
			netif_stop_subqueue(netdev, skb_get_queue_mapping(skb));
			stopped = true;
		}

		be_txq_notify(adapter, txq->id, wrb_cnt);

		be_tx_stats_update(txo, wrb_cnt, copied, gso_segs, stopped);
	} else {
		/* Mapping failed: rewind the queue head and drop the skb */
		txq->head = start;
		dev_kfree_skb_any(skb);
	}
tx_drop:
	return NETDEV_TX_OK;
}
795
796static int be_change_mtu(struct net_device *netdev, int new_mtu)
797{
798 struct be_adapter *adapter = netdev_priv(netdev);
799 if (new_mtu < BE_MIN_MTU ||
Ajit Khaparde34a89b82010-02-09 01:32:43 +0000800 new_mtu > (BE_MAX_JUMBO_FRAME_SIZE -
801 (ETH_HLEN + ETH_FCS_LEN))) {
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700802 dev_info(&adapter->pdev->dev,
803 "MTU must be between %d and %d bytes\n",
Ajit Khaparde34a89b82010-02-09 01:32:43 +0000804 BE_MIN_MTU,
805 (BE_MAX_JUMBO_FRAME_SIZE - (ETH_HLEN + ETH_FCS_LEN)));
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700806 return -EINVAL;
807 }
808 dev_info(&adapter->pdev->dev, "MTU changed from %d to %d bytes\n",
809 netdev->mtu, new_mtu);
810 netdev->mtu = new_mtu;
811 return 0;
812}
813
814/*
Ajit Khaparde82903e42010-02-09 01:34:57 +0000815 * A max of 64 (BE_NUM_VLANS_SUPPORTED) vlans can be configured in BE.
816 * If the user configures more, place BE in vlan promiscuous mode.
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700817 */
Sathya Perla10329df2012-06-05 19:37:18 +0000818static int be_vid_config(struct be_adapter *adapter)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700819{
Sathya Perla10329df2012-06-05 19:37:18 +0000820 u16 vids[BE_NUM_VLANS_SUPPORTED];
821 u16 num = 0, i;
Ajit Khaparde82903e42010-02-09 01:34:57 +0000822 int status = 0;
Ajit Khaparde1da87b72010-07-23 01:51:22 +0000823
Sathya Perlac0e64ef2011-08-02 19:57:43 +0000824 /* No need to further configure vids if in promiscuous mode */
825 if (adapter->promiscuous)
826 return 0;
827
Padmanabh Ratnakar0fc16eb2012-04-25 01:46:06 +0000828 if (adapter->vlans_added > adapter->max_vlans)
829 goto set_vlan_promisc;
830
831 /* Construct VLAN Table to give to HW */
832 for (i = 0; i < VLAN_N_VID; i++)
833 if (adapter->vlan_tag[i])
Sathya Perla10329df2012-06-05 19:37:18 +0000834 vids[num++] = cpu_to_le16(i);
Padmanabh Ratnakar0fc16eb2012-04-25 01:46:06 +0000835
836 status = be_cmd_vlan_config(adapter, adapter->if_handle,
Sathya Perla10329df2012-06-05 19:37:18 +0000837 vids, num, 1, 0);
Padmanabh Ratnakar0fc16eb2012-04-25 01:46:06 +0000838
839 /* Set to VLAN promisc mode as setting VLAN filter failed */
840 if (status) {
841 dev_info(&adapter->pdev->dev, "Exhausted VLAN HW filters.\n");
842 dev_info(&adapter->pdev->dev, "Disabling HW VLAN filtering.\n");
843 goto set_vlan_promisc;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700844 }
Ajit Khaparde1da87b72010-07-23 01:51:22 +0000845
Sathya Perlab31c50a2009-09-17 10:30:13 -0700846 return status;
Padmanabh Ratnakar0fc16eb2012-04-25 01:46:06 +0000847
848set_vlan_promisc:
849 status = be_cmd_vlan_config(adapter, adapter->if_handle,
850 NULL, 0, 1, 1);
851 return status;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700852}
853
Jiri Pirko8e586132011-12-08 19:52:37 -0500854static int be_vlan_add_vid(struct net_device *netdev, u16 vid)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700855{
856 struct be_adapter *adapter = netdev_priv(netdev);
Ajit Khaparde80817cb2011-12-30 12:15:12 +0000857 int status = 0;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700858
Ajit Khaparde80817cb2011-12-30 12:15:12 +0000859 if (!be_physfn(adapter)) {
860 status = -EINVAL;
861 goto ret;
862 }
Sarveshwar Bandiba343c72010-03-31 02:56:12 +0000863
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700864 adapter->vlan_tag[vid] = 1;
Ajit Khaparde82903e42010-02-09 01:34:57 +0000865 if (adapter->vlans_added <= (adapter->max_vlans + 1))
Sathya Perla10329df2012-06-05 19:37:18 +0000866 status = be_vid_config(adapter);
Jiri Pirko8e586132011-12-08 19:52:37 -0500867
Ajit Khaparde80817cb2011-12-30 12:15:12 +0000868 if (!status)
869 adapter->vlans_added++;
870 else
871 adapter->vlan_tag[vid] = 0;
872ret:
873 return status;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700874}
875
Jiri Pirko8e586132011-12-08 19:52:37 -0500876static int be_vlan_rem_vid(struct net_device *netdev, u16 vid)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700877{
878 struct be_adapter *adapter = netdev_priv(netdev);
Ajit Khaparde80817cb2011-12-30 12:15:12 +0000879 int status = 0;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700880
Ajit Khaparde80817cb2011-12-30 12:15:12 +0000881 if (!be_physfn(adapter)) {
882 status = -EINVAL;
883 goto ret;
884 }
Sarveshwar Bandiba343c72010-03-31 02:56:12 +0000885
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700886 adapter->vlan_tag[vid] = 0;
Ajit Khaparde82903e42010-02-09 01:34:57 +0000887 if (adapter->vlans_added <= adapter->max_vlans)
Sathya Perla10329df2012-06-05 19:37:18 +0000888 status = be_vid_config(adapter);
Jiri Pirko8e586132011-12-08 19:52:37 -0500889
Ajit Khaparde80817cb2011-12-30 12:15:12 +0000890 if (!status)
891 adapter->vlans_added--;
892 else
893 adapter->vlan_tag[vid] = 1;
894ret:
895 return status;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700896}
897
/* ndo_set_rx_mode handler: sync the HW RX filters (promisc, multicast,
 * unicast MAC list) with the netdev's current flags and address lists.
 */
static void be_set_rx_mode(struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	int status;

	if (netdev->flags & IFF_PROMISC) {
		be_cmd_rx_filter(adapter, IFF_PROMISC, ON);
		adapter->promiscuous = true;
		goto done;
	}

	/* BE was previously in promiscuous mode; disable it */
	if (adapter->promiscuous) {
		adapter->promiscuous = false;
		be_cmd_rx_filter(adapter, IFF_PROMISC, OFF);

		/* Re-program VLAN filters that promisc mode bypassed */
		if (adapter->vlans_added)
			be_vid_config(adapter);
	}

	/* Enable multicast promisc if num configured exceeds what we support */
	if (netdev->flags & IFF_ALLMULTI ||
	    netdev_mc_count(netdev) > BE_MAX_MC) {
		be_cmd_rx_filter(adapter, IFF_ALLMULTI, ON);
		goto done;
	}

	/* Unicast list changed: rebuild the pmac table from scratch */
	if (netdev_uc_count(netdev) != adapter->uc_macs) {
		struct netdev_hw_addr *ha;
		int i = 1; /* First slot is claimed by the Primary MAC */

		/* Delete every previously-added secondary unicast MAC */
		for (; adapter->uc_macs > 0; adapter->uc_macs--, i++) {
			be_cmd_pmac_del(adapter, adapter->if_handle,
					adapter->pmac_id[i], 0);
		}

		/* Too many unicast addrs for HW filters: go promiscuous */
		if (netdev_uc_count(netdev) > adapter->max_pmac_cnt) {
			be_cmd_rx_filter(adapter, IFF_PROMISC, ON);
			adapter->promiscuous = true;
			goto done;
		}

		netdev_for_each_uc_addr(ha, adapter->netdev) {
			adapter->uc_macs++; /* First slot is for Primary MAC */
			be_cmd_pmac_add(adapter, (u8 *)ha->addr,
					adapter->if_handle,
					&adapter->pmac_id[adapter->uc_macs], 0);
		}
	}

	status = be_cmd_rx_filter(adapter, IFF_MULTICAST, ON);

	/* Set to MCAST promisc mode if setting MULTICAST address fails */
	if (status) {
		dev_info(&adapter->pdev->dev, "Exhausted multicast HW filters.\n");
		dev_info(&adapter->pdev->dev, "Disabling HW multicast filtering.\n");
		be_cmd_rx_filter(adapter, IFF_ALLMULTI, ON);
	}
done:
	return;
}
959
Sarveshwar Bandiba343c72010-03-31 02:56:12 +0000960static int be_set_vf_mac(struct net_device *netdev, int vf, u8 *mac)
961{
962 struct be_adapter *adapter = netdev_priv(netdev);
Sathya Perla11ac75e2011-12-13 00:58:50 +0000963 struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf];
Sarveshwar Bandiba343c72010-03-31 02:56:12 +0000964 int status;
965
Sathya Perla11ac75e2011-12-13 00:58:50 +0000966 if (!sriov_enabled(adapter))
Sarveshwar Bandiba343c72010-03-31 02:56:12 +0000967 return -EPERM;
968
Sathya Perla11ac75e2011-12-13 00:58:50 +0000969 if (!is_valid_ether_addr(mac) || vf >= adapter->num_vfs)
Sarveshwar Bandiba343c72010-03-31 02:56:12 +0000970 return -EINVAL;
971
Padmanabh Ratnakar590c3912011-11-25 05:47:26 +0000972 if (lancer_chip(adapter)) {
973 status = be_cmd_set_mac_list(adapter, mac, 1, vf + 1);
974 } else {
Sathya Perla11ac75e2011-12-13 00:58:50 +0000975 status = be_cmd_pmac_del(adapter, vf_cfg->if_handle,
976 vf_cfg->pmac_id, vf + 1);
Sarveshwar Bandiba343c72010-03-31 02:56:12 +0000977
Sathya Perla11ac75e2011-12-13 00:58:50 +0000978 status = be_cmd_pmac_add(adapter, mac, vf_cfg->if_handle,
979 &vf_cfg->pmac_id, vf + 1);
Padmanabh Ratnakar590c3912011-11-25 05:47:26 +0000980 }
981
Ajit Khaparde64600ea2010-07-23 01:50:34 +0000982 if (status)
Sarveshwar Bandiba343c72010-03-31 02:56:12 +0000983 dev_err(&adapter->pdev->dev, "MAC %pM set on VF %d Failed\n",
984 mac, vf);
Ajit Khaparde64600ea2010-07-23 01:50:34 +0000985 else
Sathya Perla11ac75e2011-12-13 00:58:50 +0000986 memcpy(vf_cfg->mac_addr, mac, ETH_ALEN);
Ajit Khaparde64600ea2010-07-23 01:50:34 +0000987
Sarveshwar Bandiba343c72010-03-31 02:56:12 +0000988 return status;
989}
990
Ajit Khaparde64600ea2010-07-23 01:50:34 +0000991static int be_get_vf_config(struct net_device *netdev, int vf,
992 struct ifla_vf_info *vi)
993{
994 struct be_adapter *adapter = netdev_priv(netdev);
Sathya Perla11ac75e2011-12-13 00:58:50 +0000995 struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf];
Ajit Khaparde64600ea2010-07-23 01:50:34 +0000996
Sathya Perla11ac75e2011-12-13 00:58:50 +0000997 if (!sriov_enabled(adapter))
Ajit Khaparde64600ea2010-07-23 01:50:34 +0000998 return -EPERM;
999
Sathya Perla11ac75e2011-12-13 00:58:50 +00001000 if (vf >= adapter->num_vfs)
Ajit Khaparde64600ea2010-07-23 01:50:34 +00001001 return -EINVAL;
1002
1003 vi->vf = vf;
Sathya Perla11ac75e2011-12-13 00:58:50 +00001004 vi->tx_rate = vf_cfg->tx_rate;
1005 vi->vlan = vf_cfg->vlan_tag;
Ajit Khaparde64600ea2010-07-23 01:50:34 +00001006 vi->qos = 0;
Sathya Perla11ac75e2011-12-13 00:58:50 +00001007 memcpy(&vi->mac, vf_cfg->mac_addr, ETH_ALEN);
Ajit Khaparde64600ea2010-07-23 01:50:34 +00001008
1009 return 0;
1010}
1011
Ajit Khaparde1da87b72010-07-23 01:51:22 +00001012static int be_set_vf_vlan(struct net_device *netdev,
1013 int vf, u16 vlan, u8 qos)
1014{
1015 struct be_adapter *adapter = netdev_priv(netdev);
1016 int status = 0;
1017
Sathya Perla11ac75e2011-12-13 00:58:50 +00001018 if (!sriov_enabled(adapter))
Ajit Khaparde1da87b72010-07-23 01:51:22 +00001019 return -EPERM;
1020
Sathya Perla11ac75e2011-12-13 00:58:50 +00001021 if (vf >= adapter->num_vfs || vlan > 4095)
Ajit Khaparde1da87b72010-07-23 01:51:22 +00001022 return -EINVAL;
1023
1024 if (vlan) {
Ajit Khapardef1f3ee12012-03-18 06:23:41 +00001025 if (adapter->vf_cfg[vf].vlan_tag != vlan) {
1026 /* If this is new value, program it. Else skip. */
1027 adapter->vf_cfg[vf].vlan_tag = vlan;
1028
1029 status = be_cmd_set_hsw_config(adapter, vlan,
1030 vf + 1, adapter->vf_cfg[vf].if_handle);
1031 }
Ajit Khaparde1da87b72010-07-23 01:51:22 +00001032 } else {
Ajit Khapardef1f3ee12012-03-18 06:23:41 +00001033 /* Reset Transparent Vlan Tagging. */
Sathya Perla11ac75e2011-12-13 00:58:50 +00001034 adapter->vf_cfg[vf].vlan_tag = 0;
Ajit Khapardef1f3ee12012-03-18 06:23:41 +00001035 vlan = adapter->vf_cfg[vf].def_vid;
1036 status = be_cmd_set_hsw_config(adapter, vlan, vf + 1,
1037 adapter->vf_cfg[vf].if_handle);
Ajit Khaparde1da87b72010-07-23 01:51:22 +00001038 }
1039
Ajit Khaparde1da87b72010-07-23 01:51:22 +00001040
1041 if (status)
1042 dev_info(&adapter->pdev->dev,
1043 "VLAN %d config on VF %d failed\n", vlan, vf);
1044 return status;
1045}
1046
Ajit Khapardee1d18732010-07-23 01:52:13 +00001047static int be_set_vf_tx_rate(struct net_device *netdev,
1048 int vf, int rate)
1049{
1050 struct be_adapter *adapter = netdev_priv(netdev);
1051 int status = 0;
1052
Sathya Perla11ac75e2011-12-13 00:58:50 +00001053 if (!sriov_enabled(adapter))
Ajit Khapardee1d18732010-07-23 01:52:13 +00001054 return -EPERM;
1055
Ajit Khaparde94f434c2011-12-30 12:15:30 +00001056 if (vf >= adapter->num_vfs)
Ajit Khapardee1d18732010-07-23 01:52:13 +00001057 return -EINVAL;
1058
Ajit Khaparde94f434c2011-12-30 12:15:30 +00001059 if (rate < 100 || rate > 10000) {
1060 dev_err(&adapter->pdev->dev,
1061 "tx rate must be between 100 and 10000 Mbps\n");
1062 return -EINVAL;
1063 }
Ajit Khapardee1d18732010-07-23 01:52:13 +00001064
Ajit Khaparde856c4012011-02-11 13:32:32 +00001065 status = be_cmd_set_qos(adapter, rate / 10, vf + 1);
Ajit Khapardee1d18732010-07-23 01:52:13 +00001066
1067 if (status)
Ajit Khaparde94f434c2011-12-30 12:15:30 +00001068 dev_err(&adapter->pdev->dev,
Ajit Khapardee1d18732010-07-23 01:52:13 +00001069 "tx rate %d on VF %d failed\n", rate, vf);
Ajit Khaparde94f434c2011-12-30 12:15:30 +00001070 else
1071 adapter->vf_cfg[vf].tx_rate = rate;
Ajit Khapardee1d18732010-07-23 01:52:13 +00001072 return status;
1073}
1074
/* Walk the PCI bus and count this adapter's VFs by matching the devfn
 * predicted from the SR-IOV capability's VF offset/stride.
 * Returns the number of VFs in @vf_state (ASSIGNED -> VFs currently
 * assigned to a guest; otherwise all enabled VFs).
 */
static int be_find_vfs(struct be_adapter *adapter, int vf_state)
{
	struct pci_dev *dev, *pdev = adapter->pdev;
	int vfs = 0, assigned_vfs = 0, pos, vf_fn;
	u16 offset, stride;

	pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_SRIOV);
	if (!pos)
		return 0;	/* no SR-IOV capability -> no VFs */
	pci_read_config_word(pdev, pos + PCI_SRIOV_VF_OFFSET, &offset);
	pci_read_config_word(pdev, pos + PCI_SRIOV_VF_STRIDE, &stride);

	/* Scan all devices with this vendor id and pick out our VFs */
	dev = pci_get_device(pdev->vendor, PCI_ANY_ID, NULL);
	while (dev) {
		/* devfn of the vfs-th VF per the SR-IOV offset/stride rule */
		vf_fn = (pdev->devfn + offset + stride * vfs) & 0xFFFF;
		/* NOTE(review): this assumes every VF lives on the same bus
		 * number as the PF; a large offset/stride can place VFs on a
		 * higher bus — confirm for the supported configurations.
		 */
		if (dev->is_virtfn && dev->devfn == vf_fn &&
			dev->bus->number == pdev->bus->number) {
			vfs++;
			if (dev->dev_flags & PCI_DEV_FLAGS_ASSIGNED)
				assigned_vfs++;
		}
		dev = pci_get_device(pdev->vendor, PCI_ANY_ID, dev);
	}
	return (vf_state == ASSIGNED) ? assigned_vfs : vfs;
}
1100
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001101static void be_eqd_update(struct be_adapter *adapter, struct be_eq_obj *eqo)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001102{
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001103 struct be_rx_stats *stats = rx_stats(&adapter->rx_obj[eqo->idx]);
Sathya Perla4097f662009-03-24 16:40:13 -07001104 ulong now = jiffies;
Sathya Perlaac124ff2011-07-25 19:10:14 +00001105 ulong delta = now - stats->rx_jiffies;
Sathya Perlaab1594e2011-07-25 19:10:15 +00001106 u64 pkts;
1107 unsigned int start, eqd;
Sathya Perlaac124ff2011-07-25 19:10:14 +00001108
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001109 if (!eqo->enable_aic) {
1110 eqd = eqo->eqd;
1111 goto modify_eqd;
1112 }
1113
1114 if (eqo->idx >= adapter->num_rx_qs)
Sathya Perlaac124ff2011-07-25 19:10:14 +00001115 return;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001116
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001117 stats = rx_stats(&adapter->rx_obj[eqo->idx]);
1118
Sathya Perla4097f662009-03-24 16:40:13 -07001119 /* Wrapped around */
Sathya Perla3abcded2010-10-03 22:12:27 -07001120 if (time_before(now, stats->rx_jiffies)) {
1121 stats->rx_jiffies = now;
Sathya Perla4097f662009-03-24 16:40:13 -07001122 return;
1123 }
1124
Sathya Perlaac124ff2011-07-25 19:10:14 +00001125 /* Update once a second */
1126 if (delta < HZ)
Sathya Perla4097f662009-03-24 16:40:13 -07001127 return;
1128
Sathya Perlaab1594e2011-07-25 19:10:15 +00001129 do {
1130 start = u64_stats_fetch_begin_bh(&stats->sync);
1131 pkts = stats->rx_pkts;
1132 } while (u64_stats_fetch_retry_bh(&stats->sync, start));
1133
Eric Dumazet68c3e5a2011-08-09 06:23:07 +00001134 stats->rx_pps = (unsigned long)(pkts - stats->rx_pkts_prev) / (delta / HZ);
Sathya Perlaab1594e2011-07-25 19:10:15 +00001135 stats->rx_pkts_prev = pkts;
Sathya Perla3abcded2010-10-03 22:12:27 -07001136 stats->rx_jiffies = now;
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001137 eqd = (stats->rx_pps / 110000) << 3;
1138 eqd = min(eqd, eqo->max_eqd);
1139 eqd = max(eqd, eqo->min_eqd);
Sathya Perlaac124ff2011-07-25 19:10:14 +00001140 if (eqd < 10)
1141 eqd = 0;
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001142
1143modify_eqd:
1144 if (eqd != eqo->cur_eqd) {
1145 be_cmd_modify_eqd(adapter, eqo->q.id, eqd);
1146 eqo->cur_eqd = eqd;
Sathya Perlaac124ff2011-07-25 19:10:14 +00001147 }
Sathya Perla4097f662009-03-24 16:40:13 -07001148}
1149
Sathya Perla3abcded2010-10-03 22:12:27 -07001150static void be_rx_stats_update(struct be_rx_obj *rxo,
Sathya Perla2e588f82011-03-11 02:49:26 +00001151 struct be_rx_compl_info *rxcp)
Sathya Perla4097f662009-03-24 16:40:13 -07001152{
Sathya Perlaac124ff2011-07-25 19:10:14 +00001153 struct be_rx_stats *stats = rx_stats(rxo);
Sathya Perla4097f662009-03-24 16:40:13 -07001154
Sathya Perlaab1594e2011-07-25 19:10:15 +00001155 u64_stats_update_begin(&stats->sync);
Sathya Perla3abcded2010-10-03 22:12:27 -07001156 stats->rx_compl++;
Sathya Perla2e588f82011-03-11 02:49:26 +00001157 stats->rx_bytes += rxcp->pkt_size;
Sathya Perla3abcded2010-10-03 22:12:27 -07001158 stats->rx_pkts++;
Sathya Perla2e588f82011-03-11 02:49:26 +00001159 if (rxcp->pkt_type == BE_MULTICAST_PACKET)
Sathya Perla3abcded2010-10-03 22:12:27 -07001160 stats->rx_mcast_pkts++;
Sathya Perla2e588f82011-03-11 02:49:26 +00001161 if (rxcp->err)
Sathya Perlaac124ff2011-07-25 19:10:14 +00001162 stats->rx_compl_err++;
Sathya Perlaab1594e2011-07-25 19:10:15 +00001163 u64_stats_update_end(&stats->sync);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001164}
1165
Sathya Perla2e588f82011-03-11 02:49:26 +00001166static inline bool csum_passed(struct be_rx_compl_info *rxcp)
Ajit Khaparde728a9972009-04-13 15:41:22 -07001167{
Padmanabh Ratnakar19fad862011-03-07 03:08:16 +00001168 /* L4 checksum is not reliable for non TCP/UDP packets.
1169 * Also ignore ipcksm for ipv6 pkts */
Sathya Perla2e588f82011-03-11 02:49:26 +00001170 return (rxcp->tcpf || rxcp->udpf) && rxcp->l4_csum &&
1171 (rxcp->ip_csum || rxcp->ipv6);
Ajit Khaparde728a9972009-04-13 15:41:22 -07001172}
1173
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001174static struct be_rx_page_info *get_rx_page_info(struct be_rx_obj *rxo,
1175 u16 frag_idx)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001176{
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001177 struct be_adapter *adapter = rxo->adapter;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001178 struct be_rx_page_info *rx_page_info;
Sathya Perla3abcded2010-10-03 22:12:27 -07001179 struct be_queue_info *rxq = &rxo->q;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001180
Sathya Perla3abcded2010-10-03 22:12:27 -07001181 rx_page_info = &rxo->page_info_tbl[frag_idx];
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001182 BUG_ON(!rx_page_info->page);
1183
Ajit Khaparde205859a2010-02-09 01:34:21 +00001184 if (rx_page_info->last_page_user) {
Ivan Vecera2b7bceb2011-02-02 08:05:12 +00001185 dma_unmap_page(&adapter->pdev->dev,
1186 dma_unmap_addr(rx_page_info, bus),
1187 adapter->big_page_size, DMA_FROM_DEVICE);
Ajit Khaparde205859a2010-02-09 01:34:21 +00001188 rx_page_info->last_page_user = false;
1189 }
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001190
1191 atomic_dec(&rxq->used);
1192 return rx_page_info;
1193}
1194
1195/* Throwaway the data in the Rx completion */
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001196static void be_rx_compl_discard(struct be_rx_obj *rxo,
1197 struct be_rx_compl_info *rxcp)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001198{
Sathya Perla3abcded2010-10-03 22:12:27 -07001199 struct be_queue_info *rxq = &rxo->q;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001200 struct be_rx_page_info *page_info;
Sathya Perla2e588f82011-03-11 02:49:26 +00001201 u16 i, num_rcvd = rxcp->num_rcvd;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001202
Padmanabh Ratnakare80d9da2011-03-07 03:07:58 +00001203 for (i = 0; i < num_rcvd; i++) {
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001204 page_info = get_rx_page_info(rxo, rxcp->rxq_idx);
Padmanabh Ratnakare80d9da2011-03-07 03:07:58 +00001205 put_page(page_info->page);
1206 memset(page_info, 0, sizeof(*page_info));
Sathya Perla2e588f82011-03-11 02:49:26 +00001207 index_inc(&rxcp->rxq_idx, rxq->len);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001208 }
1209}
1210
/*
 * skb_fill_rx_data forms a complete skb for an ether frame
 * indicated by rxcp: the first fragment's header bytes are copied into
 * the skb's linear area and the remaining payload is attached as page
 * fragments, coalescing consecutive fragments that share a page.
 */
static void skb_fill_rx_data(struct be_rx_obj *rxo, struct sk_buff *skb,
			struct be_rx_compl_info *rxcp)
{
	struct be_queue_info *rxq = &rxo->q;
	struct be_rx_page_info *page_info;
	u16 i, j;
	u16 hdr_len, curr_frag_len, remaining;
	u8 *start;

	page_info = get_rx_page_info(rxo, rxcp->rxq_idx);
	start = page_address(page_info->page) + page_info->page_offset;
	prefetch(start);

	/* Copy data in the first descriptor of this completion */
	curr_frag_len = min(rxcp->pkt_size, rx_frag_size);

	skb->len = curr_frag_len;
	if (curr_frag_len <= BE_HDR_LEN) { /* tiny packet */
		/* Whole frame fits in the linear area; no page frag needed */
		memcpy(skb->data, start, curr_frag_len);
		/* Complete packet has now been moved to data */
		put_page(page_info->page);
		skb->data_len = 0;
		skb->tail += curr_frag_len;
	} else {
		/* Copy only the Ethernet header; keep payload in the page */
		hdr_len = ETH_HLEN;
		memcpy(skb->data, start, hdr_len);
		skb_shinfo(skb)->nr_frags = 1;
		skb_frag_set_page(skb, 0, page_info->page);
		skb_shinfo(skb)->frags[0].page_offset =
					page_info->page_offset + hdr_len;
		skb_frag_size_set(&skb_shinfo(skb)->frags[0], curr_frag_len - hdr_len);
		skb->data_len = curr_frag_len - hdr_len;
		skb->truesize += rx_frag_size;
		skb->tail += hdr_len;
	}
	page_info->page = NULL;	/* page ref now owned by the skb (or dropped) */

	if (rxcp->pkt_size <= rx_frag_size) {
		/* Single-fragment packet: HW must have reported exactly one */
		BUG_ON(rxcp->num_rcvd != 1);
		return;
	}

	/* More frags present for this completion */
	index_inc(&rxcp->rxq_idx, rxq->len);
	remaining = rxcp->pkt_size - curr_frag_len;
	for (i = 1, j = 0; i < rxcp->num_rcvd; i++) {
		page_info = get_rx_page_info(rxo, rxcp->rxq_idx);
		/* Last fragment may be shorter than rx_frag_size */
		curr_frag_len = min(remaining, rx_frag_size);

		/* Coalesce all frags from the same physical page in one slot */
		if (page_info->page_offset == 0) {
			/* Fresh page */
			j++;
			skb_frag_set_page(skb, j, page_info->page);
			skb_shinfo(skb)->frags[j].page_offset =
							page_info->page_offset;
			skb_frag_size_set(&skb_shinfo(skb)->frags[j], 0);
			skb_shinfo(skb)->nr_frags++;
		} else {
			/* Same page as frag j; drop the duplicate reference */
			put_page(page_info->page);
		}

		skb_frag_size_add(&skb_shinfo(skb)->frags[j], curr_frag_len);
		skb->len += curr_frag_len;
		skb->data_len += curr_frag_len;
		skb->truesize += rx_frag_size;
		remaining -= curr_frag_len;
		index_inc(&rxcp->rxq_idx, rxq->len);
		page_info->page = NULL;
	}
	BUG_ON(j > MAX_SKB_FRAGS);
}
1287
/* Process the RX completion indicated by rxcp when GRO is disabled.
 * Allocates a linear skb, attaches the received frags to it and hands
 * the packet to the stack via netif_receive_skb(). On skb-alloc failure
 * the completion's posted RX buffers are discarded (recycled) instead.
 */
static void be_rx_compl_process(struct be_rx_obj *rxo,
			struct be_rx_compl_info *rxcp)
{
	struct be_adapter *adapter = rxo->adapter;
	struct net_device *netdev = adapter->netdev;
	struct sk_buff *skb;

	skb = netdev_alloc_skb_ip_align(netdev, BE_RX_SKB_ALLOC_SIZE);
	if (unlikely(!skb)) {
		/* Can't build an skb: count the drop and free the frags
		 * backing this completion so the RX ring can be refilled.
		 */
		rx_stats(rxo)->rx_drops_no_skbs++;
		be_rx_compl_discard(rxo, rxcp);
		return;
	}

	skb_fill_rx_data(rxo, skb, rxcp);

	/* Trust the HW checksum only if RXCSUM is enabled on the netdev
	 * and the completion flags say the csum checks passed.
	 */
	if (likely((netdev->features & NETIF_F_RXCSUM) && csum_passed(rxcp)))
		skb->ip_summed = CHECKSUM_UNNECESSARY;
	else
		skb_checksum_none_assert(skb);

	skb->protocol = eth_type_trans(skb, netdev);
	skb_record_rx_queue(skb, rxo - &adapter->rx_obj[0]);
	if (netdev->features & NETIF_F_RXHASH)
		skb->rxhash = rxcp->rss_hash;

	/* rxcp->vlanf has already been sanitized in be_rx_compl_get() */
	if (rxcp->vlanf)
		__vlan_hwaccel_put_tag(skb, rxcp->vlan_tag);

	netif_receive_skb(skb);
}
1321
/* Process the RX completion indicated by rxcp when GRO is enabled.
 * Builds a frag-only skb from napi_get_frags() by attaching the posted
 * RX page fragments directly (zero-copy) and passes it to GRO.
 */
void be_rx_compl_process_gro(struct be_rx_obj *rxo, struct napi_struct *napi,
				struct be_rx_compl_info *rxcp)
{
	struct be_adapter *adapter = rxo->adapter;
	struct be_rx_page_info *page_info;
	struct sk_buff *skb = NULL;
	struct be_queue_info *rxq = &rxo->q;
	u16 remaining, curr_frag_len;
	u16 i, j;

	skb = napi_get_frags(napi);
	if (!skb) {
		/* No skb from napi: recycle the posted buffers and bail */
		be_rx_compl_discard(rxo, rxcp);
		return;
	}

	remaining = rxcp->pkt_size;
	/* j indexes skb frag slots; starts at -1 so the first iteration
	 * (i == 0) always opens slot 0 */
	for (i = 0, j = -1; i < rxcp->num_rcvd; i++) {
		page_info = get_rx_page_info(rxo, rxcp->rxq_idx);

		curr_frag_len = min(remaining, rx_frag_size);

		/* Coalesce all frags from the same physical page in one slot */
		if (i == 0 || page_info->page_offset == 0) {
			/* First frag or Fresh page */
			j++;
			skb_frag_set_page(skb, j, page_info->page);
			skb_shinfo(skb)->frags[j].page_offset =
							page_info->page_offset;
			skb_frag_size_set(&skb_shinfo(skb)->frags[j], 0);
		} else {
			/* Same page as previous frag: the slot already holds
			 * a reference, drop this duplicate one */
			put_page(page_info->page);
		}
		skb_frag_size_add(&skb_shinfo(skb)->frags[j], curr_frag_len);
		skb->truesize += rx_frag_size;
		remaining -= curr_frag_len;
		index_inc(&rxcp->rxq_idx, rxq->len);
		memset(page_info, 0, sizeof(*page_info));
	}
	BUG_ON(j > MAX_SKB_FRAGS);

	skb_shinfo(skb)->nr_frags = j + 1;
	skb->len = rxcp->pkt_size;
	skb->data_len = rxcp->pkt_size;
	/* GRO path is taken only for packets whose csum passed */
	skb->ip_summed = CHECKSUM_UNNECESSARY;
	skb_record_rx_queue(skb, rxo - &adapter->rx_obj[0]);
	if (adapter->netdev->features & NETIF_F_RXHASH)
		skb->rxhash = rxcp->rss_hash;

	if (rxcp->vlanf)
		__vlan_hwaccel_put_tag(skb, rxcp->vlan_tag);

	napi_gro_frags(napi);
}
1377
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001378static void be_parse_rx_compl_v1(struct be_eth_rx_compl *compl,
1379 struct be_rx_compl_info *rxcp)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001380{
Sathya Perla2e588f82011-03-11 02:49:26 +00001381 rxcp->pkt_size =
1382 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, pktsize, compl);
1383 rxcp->vlanf = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, vtp, compl);
1384 rxcp->err = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, err, compl);
1385 rxcp->tcpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, tcpf, compl);
Padmanabh Ratnakar9ecb42f2011-03-15 14:57:09 -07001386 rxcp->udpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, udpf, compl);
Sathya Perla2e588f82011-03-11 02:49:26 +00001387 rxcp->ip_csum =
1388 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, ipcksm, compl);
1389 rxcp->l4_csum =
1390 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, l4_cksm, compl);
1391 rxcp->ipv6 =
1392 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, ip_version, compl);
1393 rxcp->rxq_idx =
1394 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, fragndx, compl);
1395 rxcp->num_rcvd =
1396 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, numfrags, compl);
1397 rxcp->pkt_type =
1398 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, cast_enc, compl);
Ajit Khaparde4b972912011-04-06 18:07:43 +00001399 rxcp->rss_hash =
Sarveshwar Bandic2979772012-07-25 21:29:50 +00001400 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, rsshash, compl);
Sathya Perla15d72182011-03-21 20:49:26 +00001401 if (rxcp->vlanf) {
1402 rxcp->vtm = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, vtm,
David S. Miller3c709f82011-05-11 14:26:15 -04001403 compl);
1404 rxcp->vlan_tag = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, vlan_tag,
1405 compl);
Sathya Perla15d72182011-03-21 20:49:26 +00001406 }
Sathya Perla12004ae2011-08-02 19:57:46 +00001407 rxcp->port = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, port, compl);
Sathya Perla2e588f82011-03-11 02:49:26 +00001408}
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001409
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001410static void be_parse_rx_compl_v0(struct be_eth_rx_compl *compl,
1411 struct be_rx_compl_info *rxcp)
Sathya Perla2e588f82011-03-11 02:49:26 +00001412{
1413 rxcp->pkt_size =
1414 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, pktsize, compl);
1415 rxcp->vlanf = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, vtp, compl);
1416 rxcp->err = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, err, compl);
1417 rxcp->tcpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, tcpf, compl);
Padmanabh Ratnakar9ecb42f2011-03-15 14:57:09 -07001418 rxcp->udpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, udpf, compl);
Sathya Perla2e588f82011-03-11 02:49:26 +00001419 rxcp->ip_csum =
1420 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, ipcksm, compl);
1421 rxcp->l4_csum =
1422 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, l4_cksm, compl);
1423 rxcp->ipv6 =
1424 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, ip_version, compl);
1425 rxcp->rxq_idx =
1426 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, fragndx, compl);
1427 rxcp->num_rcvd =
1428 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, numfrags, compl);
1429 rxcp->pkt_type =
1430 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, cast_enc, compl);
Ajit Khaparde4b972912011-04-06 18:07:43 +00001431 rxcp->rss_hash =
Sarveshwar Bandic2979772012-07-25 21:29:50 +00001432 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, rsshash, compl);
Sathya Perla15d72182011-03-21 20:49:26 +00001433 if (rxcp->vlanf) {
1434 rxcp->vtm = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, vtm,
David S. Miller3c709f82011-05-11 14:26:15 -04001435 compl);
1436 rxcp->vlan_tag = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, vlan_tag,
1437 compl);
Sathya Perla15d72182011-03-21 20:49:26 +00001438 }
Sathya Perla12004ae2011-08-02 19:57:46 +00001439 rxcp->port = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, port, compl);
Sathya Perla2e588f82011-03-11 02:49:26 +00001440}
1441
/* Fetch the next valid RX completion from rxo's CQ, or NULL if none.
 * Parses it into rxo->rxcp (v0 or v1 layout), sanitizes the vlan info,
 * invalidates the entry and advances the CQ tail.
 */
static struct be_rx_compl_info *be_rx_compl_get(struct be_rx_obj *rxo)
{
	struct be_eth_rx_compl *compl = queue_tail_node(&rxo->cq);
	struct be_rx_compl_info *rxcp = &rxo->rxcp;
	struct be_adapter *adapter = rxo->adapter;

	/* For checking the valid bit it is Ok to use either definition as the
	 * valid bit is at the same position in both v0 and v1 Rx compl */
	if (compl->dw[offsetof(struct amap_eth_rx_compl_v1, valid) / 32] == 0)
		return NULL;

	/* Read barrier: the valid bit must be observed before the rest of
	 * the DMA'd completion entry is read */
	rmb();
	be_dws_le_to_cpu(compl, sizeof(*compl));

	if (adapter->be3_native)
		be_parse_rx_compl_v1(compl, rxcp);
	else
		be_parse_rx_compl_v0(compl, rxcp);

	if (rxcp->vlanf) {
		/* vlanf could be wrongly set in some cards.
		 * ignore if vtm is not set */
		if ((adapter->function_mode & FLEX10_MODE) && !rxcp->vtm)
			rxcp->vlanf = 0;

		/* BE/Skyhawk report the tag byte-swapped; Lancer does not */
		if (!lancer_chip(adapter))
			rxcp->vlan_tag = swab16(rxcp->vlan_tag);

		/* Don't indicate the port's pvid to the stack unless the
		 * vlan was explicitly configured by the user */
		if (adapter->pvid == (rxcp->vlan_tag & VLAN_VID_MASK) &&
		    !adapter->vlan_tag[rxcp->vlan_tag])
			rxcp->vlanf = 0;
	}

	/* As the compl has been parsed, reset it; we wont touch it again */
	compl->dw[offsetof(struct amap_eth_rx_compl_v1, valid) / 32] = 0;

	queue_tail_inc(&rxo->cq);
	return rxcp;
}
1481
Eric Dumazet1829b082011-03-01 05:48:12 +00001482static inline struct page *be_alloc_pages(u32 size, gfp_t gfp)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001483{
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001484 u32 order = get_order(size);
Eric Dumazet1829b082011-03-01 05:48:12 +00001485
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001486 if (order > 0)
Eric Dumazet1829b082011-03-01 05:48:12 +00001487 gfp |= __GFP_COMP;
1488 return alloc_pages(gfp, order);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001489}
1490
1491/*
1492 * Allocate a page, split it to fragments of size rx_frag_size and post as
1493 * receive buffers to BE
1494 */
Eric Dumazet1829b082011-03-01 05:48:12 +00001495static void be_post_rx_frags(struct be_rx_obj *rxo, gfp_t gfp)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001496{
Sathya Perla3abcded2010-10-03 22:12:27 -07001497 struct be_adapter *adapter = rxo->adapter;
Sathya Perla26d92f92010-01-21 22:52:08 -08001498 struct be_rx_page_info *page_info = NULL, *prev_page_info = NULL;
Sathya Perla3abcded2010-10-03 22:12:27 -07001499 struct be_queue_info *rxq = &rxo->q;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001500 struct page *pagep = NULL;
1501 struct be_eth_rx_d *rxd;
1502 u64 page_dmaaddr = 0, frag_dmaaddr;
1503 u32 posted, page_offset = 0;
1504
Sathya Perla3abcded2010-10-03 22:12:27 -07001505 page_info = &rxo->page_info_tbl[rxq->head];
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001506 for (posted = 0; posted < MAX_RX_POST && !page_info->page; posted++) {
1507 if (!pagep) {
Eric Dumazet1829b082011-03-01 05:48:12 +00001508 pagep = be_alloc_pages(adapter->big_page_size, gfp);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001509 if (unlikely(!pagep)) {
Sathya Perlaac124ff2011-07-25 19:10:14 +00001510 rx_stats(rxo)->rx_post_fail++;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001511 break;
1512 }
Ivan Vecera2b7bceb2011-02-02 08:05:12 +00001513 page_dmaaddr = dma_map_page(&adapter->pdev->dev, pagep,
1514 0, adapter->big_page_size,
1515 DMA_FROM_DEVICE);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001516 page_info->page_offset = 0;
1517 } else {
1518 get_page(pagep);
1519 page_info->page_offset = page_offset + rx_frag_size;
1520 }
1521 page_offset = page_info->page_offset;
1522 page_info->page = pagep;
FUJITA Tomonorifac6da52010-04-01 16:53:22 +00001523 dma_unmap_addr_set(page_info, bus, page_dmaaddr);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001524 frag_dmaaddr = page_dmaaddr + page_info->page_offset;
1525
1526 rxd = queue_head_node(rxq);
1527 rxd->fragpa_lo = cpu_to_le32(frag_dmaaddr & 0xFFFFFFFF);
1528 rxd->fragpa_hi = cpu_to_le32(upper_32_bits(frag_dmaaddr));
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001529
1530 /* Any space left in the current big page for another frag? */
1531 if ((page_offset + rx_frag_size + rx_frag_size) >
1532 adapter->big_page_size) {
1533 pagep = NULL;
1534 page_info->last_page_user = true;
1535 }
Sathya Perla26d92f92010-01-21 22:52:08 -08001536
1537 prev_page_info = page_info;
1538 queue_head_inc(rxq);
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001539 page_info = &rxo->page_info_tbl[rxq->head];
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001540 }
1541 if (pagep)
Sathya Perla26d92f92010-01-21 22:52:08 -08001542 prev_page_info->last_page_user = true;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001543
1544 if (posted) {
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001545 atomic_add(posted, &rxq->used);
Sathya Perla8788fdc2009-07-27 22:52:03 +00001546 be_rxq_notify(adapter, rxq->id, posted);
Sathya Perlaea1dae12009-03-19 23:56:20 -07001547 } else if (atomic_read(&rxq->used) == 0) {
1548 /* Let be_worker replenish when memory is available */
Sathya Perla3abcded2010-10-03 22:12:27 -07001549 rxo->rx_post_starved = true;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001550 }
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001551}
1552
/* Fetch the next valid TX completion from tx_cq, or NULL if none.
 * Invalidates the entry and advances the CQ tail before returning.
 */
static struct be_eth_tx_compl *be_tx_compl_get(struct be_queue_info *tx_cq)
{
	struct be_eth_tx_compl *txcp = queue_tail_node(tx_cq);

	if (txcp->dw[offsetof(struct amap_eth_tx_compl, valid) / 32] == 0)
		return NULL;

	/* Read barrier: valid bit must be seen before the entry contents */
	rmb();
	be_dws_le_to_cpu(txcp, sizeof(*txcp));

	/* Mark consumed so this slot isn't processed again on wrap-around */
	txcp->dw[offsetof(struct amap_eth_tx_compl, valid) / 32] = 0;

	queue_tail_inc(tx_cq);
	return txcp;
}
1568
/* Reclaim one transmitted skb: unmap all of its wrbs from txq->tail up to
 * and including last_index, free the skb, and return the number of wrbs
 * consumed (caller adjusts txq->used).
 */
static u16 be_tx_compl_process(struct be_adapter *adapter,
		struct be_tx_obj *txo, u16 last_index)
{
	struct be_queue_info *txq = &txo->q;
	struct be_eth_wrb *wrb;
	struct sk_buff **sent_skbs = txo->sent_skb_list;
	struct sk_buff *sent_skb;
	u16 cur_index, num_wrbs = 1; /* account for hdr wrb */
	bool unmap_skb_hdr = true;

	/* The skb is stashed at the hdr-wrb's index when it was posted */
	sent_skb = sent_skbs[txq->tail];
	BUG_ON(!sent_skb);
	sent_skbs[txq->tail] = NULL;

	/* skip header wrb */
	queue_tail_inc(txq);

	do {
		cur_index = txq->tail;
		wrb = queue_tail_node(txq);
		/* Only the first data wrb may carry the (mapped) skb header */
		unmap_tx_frag(&adapter->pdev->dev, wrb,
			      (unmap_skb_hdr && skb_headlen(sent_skb)));
		unmap_skb_hdr = false;

		num_wrbs++;
		queue_tail_inc(txq);
	} while (cur_index != last_index);

	kfree_skb(sent_skb);
	return num_wrbs;
}
1600
/* Return the number of events in the event queue.
 * Consumes (zeroes) each valid EQ entry and advances the tail as it goes;
 * stops at the first entry with evt == 0 (not yet written by HW).
 */
static inline int events_get(struct be_eq_obj *eqo)
{
	struct be_eq_entry *eqe;
	int num = 0;

	do {
		eqe = queue_tail_node(&eqo->q);
		if (eqe->evt == 0)
			break;

		/* Read barrier before clearing: ensure the DMA'd entry is
		 * fully observed before it is marked consumed */
		rmb();
		eqe->evt = 0;
		num++;
		queue_tail_inc(&eqo->q);
	} while (true);

	return num;
}
1620
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001621static int event_handle(struct be_eq_obj *eqo)
Sathya Perla859b1e42009-08-10 03:43:51 +00001622{
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001623 bool rearm = false;
1624 int num = events_get(eqo);
Sathya Perla859b1e42009-08-10 03:43:51 +00001625
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001626 /* Deal with any spurious interrupts that come without events */
1627 if (!num)
1628 rearm = true;
Sathya Perla859b1e42009-08-10 03:43:51 +00001629
Padmanabh Ratnakaraf311fe2012-04-25 01:46:39 +00001630 if (num || msix_enabled(eqo->adapter))
1631 be_eq_notify(eqo->adapter, eqo->q.id, rearm, true, num);
1632
Sathya Perla859b1e42009-08-10 03:43:51 +00001633 if (num)
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001634 napi_schedule(&eqo->napi);
1635
1636 return num;
Sathya Perla859b1e42009-08-10 03:43:51 +00001637}
1638
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001639/* Leaves the EQ is disarmed state */
1640static void be_eq_clean(struct be_eq_obj *eqo)
1641{
1642 int num = events_get(eqo);
1643
1644 be_eq_notify(eqo->adapter, eqo->q.id, false, true, num);
1645}
1646
/* Flush an RX queue before teardown: consume and discard any pending
 * completions, then free every posted-but-unused RX buffer and reset the
 * ring indices.
 */
static void be_rx_cq_clean(struct be_rx_obj *rxo)
{
	struct be_rx_page_info *page_info;
	struct be_queue_info *rxq = &rxo->q;
	struct be_queue_info *rx_cq = &rxo->cq;
	struct be_rx_compl_info *rxcp;
	u16 tail;

	/* First cleanup pending rx completions */
	while ((rxcp = be_rx_compl_get(rxo)) != NULL) {
		be_rx_compl_discard(rxo, rxcp);
		be_cq_notify(rxo->adapter, rx_cq->id, false, 1);
	}

	/* Then free posted rx buffer that were not used */
	/* oldest posted buffer sits 'used' slots behind head (mod ring len) */
	tail = (rxq->head + rxq->len - atomic_read(&rxq->used)) % rxq->len;
	for (; atomic_read(&rxq->used) > 0; index_inc(&tail, rxq->len)) {
		page_info = get_rx_page_info(rxo, tail);
		put_page(page_info->page);
		memset(page_info, 0, sizeof(*page_info));
	}
	BUG_ON(atomic_read(&rxq->used));
	rxq->tail = rxq->head = 0;
}
1671
/* Drain all TX queues before teardown: poll completions for up to ~200ms,
 * then forcibly unmap and free any skbs whose completions never arrived.
 */
static void be_tx_compl_clean(struct be_adapter *adapter)
{
	struct be_tx_obj *txo;
	struct be_queue_info *txq;
	struct be_eth_tx_compl *txcp;
	u16 end_idx, cmpl = 0, timeo = 0, num_wrbs = 0;
	struct sk_buff *sent_skb;
	bool dummy_wrb;
	int i, pending_txqs;

	/* Wait for a max of 200ms for all the tx-completions to arrive. */
	do {
		pending_txqs = adapter->num_tx_qs;

		for_all_tx_queues(adapter, txo, i) {
			txq = &txo->q;
			while ((txcp = be_tx_compl_get(&txo->cq))) {
				end_idx =
					AMAP_GET_BITS(struct amap_eth_tx_compl,
						      wrb_index, txcp);
				num_wrbs += be_tx_compl_process(adapter, txo,
								end_idx);
				cmpl++;
			}
			if (cmpl) {
				/* Ack the drained completions to HW and
				 * release the reclaimed wrb slots */
				be_cq_notify(adapter, txo->cq.id, false, cmpl);
				atomic_sub(num_wrbs, &txq->used);
				cmpl = 0;
				num_wrbs = 0;
			}
			if (atomic_read(&txq->used) == 0)
				pending_txqs--;
		}

		if (pending_txqs == 0 || ++timeo > 200)
			break;

		mdelay(1);
	} while (true);

	for_all_tx_queues(adapter, txo, i) {
		txq = &txo->q;
		if (atomic_read(&txq->used))
			dev_err(&adapter->pdev->dev, "%d pending tx-compls\n",
				atomic_read(&txq->used));

		/* free posted tx for which compls will never arrive */
		while (atomic_read(&txq->used)) {
			sent_skb = txo->sent_skb_list[txq->tail];
			end_idx = txq->tail;
			/* recompute this skb's wrb span since there is no
			 * completion entry to tell us */
			num_wrbs = wrb_cnt_for_skb(adapter, sent_skb,
						   &dummy_wrb);
			index_adv(&end_idx, num_wrbs - 1, txq->len);
			num_wrbs = be_tx_compl_process(adapter, txo, end_idx);
			atomic_sub(num_wrbs, &txq->used);
		}
	}
}
1730
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001731static void be_evt_queues_destroy(struct be_adapter *adapter)
1732{
1733 struct be_eq_obj *eqo;
1734 int i;
1735
1736 for_all_evt_queues(adapter, eqo, i) {
Padmanabh Ratnakar19d59aa2012-07-12 03:57:21 +00001737 if (eqo->q.created) {
1738 be_eq_clean(eqo);
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001739 be_cmd_q_destroy(adapter, &eqo->q, QTYPE_EQ);
Padmanabh Ratnakar19d59aa2012-07-12 03:57:21 +00001740 }
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001741 be_queue_free(adapter, &eqo->q);
1742 }
1743}
1744
/* Create one event queue per irq: allocate ring memory and create the EQ
 * in FW. Returns 0 on success or the first failing status; partially
 * created queues are cleaned up by be_evt_queues_destroy() on error paths.
 */
static int be_evt_queues_create(struct be_adapter *adapter)
{
	struct be_queue_info *eq;
	struct be_eq_obj *eqo;
	int i, rc;

	adapter->num_evt_qs = num_irqs(adapter);

	for_all_evt_queues(adapter, eqo, i) {
		eqo->adapter = adapter;
		eqo->tx_budget = BE_TX_BUDGET;
		eqo->idx = i;
		eqo->max_eqd = BE_MAX_EQD;
		/* adaptive interrupt coalescing on by default */
		eqo->enable_aic = true;

		eq = &eqo->q;
		rc = be_queue_alloc(adapter, eq, EVNT_Q_LEN,
					sizeof(struct be_eq_entry));
		if (rc)
			return rc;

		rc = be_cmd_eq_create(adapter, eq, eqo->cur_eqd);
		if (rc)
			return rc;
	}
	return 0;
}
1772
Sathya Perla5fb379e2009-06-18 00:02:59 +00001773static void be_mcc_queues_destroy(struct be_adapter *adapter)
1774{
1775 struct be_queue_info *q;
Sathya Perla5fb379e2009-06-18 00:02:59 +00001776
Sathya Perla8788fdc2009-07-27 22:52:03 +00001777 q = &adapter->mcc_obj.q;
Sathya Perla5fb379e2009-06-18 00:02:59 +00001778 if (q->created)
Sathya Perla8788fdc2009-07-27 22:52:03 +00001779 be_cmd_q_destroy(adapter, q, QTYPE_MCCQ);
Sathya Perla5fb379e2009-06-18 00:02:59 +00001780 be_queue_free(adapter, q);
1781
Sathya Perla8788fdc2009-07-27 22:52:03 +00001782 q = &adapter->mcc_obj.cq;
Sathya Perla5fb379e2009-06-18 00:02:59 +00001783 if (q->created)
Sathya Perla8788fdc2009-07-27 22:52:03 +00001784 be_cmd_q_destroy(adapter, q, QTYPE_CQ);
Sathya Perla5fb379e2009-06-18 00:02:59 +00001785 be_queue_free(adapter, q);
1786}
1787
/* Must be called only after TX qs are created as MCC shares TX EQ */
/* Create the MCC CQ and queue in FW. Returns 0 on success, -1 on any
 * failure; the goto ladder unwinds exactly what was created so far.
 */
static int be_mcc_queues_create(struct be_adapter *adapter)
{
	struct be_queue_info *q, *cq;

	cq = &adapter->mcc_obj.cq;
	if (be_queue_alloc(adapter, cq, MCC_CQ_LEN,
			sizeof(struct be_mcc_compl)))
		goto err;

	/* Use the default EQ for MCC completions */
	if (be_cmd_cq_create(adapter, cq, &mcc_eqo(adapter)->q, true, 0))
		goto mcc_cq_free;

	q = &adapter->mcc_obj.q;
	if (be_queue_alloc(adapter, q, MCC_Q_LEN, sizeof(struct be_mcc_wrb)))
		goto mcc_cq_destroy;

	if (be_cmd_mccq_create(adapter, q, cq))
		goto mcc_q_free;

	return 0;

mcc_q_free:
	be_queue_free(adapter, q);
mcc_cq_destroy:
	be_cmd_q_destroy(adapter, cq, QTYPE_CQ);
mcc_cq_free:
	be_queue_free(adapter, cq);
err:
	return -1;
}
1820
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001821static void be_tx_queues_destroy(struct be_adapter *adapter)
1822{
1823 struct be_queue_info *q;
Sathya Perla3c8def92011-06-12 20:01:58 +00001824 struct be_tx_obj *txo;
1825 u8 i;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001826
Sathya Perla3c8def92011-06-12 20:01:58 +00001827 for_all_tx_queues(adapter, txo, i) {
1828 q = &txo->q;
1829 if (q->created)
1830 be_cmd_q_destroy(adapter, q, QTYPE_TXQ);
1831 be_queue_free(adapter, q);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001832
Sathya Perla3c8def92011-06-12 20:01:58 +00001833 q = &txo->cq;
1834 if (q->created)
1835 be_cmd_q_destroy(adapter, q, QTYPE_CQ);
1836 be_queue_free(adapter, q);
1837 }
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001838}
1839
Sathya Perladafc0fe2011-10-24 02:45:02 +00001840static int be_num_txqs_want(struct be_adapter *adapter)
1841{
Sathya Perla39f1d942012-05-08 19:41:24 +00001842 if (sriov_want(adapter) || be_is_mc(adapter) ||
1843 lancer_chip(adapter) || !be_physfn(adapter) ||
1844 adapter->generation == BE_GEN2)
Sathya Perladafc0fe2011-10-24 02:45:02 +00001845 return 1;
1846 else
1847 return MAX_TX_QS;
1848}
1849
/* Decide the TX queue count, publish it to the stack, and create one
 * completion queue per TX queue (CQs may share EQs). Returns 0 or the
 * first failing status.
 */
static int be_tx_cqs_create(struct be_adapter *adapter)
{
	struct be_queue_info *cq, *eq;
	int status;
	struct be_tx_obj *txo;
	u8 i;

	adapter->num_tx_qs = be_num_txqs_want(adapter);
	if (adapter->num_tx_qs != MAX_TX_QS) {
		/* rtnl protects the real_num_tx_queues update */
		rtnl_lock();
		netif_set_real_num_tx_queues(adapter->netdev,
			adapter->num_tx_qs);
		rtnl_unlock();
	}

	for_all_tx_queues(adapter, txo, i) {
		cq = &txo->cq;
		status = be_queue_alloc(adapter, cq, TX_CQ_LEN,
					sizeof(struct be_eth_tx_compl));
		if (status)
			return status;

		/* If num_evt_qs is less than num_tx_qs, then more than
		 * one txq share an eq
		 */
		eq = &adapter->eq_obj[i % adapter->num_evt_qs].q;
		status = be_cmd_cq_create(adapter, cq, eq, false, 3);
		if (status)
			return status;
	}
	return 0;
}
1882
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001883static int be_tx_qs_create(struct be_adapter *adapter)
1884{
1885 struct be_tx_obj *txo;
1886 int i, status;
1887
1888 for_all_tx_queues(adapter, txo, i) {
1889 status = be_queue_alloc(adapter, &txo->q, TX_Q_LEN,
1890 sizeof(struct be_eth_wrb));
1891 if (status)
1892 return status;
1893
1894 status = be_cmd_txq_create(adapter, &txo->q, &txo->cq);
1895 if (status)
1896 return status;
1897 }
1898
Sathya Perlad3791422012-09-28 04:39:44 +00001899 dev_info(&adapter->pdev->dev, "created %d TX queue(s)\n",
1900 adapter->num_tx_qs);
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001901 return 0;
1902}
1903
1904static void be_rx_cqs_destroy(struct be_adapter *adapter)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001905{
1906 struct be_queue_info *q;
Sathya Perla3abcded2010-10-03 22:12:27 -07001907 struct be_rx_obj *rxo;
1908 int i;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001909
Sathya Perla3abcded2010-10-03 22:12:27 -07001910 for_all_rx_queues(adapter, rxo, i) {
Sathya Perla3abcded2010-10-03 22:12:27 -07001911 q = &rxo->cq;
1912 if (q->created)
1913 be_cmd_q_destroy(adapter, q, QTYPE_CQ);
1914 be_queue_free(adapter, q);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001915 }
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001916}
1917
/* Decide the RX queue count (RSS rings + 1 default queue when multiple
 * irqs are available), publish it to the stack, and create one completion
 * queue per RX queue. Returns 0 or the first failing status.
 */
static int be_rx_cqs_create(struct be_adapter *adapter)
{
	struct be_queue_info *eq, *cq;
	struct be_rx_obj *rxo;
	int rc, i;

	/* We'll create as many RSS rings as there are irqs.
	 * But when there's only one irq there's no use creating RSS rings
	 */
	adapter->num_rx_qs = (num_irqs(adapter) > 1) ?
				num_irqs(adapter) + 1 : 1;
	if (adapter->num_rx_qs != MAX_RX_QS) {
		/* rtnl protects the real_num_rx_queues update */
		rtnl_lock();
		netif_set_real_num_rx_queues(adapter->netdev,
					     adapter->num_rx_qs);
		rtnl_unlock();
	}

	adapter->big_page_size = (1 << get_order(rx_frag_size)) * PAGE_SIZE;
	for_all_rx_queues(adapter, rxo, i) {
		rxo->adapter = adapter;
		cq = &rxo->cq;
		rc = be_queue_alloc(adapter, cq, RX_CQ_LEN,
				sizeof(struct be_eth_rx_compl));
		if (rc)
			return rc;

		/* RX CQs may share EQs when there are more CQs than EQs */
		eq = &adapter->eq_obj[i % adapter->num_evt_qs].q;
		rc = be_cmd_cq_create(adapter, cq, eq, false, 3);
		if (rc)
			return rc;
	}

	dev_info(&adapter->pdev->dev,
		 "created %d RSS queue(s) and 1 default RX queue\n",
		 adapter->num_rx_qs - 1);
	return 0;
}
1956
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001957static irqreturn_t be_intx(int irq, void *dev)
1958{
1959 struct be_adapter *adapter = dev;
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001960 int num_evts;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001961
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001962 /* With INTx only one EQ is used */
1963 num_evts = event_handle(&adapter->eq_obj[0]);
1964 if (num_evts)
1965 return IRQ_HANDLED;
1966 else
1967 return IRQ_NONE;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001968}
1969
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001970static irqreturn_t be_msix(int irq, void *dev)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001971{
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001972 struct be_eq_obj *eqo = dev;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001973
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001974 event_handle(eqo);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001975 return IRQ_HANDLED;
1976}
1977
Sathya Perla2e588f82011-03-11 02:49:26 +00001978static inline bool do_gro(struct be_rx_compl_info *rxcp)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001979{
Sathya Perla2e588f82011-03-11 02:49:26 +00001980 return (rxcp->tcpf && !rxcp->err) ? true : false;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001981}
1982
/* NAPI RX processing: drain up to 'budget' completions from rxo's CQ,
 * handing good frames to GRO or the regular receive path.
 * Returns the number of completions consumed.
 */
static int be_process_rx(struct be_rx_obj *rxo, struct napi_struct *napi,
			int budget)
{
	struct be_adapter *adapter = rxo->adapter;
	struct be_queue_info *rx_cq = &rxo->cq;
	struct be_rx_compl_info *rxcp;
	u32 work_done;

	for (work_done = 0; work_done < budget; work_done++) {
		rxcp = be_rx_compl_get(rxo);
		if (!rxcp)
			break;

		/* Is it a flush compl that has no data */
		if (unlikely(rxcp->num_rcvd == 0))
			goto loop_continue;

		/* Discard compl with partial DMA Lancer B0 */
		if (unlikely(!rxcp->pkt_size)) {
			be_rx_compl_discard(rxo, rxcp);
			goto loop_continue;
		}

		/* On BE drop pkts that arrive due to imperfect filtering in
		 * promiscuous mode on some skews
		 */
		if (unlikely(rxcp->port != adapter->port_num &&
				!lancer_chip(adapter))) {
			be_rx_compl_discard(rxo, rxcp);
			goto loop_continue;
		}

		if (do_gro(rxcp))
			be_rx_compl_process_gro(rxo, napi, rxcp);
		else
			be_rx_compl_process(rxo, rxcp);
loop_continue:
		/* Stats are updated even for flushed/discarded compls */
		be_rx_stats_update(rxo, rxcp);
	}

	if (work_done) {
		be_cq_notify(adapter, rx_cq->id, true, work_done);

		/* Replenish the RX ring when posted frags run low */
		if (atomic_read(&rxo->q.used) < RX_FRAGS_REFILL_WM)
			be_post_rx_frags(rxo, GFP_ATOMIC);
	}

	return work_done;
}
2032
/* Reap up to 'budget' TX completions on txo, release the consumed wrbs and
 * wake netdev sub-queue 'idx' if it was stopped for lack of wrbs.
 * Returns true when the CQ was fully drained within the budget.
 */
static bool be_process_tx(struct be_adapter *adapter, struct be_tx_obj *txo,
			int budget, int idx)
{
	struct be_eth_tx_compl *txcp;
	int num_wrbs = 0, work_done;

	for (work_done = 0; work_done < budget; work_done++) {
		txcp = be_tx_compl_get(&txo->cq);
		if (!txcp)
			break;
		num_wrbs += be_tx_compl_process(adapter, txo,
				AMAP_GET_BITS(struct amap_eth_tx_compl,
					wrb_index, txcp));
	}

	if (work_done) {
		be_cq_notify(adapter, txo->cq.id, true, work_done);
		atomic_sub(num_wrbs, &txo->q.used);

		/* As Tx wrbs have been freed up, wake up netdev queue
		 * if it was stopped due to lack of tx wrbs. */
		if (__netif_subqueue_stopped(adapter->netdev, idx) &&
			atomic_read(&txo->q.used) < txo->q.len / 2) {
			netif_wake_subqueue(adapter->netdev, idx);
		}

		u64_stats_update_begin(&tx_stats(txo)->sync_compl);
		tx_stats(txo)->tx_compl += work_done;
		u64_stats_update_end(&tx_stats(txo)->sync_compl);
	}
	return (work_done < budget); /* Done */
}
Sathya Perla3c8def92011-06-12 20:01:58 +00002065
/* NAPI poll handler: services every TX and RX queue mapped to this EQ
 * (queues are striped over EQs by index), plus MCC completions on the
 * EQ that owns the MCC queue. Returns the work done (<= budget means
 * polling may stop and interrupts are re-armed).
 */
int be_poll(struct napi_struct *napi, int budget)
{
	struct be_eq_obj *eqo = container_of(napi, struct be_eq_obj, napi);
	struct be_adapter *adapter = eqo->adapter;
	int max_work = 0, work, i;
	bool tx_done;

	/* Process all TXQs serviced by this EQ */
	for (i = eqo->idx; i < adapter->num_tx_qs; i += adapter->num_evt_qs) {
		tx_done = be_process_tx(adapter, &adapter->tx_obj[i],
					eqo->tx_budget, i);
		if (!tx_done)
			max_work = budget;	/* TX not drained: force repoll */
	}

	/* This loop will iterate twice for EQ0 in which
	 * completions of the last RXQ (default one) are also processed
	 * For other EQs the loop iterates only once
	 */
	for (i = eqo->idx; i < adapter->num_rx_qs; i += adapter->num_evt_qs) {
		work = be_process_rx(&adapter->rx_obj[i], napi, budget);
		max_work = max(work, max_work);
	}

	if (is_mcc_eqo(eqo))
		be_process_mcc(adapter);

	if (max_work < budget) {
		napi_complete(napi);
		be_eq_notify(adapter, eqo->q.id, true, false, 0);
	} else {
		/* As we'll continue in polling mode, count and clear events */
		be_eq_notify(adapter, eqo->q.id, false, false, events_get(eqo));
	}
	return max_work;
}
2102
/* Probe the HW for error state and latch adapter->hw_error when found.
 * Lancer chips report errors via the SLIPORT status/err registers in BAR
 * space; other chips via the UE (unrecoverable error) status words in PCI
 * config space. Returns early (no re-probe) once a critical error is set.
 */
void be_detect_error(struct be_adapter *adapter)
{
	u32 ue_lo = 0, ue_hi = 0, ue_lo_mask = 0, ue_hi_mask = 0;
	u32 sliport_status = 0, sliport_err1 = 0, sliport_err2 = 0;
	u32 i;

	if (be_crit_error(adapter))
		return;

	if (lancer_chip(adapter)) {
		sliport_status = ioread32(adapter->db + SLIPORT_STATUS_OFFSET);
		if (sliport_status & SLIPORT_STATUS_ERR_MASK) {
			sliport_err1 = ioread32(adapter->db +
					SLIPORT_ERROR1_OFFSET);
			sliport_err2 = ioread32(adapter->db +
					SLIPORT_ERROR2_OFFSET);
		}
	} else {
		pci_read_config_dword(adapter->pdev,
				PCICFG_UE_STATUS_LOW, &ue_lo);
		pci_read_config_dword(adapter->pdev,
				PCICFG_UE_STATUS_HIGH, &ue_hi);
		pci_read_config_dword(adapter->pdev,
				PCICFG_UE_STATUS_LOW_MASK, &ue_lo_mask);
		pci_read_config_dword(adapter->pdev,
				PCICFG_UE_STATUS_HI_MASK, &ue_hi_mask);

		/* Masked bits are don't-care: keep only real error bits */
		ue_lo = (ue_lo & ~ue_lo_mask);
		ue_hi = (ue_hi & ~ue_hi_mask);
	}

	if (ue_lo || ue_hi ||
		sliport_status & SLIPORT_STATUS_ERR_MASK) {
		adapter->hw_error = true;
		dev_err(&adapter->pdev->dev,
			"Error detected in the card\n");
	}

	if (sliport_status & SLIPORT_STATUS_ERR_MASK) {
		dev_err(&adapter->pdev->dev,
			"ERR: sliport status 0x%x\n", sliport_status);
		dev_err(&adapter->pdev->dev,
			"ERR: sliport error1 0x%x\n", sliport_err1);
		dev_err(&adapter->pdev->dev,
			"ERR: sliport error2 0x%x\n", sliport_err2);
	}

	if (ue_lo) {
		/* Log the symbolic name of every set UE bit */
		for (i = 0; ue_lo; ue_lo >>= 1, i++) {
			if (ue_lo & 1)
				dev_err(&adapter->pdev->dev,
				"UE: %s bit set\n", ue_status_low_desc[i]);
		}
	}

	if (ue_hi) {
		for (i = 0; ue_hi; ue_hi >>= 1, i++) {
			if (ue_hi & 1)
				dev_err(&adapter->pdev->dev,
				"UE: %s bit set\n", ue_status_hi_desc[i]);
		}
	}

}
2167
Sathya Perla8d56ff12009-11-22 22:02:26 +00002168static void be_msix_disable(struct be_adapter *adapter)
2169{
Sathya Perlaac6a0c42011-03-21 20:49:25 +00002170 if (msix_enabled(adapter)) {
Sathya Perla8d56ff12009-11-22 22:02:26 +00002171 pci_disable_msix(adapter->pdev);
Sathya Perlaac6a0c42011-03-21 20:49:25 +00002172 adapter->num_msix_vec = 0;
Sathya Perla3abcded2010-10-03 22:12:27 -07002173 }
2174}
2175
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002176static uint be_num_rss_want(struct be_adapter *adapter)
2177{
Yuval Mintz30e80b52012-07-01 03:19:00 +00002178 u32 num = 0;
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002179 if ((adapter->function_caps & BE_FUNCTION_CAPS_RSS) &&
Sathya Perla4cbdaf62012-08-28 20:37:40 +00002180 !sriov_want(adapter) && be_physfn(adapter)) {
Yuval Mintz30e80b52012-07-01 03:19:00 +00002181 num = (adapter->be3_native) ? BE3_MAX_RSS_QS : BE2_MAX_RSS_QS;
2182 num = min_t(u32, num, (u32)netif_get_num_default_rss_queues());
2183 }
2184 return num;
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002185}
2186
/* Try to enable MSI-x with one vector per desired RSS queue (plus RoCE
 * vectors when supported). Uses the legacy pci_enable_msix() protocol:
 * a positive return is the number of vectors available, so we retry once
 * with exactly that count. On failure only a warning is logged and the
 * driver falls back to INTx.
 */
static void be_msix_enable(struct be_adapter *adapter)
{
#define BE_MIN_MSIX_VECTORS 1
	int i, status, num_vec, num_roce_vec = 0;
	struct device *dev = &adapter->pdev->dev;

	/* If RSS queues are not used, need a vec for default RX Q */
	num_vec = min(be_num_rss_want(adapter), num_online_cpus());
	if (be_roce_supported(adapter)) {
		num_roce_vec = min_t(u32, MAX_ROCE_MSIX_VECTORS,
					(num_online_cpus() + 1));
		num_roce_vec = min(num_roce_vec, MAX_ROCE_EQS);
		num_vec += num_roce_vec;
		num_vec = min(num_vec, MAX_MSIX_VECTORS);
	}
	num_vec = max(num_vec, BE_MIN_MSIX_VECTORS);

	for (i = 0; i < num_vec; i++)
		adapter->msix_entries[i].entry = i;

	status = pci_enable_msix(adapter->pdev, adapter->msix_entries, num_vec);
	if (status == 0) {
		goto done;
	} else if (status >= BE_MIN_MSIX_VECTORS) {
		/* Fewer vectors available than requested; retry with the
		 * count the kernel reported back
		 */
		num_vec = status;
		if (pci_enable_msix(adapter->pdev, adapter->msix_entries,
				num_vec) == 0)
			goto done;
	}

	dev_warn(dev, "MSIx enable failed\n");
	return;
done:
	/* Split the granted vectors between the NIC and RoCE functions */
	if (be_roce_supported(adapter)) {
		if (num_vec > num_roce_vec) {
			adapter->num_msix_vec = num_vec - num_roce_vec;
			adapter->num_msix_roce_vec =
				num_vec - adapter->num_msix_vec;
		} else {
			adapter->num_msix_vec = num_vec;
			adapter->num_msix_roce_vec = 0;
		}
	} else
		adapter->num_msix_vec = num_vec;
	dev_info(dev, "enabled %d MSI-x vector(s)\n", adapter->num_msix_vec);
	return;
}
2234
Sathya Perlafe6d2a32010-11-21 23:25:50 +00002235static inline int be_msix_vec_get(struct be_adapter *adapter,
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002236 struct be_eq_obj *eqo)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002237{
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002238 return adapter->msix_entries[eqo->idx].vector;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002239}
2240
/* Request one irq per event queue, naming each handler "<netdev>-qN".
 * On failure, frees the irqs already requested (walking eq_obj backwards
 * from the last successful index) and disables MSI-x entirely.
 */
static int be_msix_register(struct be_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	struct be_eq_obj *eqo;
	int status, i, vec;

	for_all_evt_queues(adapter, eqo, i) {
		sprintf(eqo->desc, "%s-q%d", netdev->name, i);
		vec = be_msix_vec_get(adapter, eqo);
		status = request_irq(vec, be_msix, 0, eqo->desc, eqo);
		if (status)
			goto err_msix;
	}

	return 0;
err_msix:
	/* i is the index that failed; unwind the earlier registrations */
	for (i--, eqo = &adapter->eq_obj[i]; i >= 0; i--, eqo--)
		free_irq(be_msix_vec_get(adapter, eqo), eqo);
	dev_warn(&adapter->pdev->dev, "MSIX Request IRQ failed - err %d\n",
		status);
	be_msix_disable(adapter);
	return status;
}
2264
2265static int be_irq_register(struct be_adapter *adapter)
2266{
2267 struct net_device *netdev = adapter->netdev;
2268 int status;
2269
Sathya Perlaac6a0c42011-03-21 20:49:25 +00002270 if (msix_enabled(adapter)) {
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002271 status = be_msix_register(adapter);
2272 if (status == 0)
2273 goto done;
Sarveshwar Bandiba343c72010-03-31 02:56:12 +00002274 /* INTx is not supported for VF */
2275 if (!be_physfn(adapter))
2276 return status;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002277 }
2278
2279 /* INTx */
2280 netdev->irq = adapter->pdev->irq;
2281 status = request_irq(netdev->irq, be_intx, IRQF_SHARED, netdev->name,
2282 adapter);
2283 if (status) {
2284 dev_err(&adapter->pdev->dev,
2285 "INTx request IRQ failed - err %d\n", status);
2286 return status;
2287 }
2288done:
2289 adapter->isr_registered = true;
2290 return 0;
2291}
2292
2293static void be_irq_unregister(struct be_adapter *adapter)
2294{
2295 struct net_device *netdev = adapter->netdev;
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002296 struct be_eq_obj *eqo;
Sathya Perla3abcded2010-10-03 22:12:27 -07002297 int i;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002298
2299 if (!adapter->isr_registered)
2300 return;
2301
2302 /* INTx */
Sathya Perlaac6a0c42011-03-21 20:49:25 +00002303 if (!msix_enabled(adapter)) {
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002304 free_irq(netdev->irq, adapter);
2305 goto done;
2306 }
2307
2308 /* MSIx */
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002309 for_all_evt_queues(adapter, eqo, i)
2310 free_irq(be_msix_vec_get(adapter, eqo), eqo);
Sathya Perla3abcded2010-10-03 22:12:27 -07002311
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002312done:
2313 adapter->isr_registered = false;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002314}
2315
/* Destroy every RX ring (the RX CQs are torn down separately): issue the
 * FW destroy cmd, wait for in-flight DMA/flush completions, drain the CQ,
 * then free the ring memory.
 */
static void be_rx_qs_destroy(struct be_adapter *adapter)
{
	struct be_queue_info *q;
	struct be_rx_obj *rxo;
	int i;

	for_all_rx_queues(adapter, rxo, i) {
		q = &rxo->q;
		if (q->created) {
			be_cmd_rxq_destroy(adapter, q);
			/* After the rxq is invalidated, wait for a grace time
			 * of 1ms for all dma to end and the flush compl to
			 * arrive
			 */
			mdelay(1);
			be_rx_cq_clean(rxo);
		}
		be_queue_free(adapter, q);
	}
}
2336
/* ndo_stop handler: quiesce the device in order — close RoCE, stop async
 * MCC events, mask interrupts (be_intr_set is skipped on Lancer), disable
 * NAPI and drain each EQ, unregister irqs, then clean TX and tear down the
 * RX rings. Always returns 0.
 */
static int be_close(struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_eq_obj *eqo;
	int i;

	be_roce_dev_close(adapter);

	be_async_mcc_disable(adapter);

	if (!lancer_chip(adapter))
		be_intr_set(adapter, false);

	for_all_evt_queues(adapter, eqo, i) {
		napi_disable(&eqo->napi);
		/* Make sure no irq handler is still running for this EQ */
		if (msix_enabled(adapter))
			synchronize_irq(be_msix_vec_get(adapter, eqo));
		else
			synchronize_irq(netdev->irq);
		be_eq_clean(eqo);
	}

	be_irq_unregister(adapter);

	/* Wait for all pending tx completions to arrive so that
	 * all tx skbs are freed.
	 */
	be_tx_compl_clean(adapter);

	be_rx_qs_destroy(adapter);
	return 0;
}
2369
/* Allocate and create the RX rings (default first, then RSS rings),
 * program the 128-entry RSS indirection table when multiple RX queues
 * exist, and post the initial receive buffers.
 */
static int be_rx_qs_create(struct be_adapter *adapter)
{
	struct be_rx_obj *rxo;
	int rc, i, j;
	u8 rsstable[128];

	for_all_rx_queues(adapter, rxo, i) {
		rc = be_queue_alloc(adapter, &rxo->q, RX_Q_LEN,
				sizeof(struct be_eth_rx_d));
		if (rc)
			return rc;
	}

	/* The FW would like the default RXQ to be created first */
	rxo = default_rxo(adapter);
	rc = be_cmd_rxq_create(adapter, &rxo->q, rxo->cq.id, rx_frag_size,
			adapter->if_handle, false, &rxo->rss_id);
	if (rc)
		return rc;

	for_all_rss_queues(adapter, rxo, i) {
		rc = be_cmd_rxq_create(adapter, &rxo->q, rxo->cq.id,
				rx_frag_size, adapter->if_handle,
				true, &rxo->rss_id);
		if (rc)
			return rc;
	}

	if (be_multi_rxq(adapter)) {
		/* Fill the indirection table by cycling through the RSS
		 * queue ids (num_rx_qs - 1 of them) until all 128 slots
		 * are populated
		 */
		for (j = 0; j < 128; j += adapter->num_rx_qs - 1) {
			for_all_rss_queues(adapter, rxo, i) {
				if ((j + i) >= 128)
					break;
				rsstable[j + i] = rxo->rss_id;
			}
		}
		rc = be_cmd_rss_config(adapter, rsstable, 128);
		if (rc)
			return rc;
	}

	/* First time posting */
	for_all_rx_queues(adapter, rxo, i)
		be_post_rx_frags(rxo, GFP_KERNEL);
	return 0;
}
2416
/* ndo_open handler: create the RX queues, register irqs, enable
 * interrupts (be_intr_set is skipped on Lancer), arm all CQs/EQs, enable
 * NAPI and async MCC, report link state, and open RoCE.
 * On failure, unwinds via be_close() and returns -EIO.
 */
static int be_open(struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_eq_obj *eqo;
	struct be_rx_obj *rxo;
	struct be_tx_obj *txo;
	u8 link_status;
	int status, i;

	status = be_rx_qs_create(adapter);
	if (status)
		goto err;

	be_irq_register(adapter);

	if (!lancer_chip(adapter))
		be_intr_set(adapter, true);

	/* Arm the CQs so completions start raising events */
	for_all_rx_queues(adapter, rxo, i)
		be_cq_notify(adapter, rxo->cq.id, true, 0);

	for_all_tx_queues(adapter, txo, i)
		be_cq_notify(adapter, txo->cq.id, true, 0);

	be_async_mcc_enable(adapter);

	for_all_evt_queues(adapter, eqo, i) {
		napi_enable(&eqo->napi);
		be_eq_notify(adapter, eqo->q.id, true, false, 0);
	}

	/* Push the current link state to the stack */
	status = be_cmd_link_status_query(adapter, NULL, &link_status, 0);
	if (!status)
		be_link_status_update(adapter, link_status);

	be_roce_dev_open(adapter);
	return 0;
err:
	be_close(adapter->netdev);
	return -EIO;
}
2458
Ajit Khaparde71d8d1b2009-12-03 06:16:59 +00002459static int be_setup_wol(struct be_adapter *adapter, bool enable)
2460{
2461 struct be_dma_mem cmd;
2462 int status = 0;
2463 u8 mac[ETH_ALEN];
2464
2465 memset(mac, 0, ETH_ALEN);
2466
2467 cmd.size = sizeof(struct be_cmd_req_acpi_wol_magic_config);
Ivan Vecera2b7bceb2011-02-02 08:05:12 +00002468 cmd.va = dma_alloc_coherent(&adapter->pdev->dev, cmd.size, &cmd.dma,
2469 GFP_KERNEL);
Ajit Khaparde71d8d1b2009-12-03 06:16:59 +00002470 if (cmd.va == NULL)
2471 return -1;
2472 memset(cmd.va, 0, cmd.size);
2473
2474 if (enable) {
2475 status = pci_write_config_dword(adapter->pdev,
2476 PCICFG_PM_CONTROL_OFFSET, PCICFG_PM_CONTROL_MASK);
2477 if (status) {
2478 dev_err(&adapter->pdev->dev,
Frans Pop2381a552010-03-24 07:57:36 +00002479 "Could not enable Wake-on-lan\n");
Ivan Vecera2b7bceb2011-02-02 08:05:12 +00002480 dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va,
2481 cmd.dma);
Ajit Khaparde71d8d1b2009-12-03 06:16:59 +00002482 return status;
2483 }
2484 status = be_cmd_enable_magic_wol(adapter,
2485 adapter->netdev->dev_addr, &cmd);
2486 pci_enable_wake(adapter->pdev, PCI_D3hot, 1);
2487 pci_enable_wake(adapter->pdev, PCI_D3cold, 1);
2488 } else {
2489 status = be_cmd_enable_magic_wol(adapter, mac, &cmd);
2490 pci_enable_wake(adapter->pdev, PCI_D3hot, 0);
2491 pci_enable_wake(adapter->pdev, PCI_D3cold, 0);
2492 }
2493
Ivan Vecera2b7bceb2011-02-02 08:05:12 +00002494 dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va, cmd.dma);
Ajit Khaparde71d8d1b2009-12-03 06:16:59 +00002495 return status;
2496}
2497
/*
 * Generate a seed MAC address from the PF MAC Address using jhash.
 * MAC Addresses for VFs are assigned incrementally starting from the seed.
 * These addresses are programmed in the ASIC by the PF and the VF driver
 * queries for the MAC address during its probe.
 *
 * NOTE(review): only the status of the last VF is returned; earlier
 * per-VF failures are logged but otherwise lost — confirm callers are
 * OK with that.
 */
static inline int be_vf_eth_addr_config(struct be_adapter *adapter)
{
	u32 vf;
	int status = 0;
	u8 mac[ETH_ALEN];
	struct be_vf_cfg *vf_cfg;

	be_vf_eth_addr_generate(adapter, mac);

	for_all_vfs(adapter, vf_cfg, vf) {
		if (lancer_chip(adapter)) {
			/* Lancer sets the VF MAC via the MAC-list cmd */
			status = be_cmd_set_mac_list(adapter, mac, 1, vf + 1);
		} else {
			status = be_cmd_pmac_add(adapter, mac,
					vf_cfg->if_handle,
					&vf_cfg->pmac_id, vf + 1);
		}

		if (status)
			dev_err(&adapter->pdev->dev,
			"Mac address assignment failed for VF %d\n", vf);
		else
			memcpy(vf_cfg->mac_addr, mac, ETH_ALEN);

		/* Next VF gets the seed MAC + 1 in the last octet */
		mac[5] += 1;
	}
	return status;
}
2532
/* Tear down per-VF state: delete each VF's MAC/pmac and interface, then
 * disable SR-IOV. If any VF is still assigned to a VM, the FW/PCI teardown
 * is skipped (only the host-side config is freed).
 */
static void be_vf_clear(struct be_adapter *adapter)
{
	struct be_vf_cfg *vf_cfg;
	u32 vf;

	if (be_find_vfs(adapter, ASSIGNED)) {
		dev_warn(&adapter->pdev->dev, "VFs are assigned to VMs\n");
		goto done;
	}

	for_all_vfs(adapter, vf_cfg, vf) {
		if (lancer_chip(adapter))
			/* Clearing the MAC list removes the VF MAC on Lancer */
			be_cmd_set_mac_list(adapter, NULL, 0, vf + 1);
		else
			be_cmd_pmac_del(adapter, vf_cfg->if_handle,
					vf_cfg->pmac_id, vf + 1);

		be_cmd_if_destroy(adapter, vf_cfg->if_handle, vf + 1);
	}
	pci_disable_sriov(adapter->pdev);
done:
	kfree(adapter->vf_cfg);
	adapter->num_vfs = 0;
}
2557
/* Undo be_setup(): stop the worker, clear VFs, delete the extra unicast
 * MACs, destroy the interface and all queues, and disable MSI-x.
 * Always returns 0.
 */
static int be_clear(struct be_adapter *adapter)
{
	int i = 1;

	if (adapter->flags & BE_FLAGS_WORKER_SCHEDULED) {
		cancel_delayed_work_sync(&adapter->work);
		adapter->flags &= ~BE_FLAGS_WORKER_SCHEDULED;
	}

	if (sriov_enabled(adapter))
		be_vf_clear(adapter);

	/* Delete the programmed unicast MACs; i starts at 1 so
	 * pmac_id[0] is not deleted here
	 */
	for (; adapter->uc_macs > 0; adapter->uc_macs--, i++)
		be_cmd_pmac_del(adapter, adapter->if_handle,
			adapter->pmac_id[i], 0);

	be_cmd_if_destroy(adapter, adapter->if_handle, 0);

	be_mcc_queues_destroy(adapter);
	be_rx_cqs_destroy(adapter);
	be_tx_queues_destroy(adapter);
	be_evt_queues_destroy(adapter);

	be_msix_disable(adapter);
	return 0;
}
2584
Sathya Perla39f1d942012-05-08 19:41:24 +00002585static int be_vf_setup_init(struct be_adapter *adapter)
Sathya Perla30128032011-11-10 19:17:57 +00002586{
Sathya Perla11ac75e2011-12-13 00:58:50 +00002587 struct be_vf_cfg *vf_cfg;
Sathya Perla30128032011-11-10 19:17:57 +00002588 int vf;
2589
Sathya Perla39f1d942012-05-08 19:41:24 +00002590 adapter->vf_cfg = kcalloc(adapter->num_vfs, sizeof(*vf_cfg),
2591 GFP_KERNEL);
2592 if (!adapter->vf_cfg)
2593 return -ENOMEM;
2594
Sathya Perla11ac75e2011-12-13 00:58:50 +00002595 for_all_vfs(adapter, vf_cfg, vf) {
2596 vf_cfg->if_handle = -1;
2597 vf_cfg->pmac_id = -1;
Sathya Perla30128032011-11-10 19:17:57 +00002598 }
Sathya Perla39f1d942012-05-08 19:41:24 +00002599 return 0;
Sathya Perla30128032011-11-10 19:17:57 +00002600}
2601
/* Enable SR-IOV and configure the requested VFs: one interface per VF,
 * MAC assignment, a QoS/link-speed cap, and the default (switch) VLAN.
 * Returns 0 on success and on the benign cases where VFs are already
 * enabled or the platform refuses SR-IOV; otherwise the failing command
 * status.
 */
static int be_vf_setup(struct be_adapter *adapter)
{
	struct be_vf_cfg *vf_cfg;
	struct device *dev = &adapter->pdev->dev;
	u32 cap_flags, en_flags, vf;
	u16 def_vlan, lnk_speed;
	int status, enabled_vfs;

	/* NOTE(review): presumably VFs left enabled by a previous driver
	 * load / crash; they are honored as-is and num_vfs is ignored. */
	enabled_vfs = be_find_vfs(adapter, ENABLED);
	if (enabled_vfs) {
		dev_warn(dev, "%d VFs are already enabled\n", enabled_vfs);
		dev_warn(dev, "Ignoring num_vfs=%d setting\n", num_vfs);
		return 0;
	}

	/* Clamp the num_vfs module parameter to the device capability
	 * cached by be_get_config() */
	if (num_vfs > adapter->dev_num_vfs) {
		dev_warn(dev, "Device supports %d VFs and not %d\n",
			adapter->dev_num_vfs, num_vfs);
		num_vfs = adapter->dev_num_vfs;
	}

	status = pci_enable_sriov(adapter->pdev, num_vfs);
	if (!status) {
		adapter->num_vfs = num_vfs;
	} else {
		/* Platform doesn't support SRIOV though device supports it */
		dev_warn(dev, "SRIOV enable failed\n");
		return 0;
	}

	status = be_vf_setup_init(adapter);
	if (status)
		goto err;

	/* VF interfaces get only the basic RX filter capabilities */
	cap_flags = en_flags = BE_IF_FLAGS_UNTAGGED | BE_IF_FLAGS_BROADCAST |
				BE_IF_FLAGS_MULTICAST;
	for_all_vfs(adapter, vf_cfg, vf) {
		/* domain vf+1: domain 0 is the PF itself */
		status = be_cmd_if_create(adapter, cap_flags, en_flags,
					&vf_cfg->if_handle, vf + 1);
		if (status)
			goto err;
	}

	/* NOTE(review): enabled_vfs is always 0 here (non-zero returned
	 * early above), so this check looks vestigial — confirm. */
	if (!enabled_vfs) {
		status = be_vf_eth_addr_config(adapter);
		if (status)
			goto err;
	}

	for_all_vfs(adapter, vf_cfg, vf) {
		/* Cap each VF at 1000 Mbps; tx_rate is stored as
		 * lnk_speed * 10 — units presumably 100 kbps, TODO confirm */
		lnk_speed = 1000;
		status = be_cmd_set_qos(adapter, lnk_speed, vf + 1);
		if (status)
			goto err;
		vf_cfg->tx_rate = lnk_speed * 10;

		/* Record the default VLAN the hypervisor switch applies */
		status = be_cmd_get_hsw_config(adapter, &def_vlan,
				vf + 1, vf_cfg->if_handle);
		if (status)
			goto err;
		vf_cfg->def_vid = def_vlan;
	}
	return 0;
err:
	return status;
}
2668
Sathya Perla30128032011-11-10 19:17:57 +00002669static void be_setup_init(struct be_adapter *adapter)
2670{
2671 adapter->vlan_prio_bmap = 0xff;
Ajit Khaparde42f11cf2012-04-21 18:53:22 +00002672 adapter->phy.link_speed = -1;
Sathya Perla30128032011-11-10 19:17:57 +00002673 adapter->if_handle = -1;
2674 adapter->be3_native = false;
2675 adapter->promiscuous = false;
2676 adapter->eq_next_idx = 0;
2677}
2678
Padmanabh Ratnakar1578e772012-06-07 04:37:08 +00002679static int be_get_mac_addr(struct be_adapter *adapter, u8 *mac, u32 if_handle,
2680 bool *active_mac, u32 *pmac_id)
Padmanabh Ratnakar590c3912011-11-25 05:47:26 +00002681{
Padmanabh Ratnakar1578e772012-06-07 04:37:08 +00002682 int status = 0;
Padmanabh Ratnakare5e1ee82012-02-03 09:50:17 +00002683
Padmanabh Ratnakar1578e772012-06-07 04:37:08 +00002684 if (!is_zero_ether_addr(adapter->netdev->perm_addr)) {
2685 memcpy(mac, adapter->netdev->dev_addr, ETH_ALEN);
2686 if (!lancer_chip(adapter) && !be_physfn(adapter))
2687 *active_mac = true;
2688 else
2689 *active_mac = false;
Padmanabh Ratnakare5e1ee82012-02-03 09:50:17 +00002690
Padmanabh Ratnakar1578e772012-06-07 04:37:08 +00002691 return status;
Padmanabh Ratnakare5e1ee82012-02-03 09:50:17 +00002692 }
Padmanabh Ratnakar1578e772012-06-07 04:37:08 +00002693
2694 if (lancer_chip(adapter)) {
2695 status = be_cmd_get_mac_from_list(adapter, mac,
2696 active_mac, pmac_id, 0);
2697 if (*active_mac) {
Sathya Perla5ee49792012-09-28 04:39:41 +00002698 status = be_cmd_mac_addr_query(adapter, mac, false,
2699 if_handle, *pmac_id);
Padmanabh Ratnakar1578e772012-06-07 04:37:08 +00002700 }
2701 } else if (be_physfn(adapter)) {
2702 /* For BE3, for PF get permanent MAC */
Sathya Perla5ee49792012-09-28 04:39:41 +00002703 status = be_cmd_mac_addr_query(adapter, mac, true, 0, 0);
Padmanabh Ratnakar1578e772012-06-07 04:37:08 +00002704 *active_mac = false;
2705 } else {
2706 /* For BE3, for VF get soft MAC assigned by PF*/
Sathya Perla5ee49792012-09-28 04:39:41 +00002707 status = be_cmd_mac_addr_query(adapter, mac, false,
Padmanabh Ratnakar1578e772012-06-07 04:37:08 +00002708 if_handle, 0);
2709 *active_mac = true;
2710 }
Padmanabh Ratnakar590c3912011-11-25 05:47:26 +00002711 return status;
2712}
2713
Sathya Perla39f1d942012-05-08 19:41:24 +00002714/* Routine to query per function resource limits */
2715static int be_get_config(struct be_adapter *adapter)
2716{
2717 int pos;
2718 u16 dev_num_vfs;
2719
2720 pos = pci_find_ext_capability(adapter->pdev, PCI_EXT_CAP_ID_SRIOV);
2721 if (pos) {
2722 pci_read_config_word(adapter->pdev, pos + PCI_SRIOV_TOTAL_VF,
2723 &dev_num_vfs);
Vasundhara Volam7c5a5242012-08-28 20:37:41 +00002724 if (!lancer_chip(adapter))
2725 dev_num_vfs = min_t(u16, dev_num_vfs, MAX_VFS);
Sathya Perla39f1d942012-05-08 19:41:24 +00002726 adapter->dev_num_vfs = dev_num_vfs;
2727 }
2728 return 0;
2729}
2730
/* Bring up the adapter: interrupt vectors, event/completion/MCC queues,
 * the primary interface and its MAC, VLAN/RX-mode/flow-control state,
 * and optionally SR-IOV VFs.  On any failure the partially created
 * state is torn down via be_clear() before the status is returned.
 */
static int be_setup(struct be_adapter *adapter)
{
	struct device *dev = &adapter->pdev->dev;
	u32 cap_flags, en_flags;
	u32 tx_fc, rx_fc;
	int status;
	u8 mac[ETH_ALEN];
	bool active_mac;

	be_setup_init(adapter);

	be_get_config(adapter);

	be_cmd_req_native_mode(adapter);

	/* Event queues first; the completion queues created below attach
	 * to them */
	be_msix_enable(adapter);

	status = be_evt_queues_create(adapter);
	if (status)
		goto err;

	status = be_tx_cqs_create(adapter);
	if (status)
		goto err;

	status = be_rx_cqs_create(adapter);
	if (status)
		goto err;

	status = be_mcc_queues_create(adapter);
	if (status)
		goto err;

	/* Capabilities include the promiscuous modes so they can be
	 * toggled later; only the basic filters are enabled up front */
	en_flags = BE_IF_FLAGS_UNTAGGED | BE_IF_FLAGS_BROADCAST |
			BE_IF_FLAGS_MULTICAST | BE_IF_FLAGS_PASS_L3L4_ERRORS;
	cap_flags = en_flags | BE_IF_FLAGS_MCAST_PROMISCUOUS |
			BE_IF_FLAGS_VLAN_PROMISCUOUS | BE_IF_FLAGS_PROMISCUOUS;

	if (adapter->function_caps & BE_FUNCTION_CAPS_RSS) {
		cap_flags |= BE_IF_FLAGS_RSS;
		en_flags |= BE_IF_FLAGS_RSS;
	}

	/* Lancer VFs get a reduced flag set */
	if (lancer_chip(adapter) && !be_physfn(adapter)) {
		en_flags = BE_IF_FLAGS_UNTAGGED |
			    BE_IF_FLAGS_BROADCAST |
			    BE_IF_FLAGS_MULTICAST;
		cap_flags = en_flags;
	}

	status = be_cmd_if_create(adapter, cap_flags, en_flags,
				  &adapter->if_handle, 0);
	if (status != 0)
		goto err;

	/* Discover the MAC for this interface; program it only if it is
	 * not already active (pmac_id[0] tracks the active MAC's id) */
	memset(mac, 0, ETH_ALEN);
	active_mac = false;
	status = be_get_mac_addr(adapter, mac, adapter->if_handle,
				 &active_mac, &adapter->pmac_id[0]);
	if (status != 0)
		goto err;

	if (!active_mac) {
		status = be_cmd_pmac_add(adapter, mac, adapter->if_handle,
					 &adapter->pmac_id[0], 0);
		if (status != 0)
			goto err;
	}

	if (is_zero_ether_addr(adapter->netdev->dev_addr)) {
		memcpy(adapter->netdev->dev_addr, mac, ETH_ALEN);
		memcpy(adapter->netdev->perm_addr, mac, ETH_ALEN);
	}

	status = be_tx_qs_create(adapter);
	if (status)
		goto err;

	be_cmd_get_fw_ver(adapter, adapter->fw_ver, NULL);

	/* Re-program VLAN filters that survived an adapter reset */
	if (adapter->vlans_added)
		be_vid_config(adapter);

	be_set_rx_mode(adapter->netdev);

	/* Only push flow-control settings if they differ from the
	 * adapter's current values */
	be_cmd_get_flow_control(adapter, &tx_fc, &rx_fc);

	if (rx_fc != adapter->rx_fc || tx_fc != adapter->tx_fc)
		be_cmd_set_flow_control(adapter, adapter->tx_fc,
					adapter->rx_fc);

	/* VF setup failures are not fatal to PF bring-up */
	if (be_physfn(adapter) && num_vfs) {
		if (adapter->dev_num_vfs)
			be_vf_setup(adapter);
		else
			dev_warn(dev, "device doesn't support SRIOV\n");
	}

	be_cmd_get_phy_info(adapter);
	if (be_pause_supported(adapter))
		adapter->phy.fc_autoneg = 1;

	/* Start the periodic worker (stats, error detection, ...) */
	schedule_delayed_work(&adapter->work, msecs_to_jiffies(1000));
	adapter->flags |= BE_FLAGS_WORKER_SCHEDULED;
	return 0;
err:
	be_clear(adapter);
	return status;
}
2840
Ivan Vecera66268732011-12-08 01:31:21 +00002841#ifdef CONFIG_NET_POLL_CONTROLLER
2842static void be_netpoll(struct net_device *netdev)
2843{
2844 struct be_adapter *adapter = netdev_priv(netdev);
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002845 struct be_eq_obj *eqo;
Ivan Vecera66268732011-12-08 01:31:21 +00002846 int i;
2847
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002848 for_all_evt_queues(adapter, eqo, i)
2849 event_handle(eqo);
2850
2851 return;
Ivan Vecera66268732011-12-08 01:31:21 +00002852}
2853#endif
2854
/* Signature expected at the start of a UFI firmware file header. */
#define FW_FILE_HDR_SIGN 	"ServerEngines Corp. "
/* 32-byte cookie marking a flash section directory inside a UFI image,
 * stored as two 16-byte rows: the first literal is NUL-padded out to 16
 * bytes, the second is exactly 16 chars so no terminator is stored.
 * It is compared with memcmp(..., sizeof(flash_cookie)), so the padding
 * bytes are part of the pattern.
 * NOTE(review): not static — confirm no other translation unit needs it
 * before narrowing the linkage. */
char flash_cookie[2][16] = {"*** SE FLAS", "H DIRECTORY *** "};
2857
Sarveshwar Bandifa9a6fe2009-11-20 14:23:47 -08002858static bool be_flash_redboot(struct be_adapter *adapter,
Ajit Khaparde3f0d4562010-02-09 01:30:35 +00002859 const u8 *p, u32 img_start, int image_size,
2860 int hdr_size)
Sarveshwar Bandifa9a6fe2009-11-20 14:23:47 -08002861{
2862 u32 crc_offset;
2863 u8 flashed_crc[4];
2864 int status;
Ajit Khaparde3f0d4562010-02-09 01:30:35 +00002865
2866 crc_offset = hdr_size + img_start + image_size - 4;
2867
Sarveshwar Bandifa9a6fe2009-11-20 14:23:47 -08002868 p += crc_offset;
Ajit Khaparde3f0d4562010-02-09 01:30:35 +00002869
2870 status = be_cmd_get_flash_crc(adapter, flashed_crc,
Ajit Khapardef510fc62010-03-31 01:47:45 +00002871 (image_size - 4));
Sarveshwar Bandifa9a6fe2009-11-20 14:23:47 -08002872 if (status) {
2873 dev_err(&adapter->pdev->dev,
2874 "could not get crc from flash, not flashing redboot\n");
2875 return false;
2876 }
2877
2878 /*update redboot only if crc does not match*/
2879 if (!memcmp(flashed_crc, p, 4))
2880 return false;
2881 else
2882 return true;
Sarveshwar Bandifa9a6fe2009-11-20 14:23:47 -08002883}
2884
Sathya Perla306f1342011-08-02 19:57:45 +00002885static bool phy_flashing_required(struct be_adapter *adapter)
2886{
Ajit Khaparde42f11cf2012-04-21 18:53:22 +00002887 return (adapter->phy.phy_type == TN_8022 &&
2888 adapter->phy.interface_type == PHY_TYPE_BASET_10GB);
Sathya Perla306f1342011-08-02 19:57:45 +00002889}
2890
Padmanabh Ratnakarc165541e2012-04-25 01:47:15 +00002891static bool is_comp_in_ufi(struct be_adapter *adapter,
2892 struct flash_section_info *fsec, int type)
2893{
2894 int i = 0, img_type = 0;
2895 struct flash_section_info_g2 *fsec_g2 = NULL;
2896
2897 if (adapter->generation != BE_GEN3)
2898 fsec_g2 = (struct flash_section_info_g2 *)fsec;
2899
2900 for (i = 0; i < MAX_FLASH_COMP; i++) {
2901 if (fsec_g2)
2902 img_type = le32_to_cpu(fsec_g2->fsec_entry[i].type);
2903 else
2904 img_type = le32_to_cpu(fsec->fsec_entry[i].type);
2905
2906 if (img_type == type)
2907 return true;
2908 }
2909 return false;
2910
2911}
2912
2913struct flash_section_info *get_fsec_info(struct be_adapter *adapter,
2914 int header_size,
2915 const struct firmware *fw)
2916{
2917 struct flash_section_info *fsec = NULL;
2918 const u8 *p = fw->data;
2919
2920 p += header_size;
2921 while (p < (fw->data + fw->size)) {
2922 fsec = (struct flash_section_info *)p;
2923 if (!memcmp(flash_cookie, fsec->cookie, sizeof(flash_cookie)))
2924 return fsec;
2925 p += 32;
2926 }
2927 return NULL;
2928}
2929
Ajit Khaparde3f0d4562010-02-09 01:30:35 +00002930static int be_flash_data(struct be_adapter *adapter,
Padmanabh Ratnakarc165541e2012-04-25 01:47:15 +00002931 const struct firmware *fw,
2932 struct be_dma_mem *flash_cmd,
2933 int num_of_images)
Ajit Khaparde3f0d4562010-02-09 01:30:35 +00002934
Ajit Khaparde84517482009-09-04 03:12:16 +00002935{
Ajit Khaparde3f0d4562010-02-09 01:30:35 +00002936 int status = 0, i, filehdr_size = 0;
Padmanabh Ratnakarc165541e2012-04-25 01:47:15 +00002937 int img_hdrs_size = (num_of_images * sizeof(struct image_hdr));
Ajit Khaparde3f0d4562010-02-09 01:30:35 +00002938 u32 total_bytes = 0, flash_op;
Ajit Khaparde84517482009-09-04 03:12:16 +00002939 int num_bytes;
2940 const u8 *p = fw->data;
2941 struct be_cmd_write_flashrom *req = flash_cmd->va;
Joe Perches215faf92010-12-21 02:16:10 -08002942 const struct flash_comp *pflashcomp;
Padmanabh Ratnakarc165541e2012-04-25 01:47:15 +00002943 int num_comp, hdr_size;
2944 struct flash_section_info *fsec = NULL;
Ajit Khaparde84517482009-09-04 03:12:16 +00002945
Padmanabh Ratnakarc165541e2012-04-25 01:47:15 +00002946 struct flash_comp gen3_flash_types[] = {
2947 { FLASH_iSCSI_PRIMARY_IMAGE_START_g3, OPTYPE_ISCSI_ACTIVE,
2948 FLASH_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_iSCSI},
2949 { FLASH_REDBOOT_START_g3, OPTYPE_REDBOOT,
2950 FLASH_REDBOOT_IMAGE_MAX_SIZE_g3, IMAGE_BOOT_CODE},
2951 { FLASH_iSCSI_BIOS_START_g3, OPTYPE_BIOS,
2952 FLASH_BIOS_IMAGE_MAX_SIZE_g3, IMAGE_OPTION_ROM_ISCSI},
2953 { FLASH_PXE_BIOS_START_g3, OPTYPE_PXE_BIOS,
2954 FLASH_BIOS_IMAGE_MAX_SIZE_g3, IMAGE_OPTION_ROM_PXE},
2955 { FLASH_FCoE_BIOS_START_g3, OPTYPE_FCOE_BIOS,
2956 FLASH_BIOS_IMAGE_MAX_SIZE_g3, IMAGE_OPTION_ROM_FCoE},
2957 { FLASH_iSCSI_BACKUP_IMAGE_START_g3, OPTYPE_ISCSI_BACKUP,
2958 FLASH_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_BACKUP_iSCSI},
2959 { FLASH_FCoE_PRIMARY_IMAGE_START_g3, OPTYPE_FCOE_FW_ACTIVE,
2960 FLASH_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_FCoE},
2961 { FLASH_FCoE_BACKUP_IMAGE_START_g3, OPTYPE_FCOE_FW_BACKUP,
2962 FLASH_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_BACKUP_FCoE},
2963 { FLASH_NCSI_START_g3, OPTYPE_NCSI_FW,
2964 FLASH_NCSI_IMAGE_MAX_SIZE_g3, IMAGE_NCSI},
2965 { FLASH_PHY_FW_START_g3, OPTYPE_PHY_FW,
2966 FLASH_PHY_FW_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_PHY}
Ajit Khaparde3f0d4562010-02-09 01:30:35 +00002967 };
Padmanabh Ratnakarc165541e2012-04-25 01:47:15 +00002968
2969 struct flash_comp gen2_flash_types[] = {
2970 { FLASH_iSCSI_PRIMARY_IMAGE_START_g2, OPTYPE_ISCSI_ACTIVE,
2971 FLASH_IMAGE_MAX_SIZE_g2, IMAGE_FIRMWARE_iSCSI},
2972 { FLASH_REDBOOT_START_g2, OPTYPE_REDBOOT,
2973 FLASH_REDBOOT_IMAGE_MAX_SIZE_g2, IMAGE_BOOT_CODE},
2974 { FLASH_iSCSI_BIOS_START_g2, OPTYPE_BIOS,
2975 FLASH_BIOS_IMAGE_MAX_SIZE_g2, IMAGE_OPTION_ROM_ISCSI},
2976 { FLASH_PXE_BIOS_START_g2, OPTYPE_PXE_BIOS,
2977 FLASH_BIOS_IMAGE_MAX_SIZE_g2, IMAGE_OPTION_ROM_PXE},
2978 { FLASH_FCoE_BIOS_START_g2, OPTYPE_FCOE_BIOS,
2979 FLASH_BIOS_IMAGE_MAX_SIZE_g2, IMAGE_OPTION_ROM_FCoE},
2980 { FLASH_iSCSI_BACKUP_IMAGE_START_g2, OPTYPE_ISCSI_BACKUP,
2981 FLASH_IMAGE_MAX_SIZE_g2, IMAGE_FIRMWARE_BACKUP_iSCSI},
2982 { FLASH_FCoE_PRIMARY_IMAGE_START_g2, OPTYPE_FCOE_FW_ACTIVE,
2983 FLASH_IMAGE_MAX_SIZE_g2, IMAGE_FIRMWARE_FCoE},
2984 { FLASH_FCoE_BACKUP_IMAGE_START_g2, OPTYPE_FCOE_FW_BACKUP,
2985 FLASH_IMAGE_MAX_SIZE_g2, IMAGE_FIRMWARE_BACKUP_FCoE}
Ajit Khaparde3f0d4562010-02-09 01:30:35 +00002986 };
2987
2988 if (adapter->generation == BE_GEN3) {
2989 pflashcomp = gen3_flash_types;
2990 filehdr_size = sizeof(struct flash_file_hdr_g3);
Joe Perches215faf92010-12-21 02:16:10 -08002991 num_comp = ARRAY_SIZE(gen3_flash_types);
Ajit Khaparde3f0d4562010-02-09 01:30:35 +00002992 } else {
2993 pflashcomp = gen2_flash_types;
2994 filehdr_size = sizeof(struct flash_file_hdr_g2);
Joe Perches215faf92010-12-21 02:16:10 -08002995 num_comp = ARRAY_SIZE(gen2_flash_types);
Ajit Khaparde84517482009-09-04 03:12:16 +00002996 }
Padmanabh Ratnakarc165541e2012-04-25 01:47:15 +00002997 /* Get flash section info*/
2998 fsec = get_fsec_info(adapter, filehdr_size + img_hdrs_size, fw);
2999 if (!fsec) {
3000 dev_err(&adapter->pdev->dev,
3001 "Invalid Cookie. UFI corrupted ?\n");
3002 return -1;
3003 }
Sarveshwar Bandi9fe96932010-03-02 22:37:28 +00003004 for (i = 0; i < num_comp; i++) {
Padmanabh Ratnakarc165541e2012-04-25 01:47:15 +00003005 if (!is_comp_in_ufi(adapter, fsec, pflashcomp[i].img_type))
Sarveshwar Bandi9fe96932010-03-02 22:37:28 +00003006 continue;
Padmanabh Ratnakarc165541e2012-04-25 01:47:15 +00003007
3008 if ((pflashcomp[i].optype == OPTYPE_NCSI_FW) &&
3009 memcmp(adapter->fw_ver, "3.102.148.0", 11) < 0)
3010 continue;
3011
3012 if (pflashcomp[i].optype == OPTYPE_PHY_FW) {
Sathya Perla306f1342011-08-02 19:57:45 +00003013 if (!phy_flashing_required(adapter))
3014 continue;
3015 }
Padmanabh Ratnakarc165541e2012-04-25 01:47:15 +00003016
3017 hdr_size = filehdr_size +
3018 (num_of_images * sizeof(struct image_hdr));
3019
3020 if ((pflashcomp[i].optype == OPTYPE_REDBOOT) &&
3021 (!be_flash_redboot(adapter, fw->data, pflashcomp[i].offset,
3022 pflashcomp[i].size, hdr_size)))
Ajit Khaparde3f0d4562010-02-09 01:30:35 +00003023 continue;
Padmanabh Ratnakarc165541e2012-04-25 01:47:15 +00003024
3025 /* Flash the component */
Ajit Khaparde3f0d4562010-02-09 01:30:35 +00003026 p = fw->data;
Padmanabh Ratnakarc165541e2012-04-25 01:47:15 +00003027 p += filehdr_size + pflashcomp[i].offset + img_hdrs_size;
Sathya Perla306f1342011-08-02 19:57:45 +00003028 if (p + pflashcomp[i].size > fw->data + fw->size)
3029 return -1;
3030 total_bytes = pflashcomp[i].size;
Ajit Khaparde3f0d4562010-02-09 01:30:35 +00003031 while (total_bytes) {
3032 if (total_bytes > 32*1024)
3033 num_bytes = 32*1024;
3034 else
3035 num_bytes = total_bytes;
3036 total_bytes -= num_bytes;
Sathya Perla306f1342011-08-02 19:57:45 +00003037 if (!total_bytes) {
Padmanabh Ratnakarc165541e2012-04-25 01:47:15 +00003038 if (pflashcomp[i].optype == OPTYPE_PHY_FW)
Sathya Perla306f1342011-08-02 19:57:45 +00003039 flash_op = FLASHROM_OPER_PHY_FLASH;
3040 else
3041 flash_op = FLASHROM_OPER_FLASH;
3042 } else {
Padmanabh Ratnakarc165541e2012-04-25 01:47:15 +00003043 if (pflashcomp[i].optype == OPTYPE_PHY_FW)
Sathya Perla306f1342011-08-02 19:57:45 +00003044 flash_op = FLASHROM_OPER_PHY_SAVE;
3045 else
3046 flash_op = FLASHROM_OPER_SAVE;
3047 }
Ajit Khaparde3f0d4562010-02-09 01:30:35 +00003048 memcpy(req->params.data_buf, p, num_bytes);
3049 p += num_bytes;
3050 status = be_cmd_write_flashrom(adapter, flash_cmd,
3051 pflashcomp[i].optype, flash_op, num_bytes);
3052 if (status) {
Sathya Perla306f1342011-08-02 19:57:45 +00003053 if ((status == ILLEGAL_IOCTL_REQ) &&
3054 (pflashcomp[i].optype ==
Padmanabh Ratnakarc165541e2012-04-25 01:47:15 +00003055 OPTYPE_PHY_FW))
Sathya Perla306f1342011-08-02 19:57:45 +00003056 break;
Ajit Khaparde3f0d4562010-02-09 01:30:35 +00003057 dev_err(&adapter->pdev->dev,
3058 "cmd to write to flash rom failed.\n");
3059 return -1;
3060 }
Ajit Khaparde84517482009-09-04 03:12:16 +00003061 }
Ajit Khaparde84517482009-09-04 03:12:16 +00003062 }
Ajit Khaparde84517482009-09-04 03:12:16 +00003063 return 0;
3064}
3065
Ajit Khaparde3f0d4562010-02-09 01:30:35 +00003066static int get_ufigen_type(struct flash_file_hdr_g2 *fhdr)
3067{
3068 if (fhdr == NULL)
3069 return 0;
3070 if (fhdr->build[0] == '3')
3071 return BE_GEN3;
3072 else if (fhdr->build[0] == '2')
3073 return BE_GEN2;
3074 else
3075 return 0;
3076}
3077
Padmanabh Ratnakarf67ef7b2012-07-12 03:57:09 +00003078static int lancer_wait_idle(struct be_adapter *adapter)
3079{
3080#define SLIPORT_IDLE_TIMEOUT 30
3081 u32 reg_val;
3082 int status = 0, i;
3083
3084 for (i = 0; i < SLIPORT_IDLE_TIMEOUT; i++) {
3085 reg_val = ioread32(adapter->db + PHYSDEV_CONTROL_OFFSET);
3086 if ((reg_val & PHYSDEV_CONTROL_INP_MASK) == 0)
3087 break;
3088
3089 ssleep(1);
3090 }
3091
3092 if (i == SLIPORT_IDLE_TIMEOUT)
3093 status = -1;
3094
3095 return status;
3096}
3097
3098static int lancer_fw_reset(struct be_adapter *adapter)
3099{
3100 int status = 0;
3101
3102 status = lancer_wait_idle(adapter);
3103 if (status)
3104 return status;
3105
3106 iowrite32(PHYSDEV_CONTROL_FW_RESET_MASK, adapter->db +
3107 PHYSDEV_CONTROL_OFFSET);
3108
3109 return status;
3110}
3111
/* Download a firmware image to a Lancer chip: stream the image in 32KB
 * chunks through a DMA buffer to the "/prg" object, commit it with a
 * zero-length write, then reset the chip if the adapter says a reset is
 * needed to activate the new firmware.
 */
static int lancer_fw_download(struct be_adapter *adapter,
				const struct firmware *fw)
{
#define LANCER_FW_DOWNLOAD_CHUNK      (32 * 1024)
#define LANCER_FW_DOWNLOAD_LOCATION   "/prg"
	struct be_dma_mem flash_cmd;
	const u8 *data_ptr = NULL;
	u8 *dest_image_ptr = NULL;
	size_t image_size = 0;
	u32 chunk_size = 0;
	u32 data_written = 0;
	u32 offset = 0;
	int status = 0;
	u8 add_status = 0;
	u8 change_status;

	/* The write-object command works in 4-byte units */
	if (!IS_ALIGNED(fw->size, sizeof(u32))) {
		dev_err(&adapter->pdev->dev,
			"FW Image not properly aligned. "
			"Length must be 4 byte aligned.\n");
		status = -EINVAL;
		goto lancer_fw_exit;
	}

	/* DMA buffer holds the command header plus one chunk of payload */
	flash_cmd.size = sizeof(struct lancer_cmd_req_write_object)
				+ LANCER_FW_DOWNLOAD_CHUNK;
	flash_cmd.va = dma_alloc_coherent(&adapter->pdev->dev, flash_cmd.size,
						&flash_cmd.dma, GFP_KERNEL);
	if (!flash_cmd.va) {
		status = -ENOMEM;
		dev_err(&adapter->pdev->dev,
			"Memory allocation failure while flashing\n");
		goto lancer_fw_exit;
	}

	dest_image_ptr = flash_cmd.va +
				sizeof(struct lancer_cmd_req_write_object);
	image_size = fw->size;
	data_ptr = fw->data;

	while (image_size) {
		chunk_size = min_t(u32, image_size, LANCER_FW_DOWNLOAD_CHUNK);

		/* Copy the image chunk content. */
		memcpy(dest_image_ptr, data_ptr, chunk_size);

		status = lancer_cmd_write_object(adapter, &flash_cmd,
						 chunk_size, offset,
						 LANCER_FW_DOWNLOAD_LOCATION,
						 &data_written, &change_status,
						 &add_status);
		if (status)
			break;

		/* Advance by what the adapter reports as actually written,
		 * which may be less than chunk_size */
		offset += data_written;
		data_ptr += data_written;
		image_size -= data_written;
	}

	if (!status) {
		/* Commit the FW written */
		status = lancer_cmd_write_object(adapter, &flash_cmd,
						 0, offset,
						 LANCER_FW_DOWNLOAD_LOCATION,
						 &data_written, &change_status,
						 &add_status);
	}

	dma_free_coherent(&adapter->pdev->dev, flash_cmd.size, flash_cmd.va,
				flash_cmd.dma);
	if (status) {
		dev_err(&adapter->pdev->dev,
			"Firmware load error. "
			"Status code: 0x%x Additional Status: 0x%x\n",
			status, add_status);
		goto lancer_fw_exit;
	}

	/* change_status tells us how the new image becomes active */
	if (change_status == LANCER_FW_RESET_NEEDED) {
		status = lancer_fw_reset(adapter);
		if (status) {
			dev_err(&adapter->pdev->dev,
				"Adapter busy for FW reset.\n"
				"New FW will not be active.\n");
			goto lancer_fw_exit;
		}
	} else if (change_status != LANCER_NO_RESET_NEEDED) {
		dev_err(&adapter->pdev->dev,
			"System reboot required for new FW"
			" to be active\n");
	}

	dev_info(&adapter->pdev->dev, "Firmware flashed successfully\n");
lancer_fw_exit:
	return status;
}
3208
/* Flash a BE2/BE3 UFI firmware file: verify the file's generation
 * matches the adapter's, then hand the matching image(s) to
 * be_flash_data() via a shared DMA command buffer.
 */
static int be_fw_download(struct be_adapter *adapter, const struct firmware* fw)
{
	struct flash_file_hdr_g2 *fhdr;
	struct flash_file_hdr_g3 *fhdr3;
	struct image_hdr *img_hdr_ptr = NULL;
	struct be_dma_mem flash_cmd;
	const u8 *p;
	int status = 0, i = 0, num_imgs = 0;

	/* The g2 header prefix is enough to read the generation digit,
	 * even for g3 files */
	p = fw->data;
	fhdr = (struct flash_file_hdr_g2 *) p;

	/* Command header plus the 32KB chunk be_flash_data() writes */
	flash_cmd.size = sizeof(struct be_cmd_write_flashrom) + 32*1024;
	flash_cmd.va = dma_alloc_coherent(&adapter->pdev->dev, flash_cmd.size,
					  &flash_cmd.dma, GFP_KERNEL);
	if (!flash_cmd.va) {
		status = -ENOMEM;
		dev_err(&adapter->pdev->dev,
			"Memory allocation failure while flashing\n");
		goto be_fw_exit;
	}

	if ((adapter->generation == BE_GEN3) &&
	    (get_ufigen_type(fhdr) == BE_GEN3)) {
		fhdr3 = (struct flash_file_hdr_g3 *) fw->data;
		num_imgs = le32_to_cpu(fhdr3->num_imgs);
		for (i = 0; i < num_imgs; i++) {
			img_hdr_ptr = (struct image_hdr *) (fw->data +
					(sizeof(struct flash_file_hdr_g3) +
					 i * sizeof(struct image_hdr)));
			/* NOTE(review): imageid 1 appears to select the
			 * image variant meant for this adapter — confirm */
			if (le32_to_cpu(img_hdr_ptr->imageid) == 1)
				status = be_flash_data(adapter, fw, &flash_cmd,
						       num_imgs);
		}
	} else if ((adapter->generation == BE_GEN2) &&
		   (get_ufigen_type(fhdr) == BE_GEN2)) {
		/* GEN2 files carry no image-header table */
		status = be_flash_data(adapter, fw, &flash_cmd, 0);
	} else {
		dev_err(&adapter->pdev->dev,
			"UFI and Interface are not compatible for flashing\n");
		status = -1;
	}

	dma_free_coherent(&adapter->pdev->dev, flash_cmd.size, flash_cmd.va,
			  flash_cmd.dma);
	if (status) {
		dev_err(&adapter->pdev->dev, "Firmware load error\n");
		goto be_fw_exit;
	}

	dev_info(&adapter->pdev->dev, "Firmware flashed successfully\n");

be_fw_exit:
	return status;
}
3264
3265int be_load_fw(struct be_adapter *adapter, u8 *fw_file)
3266{
3267 const struct firmware *fw;
3268 int status;
3269
3270 if (!netif_running(adapter->netdev)) {
3271 dev_err(&adapter->pdev->dev,
3272 "Firmware load not allowed (interface is down)\n");
3273 return -1;
3274 }
3275
3276 status = request_firmware(&fw, fw_file, &adapter->pdev->dev);
3277 if (status)
3278 goto fw_exit;
3279
3280 dev_info(&adapter->pdev->dev, "Flashing firmware file %s\n", fw_file);
3281
3282 if (lancer_chip(adapter))
3283 status = lancer_fw_download(adapter, fw);
3284 else
3285 status = be_fw_download(adapter, fw);
3286
Ajit Khaparde84517482009-09-04 03:12:16 +00003287fw_exit:
3288 release_firmware(fw);
3289 return status;
3290}
3291
/* net_device callbacks for be2net interfaces; installed on the netdev
 * by be_netdev_init().  Includes SR-IOV VF management hooks and an
 * optional netpoll handler. */
static const struct net_device_ops be_netdev_ops = {
	.ndo_open = be_open,
	.ndo_stop = be_close,
	.ndo_start_xmit = be_xmit,
	.ndo_set_rx_mode = be_set_rx_mode,
	.ndo_set_mac_address = be_mac_addr_set,
	.ndo_change_mtu = be_change_mtu,
	.ndo_get_stats64 = be_get_stats64,
	.ndo_validate_addr = eth_validate_addr,
	.ndo_vlan_rx_add_vid = be_vlan_add_vid,
	.ndo_vlan_rx_kill_vid = be_vlan_rem_vid,
	.ndo_set_vf_mac = be_set_vf_mac,
	.ndo_set_vf_vlan = be_set_vf_vlan,
	.ndo_set_vf_tx_rate = be_set_vf_tx_rate,
	.ndo_get_vf_config = be_get_vf_config,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller = be_netpoll,
#endif
};
3311
/* Initialize the net_device: advertise offload features, install the
 * driver ops and ethtool ops, and register one NAPI context per event
 * queue.
 */
static void be_netdev_init(struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_eq_obj *eqo;
	int i;

	/* hw_features: offloads that may be toggled via ethtool */
	netdev->hw_features |= NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6 |
		NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_RXCSUM |
		NETIF_F_HW_VLAN_TX;
	if (be_multi_rxq(adapter))
		netdev->hw_features |= NETIF_F_RXHASH;

	/* features: everything above plus VLAN RX accel/filtering, which
	 * are always on (not in hw_features, so not toggleable) */
	netdev->features |= netdev->hw_features |
		NETIF_F_HW_VLAN_RX | NETIF_F_HW_VLAN_FILTER;

	/* Offloads inherited by stacked VLAN devices */
	netdev->vlan_features |= NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6 |
		NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;

	netdev->priv_flags |= IFF_UNICAST_FLT;

	netdev->flags |= IFF_MULTICAST;

	/* cap GSO so payload + Ethernet header fits in 64KB */
	netif_set_gso_max_size(netdev, 65535 - ETH_HLEN);

	netdev->netdev_ops = &be_netdev_ops;

	SET_ETHTOOL_OPS(netdev, &be_ethtool_ops);

	for_all_evt_queues(adapter, eqo, i)
		netif_napi_add(netdev, &eqo->napi, be_poll, BE_NAPI_WEIGHT);
}
3343
/* Release whichever BAR mappings be_map_pci_bars() set up.  Each
 * pointer is checked because not every chip/function type maps all
 * three regions (CSR, doorbell, RoCE doorbell). */
static void be_unmap_pci_bars(struct be_adapter *adapter)
{
	if (adapter->csr)
		iounmap(adapter->csr);
	if (adapter->db)
		iounmap(adapter->db);
	if (adapter->roce_db.base)
		pci_iounmap(adapter->pdev, adapter->roce_db.base);
}
3353
3354static int lancer_roce_map_pci_bars(struct be_adapter *adapter)
3355{
3356 struct pci_dev *pdev = adapter->pdev;
3357 u8 __iomem *addr;
3358
3359 addr = pci_iomap(pdev, 2, 0);
3360 if (addr == NULL)
3361 return -ENOMEM;
3362
3363 adapter->roce_db.base = addr;
3364 adapter->roce_db.io_addr = pci_resource_start(pdev, 2);
3365 adapter->roce_db.size = 8192;
3366 adapter->roce_db.total_size = pci_resource_len(pdev, 2);
3367 return 0;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003368}
3369
3370static int be_map_pci_bars(struct be_adapter *adapter)
3371{
3372 u8 __iomem *addr;
Sathya Perladb3ea782011-08-22 19:41:52 +00003373 int db_reg;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003374
Sathya Perlafe6d2a32010-11-21 23:25:50 +00003375 if (lancer_chip(adapter)) {
Parav Pandit045508a2012-03-26 14:27:13 +00003376 if (be_type_2_3(adapter)) {
3377 addr = ioremap_nocache(
3378 pci_resource_start(adapter->pdev, 0),
3379 pci_resource_len(adapter->pdev, 0));
3380 if (addr == NULL)
3381 return -ENOMEM;
3382 adapter->db = addr;
3383 }
3384 if (adapter->if_type == SLI_INTF_TYPE_3) {
3385 if (lancer_roce_map_pci_bars(adapter))
3386 goto pci_map_err;
3387 }
Sathya Perlafe6d2a32010-11-21 23:25:50 +00003388 return 0;
3389 }
3390
Sarveshwar Bandiba343c72010-03-31 02:56:12 +00003391 if (be_physfn(adapter)) {
3392 addr = ioremap_nocache(pci_resource_start(adapter->pdev, 2),
3393 pci_resource_len(adapter->pdev, 2));
3394 if (addr == NULL)
3395 return -ENOMEM;
3396 adapter->csr = addr;
3397 }
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003398
Sarveshwar Bandiba343c72010-03-31 02:56:12 +00003399 if (adapter->generation == BE_GEN2) {
Sarveshwar Bandiba343c72010-03-31 02:56:12 +00003400 db_reg = 4;
3401 } else {
Sarveshwar Bandiba343c72010-03-31 02:56:12 +00003402 if (be_physfn(adapter))
3403 db_reg = 4;
3404 else
3405 db_reg = 0;
3406 }
3407 addr = ioremap_nocache(pci_resource_start(adapter->pdev, db_reg),
3408 pci_resource_len(adapter->pdev, db_reg));
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003409 if (addr == NULL)
3410 goto pci_map_err;
Sathya Perla8788fdc2009-07-27 22:52:03 +00003411 adapter->db = addr;
Parav Pandit045508a2012-03-26 14:27:13 +00003412 if (adapter->sli_family == SKYHAWK_SLI_FAMILY) {
3413 adapter->roce_db.size = 4096;
3414 adapter->roce_db.io_addr =
3415 pci_resource_start(adapter->pdev, db_reg);
3416 adapter->roce_db.total_size =
3417 pci_resource_len(adapter->pdev, db_reg);
3418 }
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003419 return 0;
3420pci_map_err:
3421 be_unmap_pci_bars(adapter);
3422 return -ENOMEM;
3423}
3424
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003425static void be_ctrl_cleanup(struct be_adapter *adapter)
3426{
Sathya Perla8788fdc2009-07-27 22:52:03 +00003427 struct be_dma_mem *mem = &adapter->mbox_mem_alloced;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003428
3429 be_unmap_pci_bars(adapter);
3430
3431 if (mem->va)
Ivan Vecera2b7bceb2011-02-02 08:05:12 +00003432 dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
3433 mem->dma);
Sathya Perlae7b909a2009-11-22 22:01:10 +00003434
Sathya Perla5b8821b2011-08-02 19:57:44 +00003435 mem = &adapter->rx_filter;
Sathya Perlae7b909a2009-11-22 22:01:10 +00003436 if (mem->va)
Ivan Vecera2b7bceb2011-02-02 08:05:12 +00003437 dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
3438 mem->dma);
Sathya Perlacc7d7232012-08-28 20:37:43 +00003439 kfree(adapter->pmac_id);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003440}
3441
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003442static int be_ctrl_init(struct be_adapter *adapter)
3443{
Sathya Perla8788fdc2009-07-27 22:52:03 +00003444 struct be_dma_mem *mbox_mem_alloc = &adapter->mbox_mem_alloced;
3445 struct be_dma_mem *mbox_mem_align = &adapter->mbox_mem;
Sathya Perla5b8821b2011-08-02 19:57:44 +00003446 struct be_dma_mem *rx_filter = &adapter->rx_filter;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003447 int status;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003448
3449 status = be_map_pci_bars(adapter);
3450 if (status)
Sathya Perlae7b909a2009-11-22 22:01:10 +00003451 goto done;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003452
3453 mbox_mem_alloc->size = sizeof(struct be_mcc_mailbox) + 16;
Ivan Vecera2b7bceb2011-02-02 08:05:12 +00003454 mbox_mem_alloc->va = dma_alloc_coherent(&adapter->pdev->dev,
3455 mbox_mem_alloc->size,
3456 &mbox_mem_alloc->dma,
3457 GFP_KERNEL);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003458 if (!mbox_mem_alloc->va) {
Sathya Perlae7b909a2009-11-22 22:01:10 +00003459 status = -ENOMEM;
3460 goto unmap_pci_bars;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003461 }
3462 mbox_mem_align->size = sizeof(struct be_mcc_mailbox);
3463 mbox_mem_align->va = PTR_ALIGN(mbox_mem_alloc->va, 16);
3464 mbox_mem_align->dma = PTR_ALIGN(mbox_mem_alloc->dma, 16);
3465 memset(mbox_mem_align->va, 0, sizeof(struct be_mcc_mailbox));
Sathya Perlae7b909a2009-11-22 22:01:10 +00003466
Sathya Perla5b8821b2011-08-02 19:57:44 +00003467 rx_filter->size = sizeof(struct be_cmd_req_rx_filter);
3468 rx_filter->va = dma_alloc_coherent(&adapter->pdev->dev, rx_filter->size,
3469 &rx_filter->dma, GFP_KERNEL);
3470 if (rx_filter->va == NULL) {
Sathya Perlae7b909a2009-11-22 22:01:10 +00003471 status = -ENOMEM;
3472 goto free_mbox;
3473 }
Sathya Perla5b8821b2011-08-02 19:57:44 +00003474 memset(rx_filter->va, 0, rx_filter->size);
Sathya Perlae7b909a2009-11-22 22:01:10 +00003475
Sathya Perlacc7d7232012-08-28 20:37:43 +00003476 /* primary mac needs 1 pmac entry */
3477 adapter->pmac_id = kcalloc(adapter->max_pmac_cnt + 1,
3478 sizeof(*adapter->pmac_id), GFP_KERNEL);
3479 if (!adapter->pmac_id)
3480 return -ENOMEM;
3481
Ivan Vecera29849612010-12-14 05:43:19 +00003482 mutex_init(&adapter->mbox_lock);
Sathya Perla8788fdc2009-07-27 22:52:03 +00003483 spin_lock_init(&adapter->mcc_lock);
3484 spin_lock_init(&adapter->mcc_cq_lock);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003485
Sarveshwar Bandidd131e72010-05-25 16:16:32 -07003486 init_completion(&adapter->flash_compl);
Sathya Perlacf588472010-02-14 21:22:01 +00003487 pci_save_state(adapter->pdev);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003488 return 0;
Sathya Perlae7b909a2009-11-22 22:01:10 +00003489
3490free_mbox:
Ivan Vecera2b7bceb2011-02-02 08:05:12 +00003491 dma_free_coherent(&adapter->pdev->dev, mbox_mem_alloc->size,
3492 mbox_mem_alloc->va, mbox_mem_alloc->dma);
Sathya Perlae7b909a2009-11-22 22:01:10 +00003493
3494unmap_pci_bars:
3495 be_unmap_pci_bars(adapter);
3496
3497done:
3498 return status;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003499}
3500
3501static void be_stats_cleanup(struct be_adapter *adapter)
3502{
Sathya Perla3abcded2010-10-03 22:12:27 -07003503 struct be_dma_mem *cmd = &adapter->stats_cmd;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003504
3505 if (cmd->va)
Ivan Vecera2b7bceb2011-02-02 08:05:12 +00003506 dma_free_coherent(&adapter->pdev->dev, cmd->size,
3507 cmd->va, cmd->dma);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003508}
3509
3510static int be_stats_init(struct be_adapter *adapter)
3511{
Sathya Perla3abcded2010-10-03 22:12:27 -07003512 struct be_dma_mem *cmd = &adapter->stats_cmd;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003513
Selvin Xavier005d5692011-05-16 07:36:35 +00003514 if (adapter->generation == BE_GEN2) {
Ajit Khaparde89a88ab2011-05-16 07:36:18 +00003515 cmd->size = sizeof(struct be_cmd_req_get_stats_v0);
Selvin Xavier005d5692011-05-16 07:36:35 +00003516 } else {
3517 if (lancer_chip(adapter))
3518 cmd->size = sizeof(struct lancer_cmd_req_pport_stats);
3519 else
3520 cmd->size = sizeof(struct be_cmd_req_get_stats_v1);
3521 }
Ivan Vecera2b7bceb2011-02-02 08:05:12 +00003522 cmd->va = dma_alloc_coherent(&adapter->pdev->dev, cmd->size, &cmd->dma,
3523 GFP_KERNEL);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003524 if (cmd->va == NULL)
3525 return -1;
David S. Millerd291b9a2010-01-28 21:36:21 -08003526 memset(cmd->va, 0, cmd->size);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003527 return 0;
3528}
3529
/* PCI remove callback: tear the adapter down in reverse order of
 * be_probe(). The recovery worker is stopped before anything it might
 * touch is freed.
 */
static void __devexit be_remove(struct pci_dev *pdev)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);

	if (!adapter)
		return;

	be_roce_dev_remove(adapter);

	/* stop the periodic health-check before dismantling the device */
	cancel_delayed_work_sync(&adapter->func_recovery_work);

	unregister_netdev(adapter->netdev);

	be_clear(adapter);

	/* tell fw we're done with firing cmds */
	be_cmd_fw_clean(adapter);

	be_stats_cleanup(adapter);

	be_ctrl_cleanup(adapter);

	pci_disable_pcie_error_reporting(pdev);

	pci_set_drvdata(pdev, NULL);
	pci_release_regions(pdev);
	pci_disable_device(pdev);

	free_netdev(adapter->netdev);
}
3560
Ajit Khaparde4762f6c2012-03-18 06:23:11 +00003561bool be_is_wol_supported(struct be_adapter *adapter)
3562{
3563 return ((adapter->wol_cap & BE_WOL_CAP) &&
3564 !be_is_wol_excluded(adapter)) ? true : false;
3565}
3566
Somnath Kotur941a77d2012-05-17 22:59:03 +00003567u32 be_get_fw_log_level(struct be_adapter *adapter)
3568{
3569 struct be_dma_mem extfat_cmd;
3570 struct be_fat_conf_params *cfgs;
3571 int status;
3572 u32 level = 0;
3573 int j;
3574
3575 memset(&extfat_cmd, 0, sizeof(struct be_dma_mem));
3576 extfat_cmd.size = sizeof(struct be_cmd_resp_get_ext_fat_caps);
3577 extfat_cmd.va = pci_alloc_consistent(adapter->pdev, extfat_cmd.size,
3578 &extfat_cmd.dma);
3579
3580 if (!extfat_cmd.va) {
3581 dev_err(&adapter->pdev->dev, "%s: Memory allocation failure\n",
3582 __func__);
3583 goto err;
3584 }
3585
3586 status = be_cmd_get_ext_fat_capabilites(adapter, &extfat_cmd);
3587 if (!status) {
3588 cfgs = (struct be_fat_conf_params *)(extfat_cmd.va +
3589 sizeof(struct be_cmd_resp_hdr));
Anton Blanchardac46a462012-07-24 15:05:25 +00003590 for (j = 0; j < le32_to_cpu(cfgs->module[0].num_modes); j++) {
Somnath Kotur941a77d2012-05-17 22:59:03 +00003591 if (cfgs->module[0].trace_lvl[j].mode == MODE_UART)
3592 level = cfgs->module[0].trace_lvl[j].dbg_lvl;
3593 }
3594 }
3595 pci_free_consistent(adapter->pdev, extfat_cmd.size, extfat_cmd.va,
3596 extfat_cmd.dma);
3597err:
3598 return level;
3599}
/* Query the FW for this function's initial configuration (port, mode,
 * capabilities) and derive driver limits (max VLANs/pmacs, WoL support,
 * temperature-poll frequency, FW log level).
 * Returns 0 or the first failing command's status.
 */
static int be_get_initial_config(struct be_adapter *adapter)
{
	int status;
	u32 level;

	status = be_cmd_query_fw_cfg(adapter, &adapter->port_num,
			&adapter->function_mode, &adapter->function_caps);
	if (status)
		return status;

	/* in FLEX10 mode the VLAN table is shared 8 ways */
	if (adapter->function_mode & FLEX10_MODE)
		adapter->max_vlans = BE_NUM_VLANS_SUPPORTED/8;
	else
		adapter->max_vlans = BE_NUM_VLANS_SUPPORTED;

	if (be_physfn(adapter))
		adapter->max_pmac_cnt = BE_UC_PMAC_COUNT;
	else
		adapter->max_pmac_cnt = BE_VF_UC_PMAC_COUNT;

	status = be_cmd_get_cntl_attributes(adapter);
	if (status)
		return status;

	status = be_cmd_get_acpi_wol_cap(adapter);
	if (status) {
		/* in case of a failure to get wol capabilities
		 * check the exclusion list to determine WOL capability */
		if (!be_is_wol_excluded(adapter))
			adapter->wol_cap |= BE_WOL_CAP;
	}

	if (be_is_wol_supported(adapter))
		adapter->wol = true;

	/* Must be a power of 2 or else MODULO will BUG_ON */
	adapter->be_get_temp_freq = 64;

	level = be_get_fw_log_level(adapter);
	adapter->msg_enable = level <= FW_LOG_LEVEL_DEFAULT ? NETIF_MSG_HW : 0;

	return 0;
}
3643
Sathya Perla39f1d942012-05-08 19:41:24 +00003644static int be_dev_type_check(struct be_adapter *adapter)
Sathya Perlafe6d2a32010-11-21 23:25:50 +00003645{
3646 struct pci_dev *pdev = adapter->pdev;
3647 u32 sli_intf = 0, if_type;
3648
3649 switch (pdev->device) {
3650 case BE_DEVICE_ID1:
3651 case OC_DEVICE_ID1:
3652 adapter->generation = BE_GEN2;
3653 break;
3654 case BE_DEVICE_ID2:
3655 case OC_DEVICE_ID2:
3656 adapter->generation = BE_GEN3;
3657 break;
3658 case OC_DEVICE_ID3:
Mammatha Edhala12f4d0a2011-05-18 03:26:22 +00003659 case OC_DEVICE_ID4:
Sathya Perlafe6d2a32010-11-21 23:25:50 +00003660 pci_read_config_dword(pdev, SLI_INTF_REG_OFFSET, &sli_intf);
Parav Pandit045508a2012-03-26 14:27:13 +00003661 adapter->if_type = (sli_intf & SLI_INTF_IF_TYPE_MASK) >>
3662 SLI_INTF_IF_TYPE_SHIFT;
Sathya Perlafe6d2a32010-11-21 23:25:50 +00003663 if_type = (sli_intf & SLI_INTF_IF_TYPE_MASK) >>
3664 SLI_INTF_IF_TYPE_SHIFT;
Sathya Perlafe6d2a32010-11-21 23:25:50 +00003665 if (((sli_intf & SLI_INTF_VALID_MASK) != SLI_INTF_VALID) ||
Parav Pandit045508a2012-03-26 14:27:13 +00003666 !be_type_2_3(adapter)) {
3667 dev_err(&pdev->dev, "SLI_INTF reg val is not valid\n");
3668 return -EINVAL;
3669 }
3670 adapter->sli_family = ((sli_intf & SLI_INTF_FAMILY_MASK) >>
3671 SLI_INTF_FAMILY_SHIFT);
3672 adapter->generation = BE_GEN3;
3673 break;
3674 case OC_DEVICE_ID5:
3675 pci_read_config_dword(pdev, SLI_INTF_REG_OFFSET, &sli_intf);
3676 if ((sli_intf & SLI_INTF_VALID_MASK) != SLI_INTF_VALID) {
Sathya Perlafe6d2a32010-11-21 23:25:50 +00003677 dev_err(&pdev->dev, "SLI_INTF reg val is not valid\n");
3678 return -EINVAL;
3679 }
Sathya Perlafe6d2a32010-11-21 23:25:50 +00003680 adapter->sli_family = ((sli_intf & SLI_INTF_FAMILY_MASK) >>
3681 SLI_INTF_FAMILY_SHIFT);
3682 adapter->generation = BE_GEN3;
3683 break;
3684 default:
3685 adapter->generation = 0;
3686 }
Sathya Perla39f1d942012-05-08 19:41:24 +00003687
3688 pci_read_config_dword(adapter->pdev, SLI_INTF_REG_OFFSET, &sli_intf);
3689 adapter->virtfn = (sli_intf & SLI_INTF_FT_MASK) ? 1 : 0;
Sathya Perlafe6d2a32010-11-21 23:25:50 +00003690 return 0;
3691}
3692
/* Attempt a Lancer SLIPORT recovery: wait for the FW to report ready,
 * tear down and rebuild adapter state, and reopen the interface if it
 * was running. Returns 0 on success or the first failing step's status.
 */
static int lancer_recover_func(struct be_adapter *adapter)
{
	int status;

	status = lancer_test_and_set_rdy_state(adapter);
	if (status)
		goto err;

	if (netif_running(adapter->netdev))
		be_close(adapter->netdev);

	be_clear(adapter);

	/* clear sticky error flags before re-initializing */
	adapter->hw_error = false;
	adapter->fw_timeout = false;

	status = be_setup(adapter);
	if (status)
		goto err;

	if (netif_running(adapter->netdev)) {
		status = be_open(adapter->netdev);
		if (status)
			goto err;
	}

	dev_err(&adapter->pdev->dev,
		"Adapter SLIPORT recovery succeeded\n");
	return 0;
err:
	dev_err(&adapter->pdev->dev,
		"Adapter SLIPORT recovery failed\n");

	return status;
}
3728
/* Periodic (1s) health check: detect HW/FW errors and, on Lancer,
 * attempt a SLIPORT recovery. Skipped while EEH recovery is in
 * progress; always re-arms itself.
 */
static void be_func_recovery_task(struct work_struct *work)
{
	struct be_adapter *adapter =
		container_of(work, struct be_adapter, func_recovery_work.work);
	int status;

	be_detect_error(adapter);

	if (adapter->hw_error && lancer_chip(adapter)) {

		/* EEH recovery owns the device; don't fight it */
		if (adapter->eeh_error)
			goto out;

		rtnl_lock();
		netif_device_detach(adapter->netdev);
		rtnl_unlock();

		status = lancer_recover_func(adapter);

		/* on failure the netdev stays detached for the next pass */
		if (!status)
			netif_device_attach(adapter->netdev);
	}

out:
	schedule_delayed_work(&adapter->func_recovery_work,
			      msecs_to_jiffies(1000));
}
3756
/* Periodic (1s) housekeeping: reap MCC completions while the interface
 * is down, kick off HW-stats fetches, poll the die temperature, re-post
 * buffers to starved RX rings and run be_eqd_update() per event queue.
 */
static void be_worker(struct work_struct *work)
{
	struct be_adapter *adapter =
		container_of(work, struct be_adapter, work.work);
	struct be_rx_obj *rxo;
	struct be_eq_obj *eqo;
	int i;

	/* when interrupts are not yet enabled, just reap any pending
	 * mcc completions */
	if (!netif_running(adapter->netdev)) {
		local_bh_disable();
		be_process_mcc(adapter);
		local_bh_enable();
		goto reschedule;
	}

	if (!adapter->stats_cmd_sent) {
		if (lancer_chip(adapter))
			lancer_cmd_get_pport_stats(adapter,
						&adapter->stats_cmd);
		else
			be_cmd_get_stats(adapter, &adapter->stats_cmd);
	}

	/* be_get_temp_freq is a power of 2 (see be_get_initial_config) */
	if (MODULO(adapter->work_counter, adapter->be_get_temp_freq) == 0)
		be_cmd_get_die_temperature(adapter);

	for_all_rx_queues(adapter, rxo, i) {
		/* replenish a ring that ran out of buffers */
		if (rxo->rx_post_starved) {
			rxo->rx_post_starved = false;
			be_post_rx_frags(rxo, GFP_KERNEL);
		}
	}

	for_all_evt_queues(adapter, eqo, i)
		be_eqd_update(adapter, eqo);

reschedule:
	adapter->work_counter++;
	schedule_delayed_work(&adapter->work, msecs_to_jiffies(1000));
}
3799
Sathya Perla39f1d942012-05-08 19:41:24 +00003800static bool be_reset_required(struct be_adapter *adapter)
3801{
Sathya Perlad79c0a22012-06-05 19:37:22 +00003802 return be_find_vfs(adapter, ENABLED) > 0 ? false : true;
Sathya Perla39f1d942012-05-08 19:41:24 +00003803}
3804
Sathya Perlad3791422012-09-28 04:39:44 +00003805static char *mc_name(struct be_adapter *adapter)
3806{
3807 if (adapter->function_mode & FLEX10_MODE)
3808 return "FLEX10";
3809 else if (adapter->function_mode & VNIC_MODE)
3810 return "vNIC";
3811 else if (adapter->function_mode & UMC_ENABLED)
3812 return "UMC";
3813 else
3814 return "";
3815}
3816
/* "PF" for the physical function, "VF" for a virtual function */
static inline char *func_name(struct be_adapter *adapter)
{
	if (be_physfn(adapter))
		return "PF";
	return "VF";
}
3821
/* PCI probe: bring up one adapter function end to end — PCI/DMA setup,
 * control structures, FW handshake, queue/netdev setup, registration
 * and the periodic workers. Error paths unwind in reverse order of
 * acquisition via the label ladder at the bottom.
 */
static int __devinit be_probe(struct pci_dev *pdev,
			const struct pci_device_id *pdev_id)
{
	int status = 0;
	struct be_adapter *adapter;
	struct net_device *netdev;
	char port_name;

	status = pci_enable_device(pdev);
	if (status)
		goto do_none;

	status = pci_request_regions(pdev, DRV_NAME);
	if (status)
		goto disable_dev;
	pci_set_master(pdev);

	netdev = alloc_etherdev_mqs(sizeof(*adapter), MAX_TX_QS, MAX_RX_QS);
	if (netdev == NULL) {
		status = -ENOMEM;
		goto rel_reg;
	}
	adapter = netdev_priv(netdev);
	adapter->pdev = pdev;
	pci_set_drvdata(pdev, adapter);

	status = be_dev_type_check(adapter);
	if (status)
		goto free_netdev;

	adapter->netdev = netdev;
	SET_NETDEV_DEV(netdev, &pdev->dev);

	/* prefer 64-bit DMA; fall back to 32-bit */
	status = dma_set_mask(&pdev->dev, DMA_BIT_MASK(64));
	if (!status) {
		netdev->features |= NETIF_F_HIGHDMA;
	} else {
		status = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
		if (status) {
			dev_err(&pdev->dev, "Could not set PCI DMA Mask\n");
			goto free_netdev;
		}
	}

	/* AER is best-effort: log the failure but keep probing */
	status = pci_enable_pcie_error_reporting(pdev);
	if (status)
		dev_err(&pdev->dev, "Could not use PCIe error reporting\n");

	status = be_ctrl_init(adapter);
	if (status)
		goto free_netdev;

	/* sync up with fw's ready state */
	if (be_physfn(adapter)) {
		status = be_fw_wait_ready(adapter);
		if (status)
			goto ctrl_clean;
	}

	/* tell fw we're ready to fire cmds */
	status = be_cmd_fw_init(adapter);
	if (status)
		goto ctrl_clean;

	if (be_reset_required(adapter)) {
		status = be_cmd_reset_function(adapter);
		if (status)
			goto ctrl_clean;
	}

	/* The INTR bit may be set in the card when probed by a kdump kernel
	 * after a crash.
	 */
	if (!lancer_chip(adapter))
		be_intr_set(adapter, false);

	status = be_stats_init(adapter);
	if (status)
		goto ctrl_clean;

	status = be_get_initial_config(adapter);
	if (status)
		goto stats_clean;

	INIT_DELAYED_WORK(&adapter->work, be_worker);
	INIT_DELAYED_WORK(&adapter->func_recovery_work, be_func_recovery_task);
	adapter->rx_fc = adapter->tx_fc = true;

	status = be_setup(adapter);
	if (status)
		goto stats_clean;

	be_netdev_init(netdev);
	status = register_netdev(netdev);
	if (status != 0)
		goto unsetup;

	be_roce_dev_add(adapter);

	schedule_delayed_work(&adapter->func_recovery_work,
			      msecs_to_jiffies(1000));

	be_cmd_query_port_name(adapter, &port_name);

	dev_info(&pdev->dev, "%s: %s %s port %c\n", nic_name(pdev),
		 func_name(adapter), mc_name(adapter), port_name);

	return 0;

unsetup:
	be_clear(adapter);
stats_clean:
	be_stats_cleanup(adapter);
ctrl_clean:
	be_ctrl_cleanup(adapter);
free_netdev:
	free_netdev(netdev);
	pci_set_drvdata(pdev, NULL);
rel_reg:
	pci_release_regions(pdev);
disable_dev:
	pci_disable_device(pdev);
do_none:
	dev_err(&pdev->dev, "%s initialization failed\n", nic_name(pdev));
	return status;
}
3948
/* Legacy PM suspend: arm WoL if enabled, stop the recovery worker,
 * quiesce the interface and put the device into the requested
 * low-power state.
 */
static int be_suspend(struct pci_dev *pdev, pm_message_t state)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);
	struct net_device *netdev = adapter->netdev;

	if (adapter->wol)
		be_setup_wol(adapter, true);

	cancel_delayed_work_sync(&adapter->func_recovery_work);

	netif_device_detach(netdev);
	if (netif_running(netdev)) {
		rtnl_lock();
		be_close(netdev);
		rtnl_unlock();
	}
	be_clear(adapter);

	pci_save_state(pdev);
	pci_disable_device(pdev);
	pci_set_power_state(pdev, pci_choose_state(pdev, state));
	return 0;
}
3972
/* Legacy PM resume: re-enable the PCI device, redo the FW handshake,
 * rebuild adapter state, restart the recovery worker and reattach the
 * netdev. Mirrors be_suspend().
 */
static int be_resume(struct pci_dev *pdev)
{
	int status = 0;
	struct be_adapter *adapter = pci_get_drvdata(pdev);
	struct net_device *netdev = adapter->netdev;

	netif_device_detach(netdev);

	status = pci_enable_device(pdev);
	if (status)
		return status;

	pci_set_power_state(pdev, 0);
	pci_restore_state(pdev);

	/* tell fw we're ready to fire cmds */
	status = be_cmd_fw_init(adapter);
	if (status)
		return status;

	be_setup(adapter);
	if (netif_running(netdev)) {
		rtnl_lock();
		be_open(netdev);
		rtnl_unlock();
	}

	schedule_delayed_work(&adapter->func_recovery_work,
			      msecs_to_jiffies(1000));
	netif_device_attach(netdev);

	/* WoL was armed in be_suspend(); disarm it now that we're awake */
	if (adapter->wol)
		be_setup_wol(adapter, false);

	return 0;
}
4009
/*
 * An FLR will stop BE from DMAing any data. The shutdown callback stops
 * the workers, arms WoL if configured, and resets the function so no
 * DMA continues past shutdown.
 */
static void be_shutdown(struct pci_dev *pdev)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);

	if (!adapter)
		return;

	cancel_delayed_work_sync(&adapter->work);
	cancel_delayed_work_sync(&adapter->func_recovery_work);

	netif_device_detach(adapter->netdev);

	if (adapter->wol)
		be_setup_wol(adapter, true);

	be_cmd_reset_function(adapter);

	pci_disable_device(pdev);
}
4032
/* EEH callback: a PCI channel error was detected. Quiesce the device
 * and tell the EEH core whether to attempt a slot reset or give up
 * (permanent failure).
 */
static pci_ers_result_t be_eeh_err_detected(struct pci_dev *pdev,
				pci_channel_state_t state)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);
	struct net_device *netdev = adapter->netdev;

	dev_err(&adapter->pdev->dev, "EEH error detected\n");

	adapter->eeh_error = true;

	/* the recovery worker must not race with EEH recovery */
	cancel_delayed_work_sync(&adapter->func_recovery_work);

	rtnl_lock();
	netif_device_detach(netdev);
	rtnl_unlock();

	if (netif_running(netdev)) {
		rtnl_lock();
		be_close(netdev);
		rtnl_unlock();
	}
	be_clear(adapter);

	if (state == pci_channel_io_perm_failure)
		return PCI_ERS_RESULT_DISCONNECT;

	pci_disable_device(pdev);

	/* The error could cause the FW to trigger a flash debug dump.
	 * Resetting the card while flash dump is in progress
	 * can cause it not to recover; wait for it to finish
	 */
	ssleep(30);
	return PCI_ERS_RESULT_NEED_RESET;
}
4068
/* EEH callback: the slot has been reset. Re-enable the device, restore
 * its PCI state and confirm the FW comes back ready before recovery
 * proceeds to be_eeh_resume().
 */
static pci_ers_result_t be_eeh_reset(struct pci_dev *pdev)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);
	int status;

	dev_info(&adapter->pdev->dev, "EEH reset\n");
	be_clear_all_error(adapter);

	status = pci_enable_device(pdev);
	if (status)
		return PCI_ERS_RESULT_DISCONNECT;

	pci_set_master(pdev);
	pci_set_power_state(pdev, 0);
	pci_restore_state(pdev);

	/* Check if card is ok and fw is ready */
	status = be_fw_wait_ready(adapter);
	if (status)
		return PCI_ERS_RESULT_DISCONNECT;

	pci_cleanup_aer_uncorrect_error_status(pdev);
	return PCI_ERS_RESULT_RECOVERED;
}
4093
/* EEH callback: traffic may flow again. Redo the FW handshake, reset
 * and re-setup the function, reopen the interface if it was running and
 * restart the recovery worker.
 */
static void be_eeh_resume(struct pci_dev *pdev)
{
	int status = 0;
	struct be_adapter *adapter = pci_get_drvdata(pdev);
	struct net_device *netdev = adapter->netdev;

	dev_info(&adapter->pdev->dev, "EEH resume\n");

	pci_save_state(pdev);

	/* tell fw we're ready to fire cmds */
	status = be_cmd_fw_init(adapter);
	if (status)
		goto err;

	status = be_cmd_reset_function(adapter);
	if (status)
		goto err;

	status = be_setup(adapter);
	if (status)
		goto err;

	if (netif_running(netdev)) {
		status = be_open(netdev);
		if (status)
			goto err;
	}

	schedule_delayed_work(&adapter->func_recovery_work,
			      msecs_to_jiffies(1000));
	netif_device_attach(netdev);
	return;
err:
	dev_err(&adapter->pdev->dev, "EEH resume failed\n");
}
4130
/* PCI error (EEH/AER) recovery callbacks */
static struct pci_error_handlers be_eeh_handlers = {
	.error_detected = be_eeh_err_detected,
	.slot_reset = be_eeh_reset,
	.resume = be_eeh_resume,
};
4136
/* PCI driver glue: probe/remove, legacy PM hooks, shutdown and EEH */
static struct pci_driver be_driver = {
	.name = DRV_NAME,
	.id_table = be_dev_ids,
	.probe = be_probe,
	.remove = be_remove,
	.suspend = be_suspend,
	.resume = be_resume,
	.shutdown = be_shutdown,
	.err_handler = &be_eeh_handlers
};
4147
4148static int __init be_init_module(void)
4149{
Joe Perches8e95a202009-12-03 07:58:21 +00004150 if (rx_frag_size != 8192 && rx_frag_size != 4096 &&
4151 rx_frag_size != 2048) {
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004152 printk(KERN_WARNING DRV_NAME
4153 " : Module param rx_frag_size must be 2048/4096/8192."
4154 " Using 2048\n");
4155 rx_frag_size = 2048;
4156 }
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004157
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004158 return pci_register_driver(&be_driver);
4159}
4160module_init(be_init_module);
4161
/* Module unload: unregister the driver (be_remove runs per device) */
static void __exit be_exit_module(void)
{
	pci_unregister_driver(&be_driver);
}
module_exit(be_exit_module);