blob: 9b734a0f8efa0ce2202702dd1eefd00cd5c71770 [file] [log] [blame]
/*
 * Copyright (C) 2005 - 2011 Emulex
 * All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version 2
 * as published by the Free Software Foundation.  The full GNU General
 * Public License is included in this distribution in the file called COPYING.
 *
 * Contact Information:
 * linux-drivers@emulex.com
 *
 * Emulex
 * 3333 Susan Street
 * Costa Mesa, CA 92626
 */
17
Paul Gortmaker70c71602011-05-22 16:47:17 -040018#include <linux/prefetch.h>
Paul Gortmaker9d9779e2011-07-03 15:21:01 -040019#include <linux/module.h>
Sathya Perla6b7c5b92009-03-11 23:32:03 -070020#include "be.h"
Sathya Perla8788fdc2009-07-27 22:52:03 +000021#include "be_cmds.h"
Stephen Hemminger65f71b82009-03-27 00:25:24 -070022#include <asm/div64.h>
Sathya Perlad6b6d982012-09-05 01:56:48 +000023#include <linux/aer.h>
Sathya Perla6b7c5b92009-03-11 23:32:03 -070024
25MODULE_VERSION(DRV_VER);
26MODULE_DEVICE_TABLE(pci, be_dev_ids);
27MODULE_DESCRIPTION(DRV_DESC " " DRV_VER);
28MODULE_AUTHOR("ServerEngines Corporation");
29MODULE_LICENSE("GPL");
30
Sarveshwar Bandiba343c72010-03-31 02:56:12 +000031static unsigned int num_vfs;
Sarveshwar Bandiba343c72010-03-31 02:56:12 +000032module_param(num_vfs, uint, S_IRUGO);
Sarveshwar Bandiba343c72010-03-31 02:56:12 +000033MODULE_PARM_DESC(num_vfs, "Number of PCI VFs to initialize");
Sathya Perla6b7c5b92009-03-11 23:32:03 -070034
Sathya Perla11ac75e2011-12-13 00:58:50 +000035static ushort rx_frag_size = 2048;
36module_param(rx_frag_size, ushort, S_IRUGO);
37MODULE_PARM_DESC(rx_frag_size, "Size of a fragment that holds rcvd data.");
38
Sathya Perla6b7c5b92009-03-11 23:32:03 -070039static DEFINE_PCI_DEVICE_TABLE(be_dev_ids) = {
Ajit Khapardec4ca2372009-05-18 15:38:55 -070040 { PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID1) },
Ajit Khaparde59fd5d82009-10-29 01:11:06 -070041 { PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID2) },
Ajit Khapardec4ca2372009-05-18 15:38:55 -070042 { PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID1) },
43 { PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID2) },
Sathya Perlafe6d2a32010-11-21 23:25:50 +000044 { PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID3)},
Mammatha Edhala12f4d0a2011-05-18 03:26:22 +000045 { PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID4)},
Ajit Khapardeecedb6a2011-12-15 06:31:38 +000046 { PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID5)},
Sathya Perla6b7c5b92009-03-11 23:32:03 -070047 { 0 }
48};
49MODULE_DEVICE_TABLE(pci, be_dev_ids);
/* UE Status Low CSR */
/* Bit-position -> hardware-block-name decode table for the low word of
 * the Unrecoverable Error (UE) status register; indexed by bit number
 * when logging which block raised the error. Several names carry
 * trailing spaces as supplied by the firmware spec and are printed
 * verbatim — do not trim them.
 */
static const char * const ue_status_low_desc[] = {
	"CEV",
	"CTX",
	"DBUF",
	"ERX",
	"Host",
	"MPU",
	"NDMA",
	"PTC ",
	"RDMA ",
	"RXF ",
	"RXIPS ",
	"RXULP0 ",
	"RXULP1 ",
	"RXULP2 ",
	"TIM ",
	"TPOST ",
	"TPRE ",
	"TXIPS ",
	"TXULP0 ",
	"TXULP1 ",
	"UC ",
	"WDMA ",
	"TXULP2 ",
	"HOST1 ",
	"P0_OB_LINK ",
	"P1_OB_LINK ",
	"HOST_GPIO ",
	"MBOX ",
	"AXGMAC0",
	"AXGMAC1",
	"JTAG",
	"MPU_INTPEND"
};
/* UE Status High CSR */
/* Bit-position -> hardware-block-name decode table for the high word of
 * the Unrecoverable Error status register. Trailing "Unknown" entries
 * pad the table to 32 bits so any set bit decodes without bounds issues.
 */
static const char * const ue_status_hi_desc[] = {
	"LPCMEMHOST",
	"MGMT_MAC",
	"PCS0ONLINE",
	"MPU_IRAM",
	"PCS1ONLINE",
	"PCTL0",
	"PCTL1",
	"PMEM",
	"RR",
	"TXPB",
	"RXPP",
	"XAUI",
	"TXP",
	"ARM",
	"IPC",
	"HOST2",
	"HOST3",
	"HOST4",
	"HOST5",
	"HOST6",
	"HOST7",
	"HOST8",
	"HOST9",
	"NETC",
	"Unknown",
	"Unknown",
	"Unknown",
	"Unknown",
	"Unknown",
	"Unknown",
	"Unknown",
	"Unknown"
};
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700120
Sathya Perla752961a2011-10-24 02:45:03 +0000121/* Is BE in a multi-channel mode */
122static inline bool be_is_mc(struct be_adapter *adapter) {
123 return (adapter->function_mode & FLEX10_MODE ||
124 adapter->function_mode & VNIC_MODE ||
125 adapter->function_mode & UMC_ENABLED);
126}
127
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700128static void be_queue_free(struct be_adapter *adapter, struct be_queue_info *q)
129{
130 struct be_dma_mem *mem = &q->dma_mem;
Sathya Perla1cfafab2012-02-23 18:50:15 +0000131 if (mem->va) {
Ivan Vecera2b7bceb2011-02-02 08:05:12 +0000132 dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
133 mem->dma);
Sathya Perla1cfafab2012-02-23 18:50:15 +0000134 mem->va = NULL;
135 }
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700136}
137
138static int be_queue_alloc(struct be_adapter *adapter, struct be_queue_info *q,
139 u16 len, u16 entry_size)
140{
141 struct be_dma_mem *mem = &q->dma_mem;
142
143 memset(q, 0, sizeof(*q));
144 q->len = len;
145 q->entry_size = entry_size;
146 mem->size = len * entry_size;
Ivan Vecera2b7bceb2011-02-02 08:05:12 +0000147 mem->va = dma_alloc_coherent(&adapter->pdev->dev, mem->size, &mem->dma,
148 GFP_KERNEL);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700149 if (!mem->va)
Sathya Perla10ef9ab2012-02-09 18:05:27 +0000150 return -ENOMEM;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700151 memset(mem->va, 0, mem->size);
152 return 0;
153}
154
Sathya Perla8788fdc2009-07-27 22:52:03 +0000155static void be_intr_set(struct be_adapter *adapter, bool enable)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700156{
Sathya Perladb3ea782011-08-22 19:41:52 +0000157 u32 reg, enabled;
Sathya Perla5f0b8492009-07-27 22:52:56 +0000158
Padmanabh Ratnakarf67ef7b2012-07-12 03:57:09 +0000159 if (adapter->eeh_error)
Sathya Perlacf588472010-02-14 21:22:01 +0000160 return;
161
Sathya Perladb3ea782011-08-22 19:41:52 +0000162 pci_read_config_dword(adapter->pdev, PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET,
163 &reg);
164 enabled = reg & MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
165
Sathya Perla5f0b8492009-07-27 22:52:56 +0000166 if (!enabled && enable)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700167 reg |= MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
Sathya Perla5f0b8492009-07-27 22:52:56 +0000168 else if (enabled && !enable)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700169 reg &= ~MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
Sathya Perla5f0b8492009-07-27 22:52:56 +0000170 else
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700171 return;
Sathya Perla5f0b8492009-07-27 22:52:56 +0000172
Sathya Perladb3ea782011-08-22 19:41:52 +0000173 pci_write_config_dword(adapter->pdev,
174 PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET, reg);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700175}
176
Sathya Perla8788fdc2009-07-27 22:52:03 +0000177static void be_rxq_notify(struct be_adapter *adapter, u16 qid, u16 posted)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700178{
179 u32 val = 0;
180 val |= qid & DB_RQ_RING_ID_MASK;
181 val |= posted << DB_RQ_NUM_POSTED_SHIFT;
Sathya Perlaf3eb62d2010-06-29 00:11:17 +0000182
183 wmb();
Sathya Perla8788fdc2009-07-27 22:52:03 +0000184 iowrite32(val, adapter->db + DB_RQ_OFFSET);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700185}
186
Sathya Perla8788fdc2009-07-27 22:52:03 +0000187static void be_txq_notify(struct be_adapter *adapter, u16 qid, u16 posted)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700188{
189 u32 val = 0;
190 val |= qid & DB_TXULP_RING_ID_MASK;
191 val |= (posted & DB_TXULP_NUM_POSTED_MASK) << DB_TXULP_NUM_POSTED_SHIFT;
Sathya Perlaf3eb62d2010-06-29 00:11:17 +0000192
193 wmb();
Sathya Perla8788fdc2009-07-27 22:52:03 +0000194 iowrite32(val, adapter->db + DB_TXULP1_OFFSET);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700195}
196
Sathya Perla8788fdc2009-07-27 22:52:03 +0000197static void be_eq_notify(struct be_adapter *adapter, u16 qid,
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700198 bool arm, bool clear_int, u16 num_popped)
199{
200 u32 val = 0;
201 val |= qid & DB_EQ_RING_ID_MASK;
Sathya Perlafe6d2a32010-11-21 23:25:50 +0000202 val |= ((qid & DB_EQ_RING_ID_EXT_MASK) <<
203 DB_EQ_RING_ID_EXT_MASK_SHIFT);
Sathya Perlacf588472010-02-14 21:22:01 +0000204
Padmanabh Ratnakarf67ef7b2012-07-12 03:57:09 +0000205 if (adapter->eeh_error)
Sathya Perlacf588472010-02-14 21:22:01 +0000206 return;
207
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700208 if (arm)
209 val |= 1 << DB_EQ_REARM_SHIFT;
210 if (clear_int)
211 val |= 1 << DB_EQ_CLR_SHIFT;
212 val |= 1 << DB_EQ_EVNT_SHIFT;
213 val |= num_popped << DB_EQ_NUM_POPPED_SHIFT;
Sathya Perla8788fdc2009-07-27 22:52:03 +0000214 iowrite32(val, adapter->db + DB_EQ_OFFSET);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700215}
216
Sathya Perla8788fdc2009-07-27 22:52:03 +0000217void be_cq_notify(struct be_adapter *adapter, u16 qid, bool arm, u16 num_popped)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700218{
219 u32 val = 0;
220 val |= qid & DB_CQ_RING_ID_MASK;
Sathya Perlafe6d2a32010-11-21 23:25:50 +0000221 val |= ((qid & DB_CQ_RING_ID_EXT_MASK) <<
222 DB_CQ_RING_ID_EXT_MASK_SHIFT);
Sathya Perlacf588472010-02-14 21:22:01 +0000223
Padmanabh Ratnakarf67ef7b2012-07-12 03:57:09 +0000224 if (adapter->eeh_error)
Sathya Perlacf588472010-02-14 21:22:01 +0000225 return;
226
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700227 if (arm)
228 val |= 1 << DB_CQ_REARM_SHIFT;
229 val |= num_popped << DB_CQ_NUM_POPPED_SHIFT;
Sathya Perla8788fdc2009-07-27 22:52:03 +0000230 iowrite32(val, adapter->db + DB_CQ_OFFSET);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700231}
232
/* ndo_set_mac_address handler: program a new unicast MAC on the i/f.
 * The new MAC is added as a fresh pmac entry *before* the old entry is
 * deleted, so the interface never runs without a valid filter.
 * Returns 0 on success or a negative errno / f/w status on failure.
 */
static int be_mac_addr_set(struct net_device *netdev, void *p)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct sockaddr *addr = p;
	int status = 0;
	u8 current_mac[ETH_ALEN];
	u32 pmac_id = adapter->pmac_id[0];	/* old entry, deleted below */

	if (!is_valid_ether_addr(addr->sa_data))
		return -EADDRNOTAVAIL;

	/* Ask f/w which MAC is currently programmed for this i/f */
	status = be_cmd_mac_addr_query(adapter, current_mac, false,
				adapter->if_handle, 0);
	if (status)
		goto err;

	/* Only reprogram when the MAC actually changes: add the new pmac
	 * first (updating adapter->pmac_id[0]), then delete the old one.
	 */
	if (memcmp(addr->sa_data, current_mac, ETH_ALEN)) {
		status = be_cmd_pmac_add(adapter, (u8 *)addr->sa_data,
				adapter->if_handle, &adapter->pmac_id[0], 0);
		if (status)
			goto err;

		be_cmd_pmac_del(adapter, adapter->if_handle, pmac_id, 0);
	}
	memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
	return 0;
err:
	dev_err(&adapter->pdev->dev, "MAC %pM set Failed\n", addr->sa_data);
	return status;
}
263
/* Decode a v0 (BE2) h/w stats DMA buffer into adapter->drv_stats. */
static void populate_be2_stats(struct be_adapter *adapter)
{
	struct be_hw_stats_v0 *hw_stats = hw_stats_from_cmd(adapter);
	struct be_pmem_stats *pmem_sts = &hw_stats->pmem;
	struct be_rxf_stats_v0 *rxf_stats = &hw_stats->rxf;
	struct be_port_rxf_stats_v0 *port_stats =
		&rxf_stats->port[adapter->port_num];
	struct be_drv_stats *drvs = &adapter->drv_stats;

	/* Stats arrive little-endian from h/w; convert in place first */
	be_dws_le_to_cpu(hw_stats, sizeof(*hw_stats));
	drvs->rx_pause_frames = port_stats->rx_pause_frames;
	drvs->rx_crc_errors = port_stats->rx_crc_errors;
	drvs->rx_control_frames = port_stats->rx_control_frames;
	drvs->rx_in_range_errors = port_stats->rx_in_range_errors;
	drvs->rx_frame_too_long = port_stats->rx_frame_too_long;
	drvs->rx_dropped_runt = port_stats->rx_dropped_runt;
	drvs->rx_ip_checksum_errs = port_stats->rx_ip_checksum_errs;
	drvs->rx_tcp_checksum_errs = port_stats->rx_tcp_checksum_errs;
	drvs->rx_udp_checksum_errs = port_stats->rx_udp_checksum_errs;
	drvs->rxpp_fifo_overflow_drop = port_stats->rx_fifo_overflow;
	drvs->rx_dropped_tcp_length = port_stats->rx_dropped_tcp_length;
	drvs->rx_dropped_too_small = port_stats->rx_dropped_too_small;
	drvs->rx_dropped_too_short = port_stats->rx_dropped_too_short;
	drvs->rx_out_range_errors = port_stats->rx_out_range_errors;
	drvs->rx_input_fifo_overflow_drop = port_stats->rx_input_fifo_overflow;
	drvs->rx_dropped_header_too_small =
		port_stats->rx_dropped_header_too_small;
	/* v0 reports address and vlan mismatch drops separately; sum them */
	drvs->rx_address_mismatch_drops =
		port_stats->rx_address_mismatch_drops +
		port_stats->rx_vlan_mismatch_drops;
	drvs->rx_alignment_symbol_errors =
		port_stats->rx_alignment_symbol_errors;

	drvs->tx_pauseframes = port_stats->tx_pauseframes;
	drvs->tx_controlframes = port_stats->tx_controlframes;

	/* v0 keeps jabber events per physical port in the rxf block */
	if (adapter->port_num)
		drvs->jabber_events = rxf_stats->port1_jabber_events;
	else
		drvs->jabber_events = rxf_stats->port0_jabber_events;
	drvs->rx_drops_no_pbuf = rxf_stats->rx_drops_no_pbuf;
	drvs->rx_drops_no_erx_descr = rxf_stats->rx_drops_no_erx_descr;
	drvs->forwarded_packets = rxf_stats->forwarded_packets;
	drvs->rx_drops_mtu = rxf_stats->rx_drops_mtu;
	drvs->rx_drops_no_tpre_descr = rxf_stats->rx_drops_no_tpre_descr;
	drvs->rx_drops_too_many_frags = rxf_stats->rx_drops_too_many_frags;
	adapter->drv_stats.eth_red_drops = pmem_sts->eth_red_drops;
}
312
/* Decode a v1 (BE3) h/w stats DMA buffer into adapter->drv_stats. */
static void populate_be3_stats(struct be_adapter *adapter)
{
	struct be_hw_stats_v1 *hw_stats = hw_stats_from_cmd(adapter);
	struct be_pmem_stats *pmem_sts = &hw_stats->pmem;
	struct be_rxf_stats_v1 *rxf_stats = &hw_stats->rxf;
	struct be_port_rxf_stats_v1 *port_stats =
		&rxf_stats->port[adapter->port_num];
	struct be_drv_stats *drvs = &adapter->drv_stats;

	/* Stats arrive little-endian from h/w; convert in place first */
	be_dws_le_to_cpu(hw_stats, sizeof(*hw_stats));
	drvs->pmem_fifo_overflow_drop = port_stats->pmem_fifo_overflow_drop;
	drvs->rx_priority_pause_frames = port_stats->rx_priority_pause_frames;
	drvs->rx_pause_frames = port_stats->rx_pause_frames;
	drvs->rx_crc_errors = port_stats->rx_crc_errors;
	drvs->rx_control_frames = port_stats->rx_control_frames;
	drvs->rx_in_range_errors = port_stats->rx_in_range_errors;
	drvs->rx_frame_too_long = port_stats->rx_frame_too_long;
	drvs->rx_dropped_runt = port_stats->rx_dropped_runt;
	drvs->rx_ip_checksum_errs = port_stats->rx_ip_checksum_errs;
	drvs->rx_tcp_checksum_errs = port_stats->rx_tcp_checksum_errs;
	drvs->rx_udp_checksum_errs = port_stats->rx_udp_checksum_errs;
	drvs->rx_dropped_tcp_length = port_stats->rx_dropped_tcp_length;
	drvs->rx_dropped_too_small = port_stats->rx_dropped_too_small;
	drvs->rx_dropped_too_short = port_stats->rx_dropped_too_short;
	drvs->rx_out_range_errors = port_stats->rx_out_range_errors;
	drvs->rx_dropped_header_too_small =
		port_stats->rx_dropped_header_too_small;
	drvs->rx_input_fifo_overflow_drop =
		port_stats->rx_input_fifo_overflow_drop;
	/* unlike v0, v1 reports address-mismatch drops as a single counter */
	drvs->rx_address_mismatch_drops = port_stats->rx_address_mismatch_drops;
	drvs->rx_alignment_symbol_errors =
		port_stats->rx_alignment_symbol_errors;
	drvs->rxpp_fifo_overflow_drop = port_stats->rxpp_fifo_overflow_drop;
	drvs->tx_pauseframes = port_stats->tx_pauseframes;
	drvs->tx_controlframes = port_stats->tx_controlframes;
	/* v1 keeps jabber events in the per-port stats block itself */
	drvs->jabber_events = port_stats->jabber_events;
	drvs->rx_drops_no_pbuf = rxf_stats->rx_drops_no_pbuf;
	drvs->rx_drops_no_erx_descr = rxf_stats->rx_drops_no_erx_descr;
	drvs->forwarded_packets = rxf_stats->forwarded_packets;
	drvs->rx_drops_mtu = rxf_stats->rx_drops_mtu;
	drvs->rx_drops_no_tpre_descr = rxf_stats->rx_drops_no_tpre_descr;
	drvs->rx_drops_too_many_frags = rxf_stats->rx_drops_too_many_frags;
	adapter->drv_stats.eth_red_drops = pmem_sts->eth_red_drops;
}
357
/* Decode a Lancer per-physical-port stats buffer into drv_stats. Many
 * Lancer counters are 64-bit; only the low words (_lo) are folded into
 * the 32-bit drv_stats fields.
 */
static void populate_lancer_stats(struct be_adapter *adapter)
{
	struct be_drv_stats *drvs = &adapter->drv_stats;
	struct lancer_pport_stats *pport_stats =
					pport_stats_from_cmd(adapter);

	/* Stats arrive little-endian from f/w; convert in place first */
	be_dws_le_to_cpu(pport_stats, sizeof(*pport_stats));
	drvs->rx_pause_frames = pport_stats->rx_pause_frames_lo;
	drvs->rx_crc_errors = pport_stats->rx_crc_errors_lo;
	drvs->rx_control_frames = pport_stats->rx_control_frames_lo;
	drvs->rx_in_range_errors = pport_stats->rx_in_range_errors;
	drvs->rx_frame_too_long = pport_stats->rx_frames_too_long_lo;
	drvs->rx_dropped_runt = pport_stats->rx_dropped_runt;
	drvs->rx_ip_checksum_errs = pport_stats->rx_ip_checksum_errors;
	drvs->rx_tcp_checksum_errs = pport_stats->rx_tcp_checksum_errors;
	drvs->rx_udp_checksum_errs = pport_stats->rx_udp_checksum_errors;
	drvs->rx_dropped_tcp_length =
				pport_stats->rx_dropped_invalid_tcp_length;
	drvs->rx_dropped_too_small = pport_stats->rx_dropped_too_small;
	drvs->rx_dropped_too_short = pport_stats->rx_dropped_too_short;
	drvs->rx_out_range_errors = pport_stats->rx_out_of_range_errors;
	drvs->rx_dropped_header_too_small =
				pport_stats->rx_dropped_header_too_small;
	drvs->rx_input_fifo_overflow_drop = pport_stats->rx_fifo_overflow;
	/* address and vlan mismatch drops are reported separately; sum them */
	drvs->rx_address_mismatch_drops =
					pport_stats->rx_address_mismatch_drops +
					pport_stats->rx_vlan_mismatch_drops;
	drvs->rx_alignment_symbol_errors = pport_stats->rx_symbol_errors_lo;
	drvs->rxpp_fifo_overflow_drop = pport_stats->rx_fifo_overflow;
	drvs->tx_pauseframes = pport_stats->tx_pause_frames_lo;
	drvs->tx_controlframes = pport_stats->tx_control_frames_lo;
	drvs->jabber_events = pport_stats->rx_jabbers;
	drvs->forwarded_packets = pport_stats->num_forwards_lo;
	drvs->rx_drops_mtu = pport_stats->rx_drops_mtu_lo;
	drvs->rx_drops_too_many_frags =
				pport_stats->rx_drops_too_many_frags_lo;
}
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000396
Sathya Perla09c1c682011-08-22 19:41:53 +0000397static void accumulate_16bit_val(u32 *acc, u16 val)
398{
399#define lo(x) (x & 0xFFFF)
400#define hi(x) (x & 0xFFFF0000)
401 bool wrapped = val < lo(*acc);
402 u32 newacc = hi(*acc) + val;
403
404 if (wrapped)
405 newacc += 65536;
406 ACCESS_ONCE(*acc) = newacc;
407}
408
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000409void be_parse_stats(struct be_adapter *adapter)
410{
Sathya Perlaac124ff2011-07-25 19:10:14 +0000411 struct be_erx_stats_v1 *erx = be_erx_stats_from_cmd(adapter);
412 struct be_rx_obj *rxo;
413 int i;
414
Selvin Xavier005d5692011-05-16 07:36:35 +0000415 if (adapter->generation == BE_GEN3) {
416 if (lancer_chip(adapter))
417 populate_lancer_stats(adapter);
418 else
419 populate_be3_stats(adapter);
420 } else {
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000421 populate_be2_stats(adapter);
Selvin Xavier005d5692011-05-16 07:36:35 +0000422 }
Sathya Perlaac124ff2011-07-25 19:10:14 +0000423
Padmanabh Ratnakard51ebd32012-04-25 01:46:52 +0000424 if (lancer_chip(adapter))
425 goto done;
426
Sathya Perlaac124ff2011-07-25 19:10:14 +0000427 /* as erx_v1 is longer than v0, ok to use v1 defn for v0 access */
Sathya Perla09c1c682011-08-22 19:41:53 +0000428 for_all_rx_queues(adapter, rxo, i) {
429 /* below erx HW counter can actually wrap around after
430 * 65535. Driver accumulates a 32-bit value
431 */
432 accumulate_16bit_val(&rx_stats(rxo)->rx_drops_no_frags,
433 (u16)erx->rx_drops_no_fragments[rxo->q.id]);
434 }
Padmanabh Ratnakard51ebd32012-04-25 01:46:52 +0000435done:
436 return;
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000437}
438
/* ndo_get_stats64 handler: aggregate per-queue s/w counters and the
 * decoded h/w drv_stats into @stats. Per-queue 64-bit counters are read
 * under u64_stats seqcount retry loops so they are consistent on 32-bit
 * hosts. Returns @stats.
 */
static struct rtnl_link_stats64 *be_get_stats64(struct net_device *netdev,
					struct rtnl_link_stats64 *stats)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_drv_stats *drvs = &adapter->drv_stats;
	struct be_rx_obj *rxo;
	struct be_tx_obj *txo;
	u64 pkts, bytes;
	unsigned int start;
	int i;

	/* Sum rx packets/bytes/drops across all rx queues */
	for_all_rx_queues(adapter, rxo, i) {
		const struct be_rx_stats *rx_stats = rx_stats(rxo);
		do {
			start = u64_stats_fetch_begin_bh(&rx_stats->sync);
			pkts = rx_stats(rxo)->rx_pkts;
			bytes = rx_stats(rxo)->rx_bytes;
		} while (u64_stats_fetch_retry_bh(&rx_stats->sync, start));
		stats->rx_packets += pkts;
		stats->rx_bytes += bytes;
		stats->multicast += rx_stats(rxo)->rx_mcast_pkts;
		stats->rx_dropped += rx_stats(rxo)->rx_drops_no_skbs +
					rx_stats(rxo)->rx_drops_no_frags;
	}

	/* Sum tx packets/bytes across all tx queues */
	for_all_tx_queues(adapter, txo, i) {
		const struct be_tx_stats *tx_stats = tx_stats(txo);
		do {
			start = u64_stats_fetch_begin_bh(&tx_stats->sync);
			pkts = tx_stats(txo)->tx_pkts;
			bytes = tx_stats(txo)->tx_bytes;
		} while (u64_stats_fetch_retry_bh(&tx_stats->sync, start));
		stats->tx_packets += pkts;
		stats->tx_bytes += bytes;
	}

	/* bad pkts received */
	stats->rx_errors = drvs->rx_crc_errors +
		drvs->rx_alignment_symbol_errors +
		drvs->rx_in_range_errors +
		drvs->rx_out_range_errors +
		drvs->rx_frame_too_long +
		drvs->rx_dropped_too_small +
		drvs->rx_dropped_too_short +
		drvs->rx_dropped_header_too_small +
		drvs->rx_dropped_tcp_length +
		drvs->rx_dropped_runt;

	/* detailed rx errors */
	stats->rx_length_errors = drvs->rx_in_range_errors +
		drvs->rx_out_range_errors +
		drvs->rx_frame_too_long;

	stats->rx_crc_errors = drvs->rx_crc_errors;

	/* frame alignment errors */
	stats->rx_frame_errors = drvs->rx_alignment_symbol_errors;

	/* receiver fifo overrun */
	/* drops_no_pbuf is no per i/f, it's per BE card */
	stats->rx_fifo_errors = drvs->rxpp_fifo_overflow_drop +
				drvs->rx_input_fifo_overflow_drop +
				drvs->rx_drops_no_pbuf;
	return stats;
}
504
Ajit Khapardeb236916a2011-12-30 12:15:40 +0000505void be_link_status_update(struct be_adapter *adapter, u8 link_status)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700506{
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700507 struct net_device *netdev = adapter->netdev;
508
Ajit Khapardeb236916a2011-12-30 12:15:40 +0000509 if (!(adapter->flags & BE_FLAGS_LINK_STATUS_INIT)) {
Sathya Perlaea172a02011-08-02 19:57:42 +0000510 netif_carrier_off(netdev);
Ajit Khapardeb236916a2011-12-30 12:15:40 +0000511 adapter->flags |= BE_FLAGS_LINK_STATUS_INIT;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700512 }
Ajit Khapardeb236916a2011-12-30 12:15:40 +0000513
514 if ((link_status & LINK_STATUS_MASK) == LINK_UP)
515 netif_carrier_on(netdev);
516 else
517 netif_carrier_off(netdev);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700518}
519
Sathya Perla3c8def92011-06-12 20:01:58 +0000520static void be_tx_stats_update(struct be_tx_obj *txo,
Ajit Khaparde91992e42010-02-19 13:57:12 +0000521 u32 wrb_cnt, u32 copied, u32 gso_segs, bool stopped)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700522{
Sathya Perla3c8def92011-06-12 20:01:58 +0000523 struct be_tx_stats *stats = tx_stats(txo);
524
Sathya Perlaab1594e2011-07-25 19:10:15 +0000525 u64_stats_update_begin(&stats->sync);
Sathya Perlaac124ff2011-07-25 19:10:14 +0000526 stats->tx_reqs++;
527 stats->tx_wrbs += wrb_cnt;
528 stats->tx_bytes += copied;
529 stats->tx_pkts += (gso_segs ? gso_segs : 1);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700530 if (stopped)
Sathya Perlaac124ff2011-07-25 19:10:14 +0000531 stats->tx_stops++;
Sathya Perlaab1594e2011-07-25 19:10:15 +0000532 u64_stats_update_end(&stats->sync);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700533}
534
535/* Determine number of WRB entries needed to xmit data in an skb */
Sathya Perlafe6d2a32010-11-21 23:25:50 +0000536static u32 wrb_cnt_for_skb(struct be_adapter *adapter, struct sk_buff *skb,
537 bool *dummy)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700538{
David S. Millerebc8d2a2009-06-09 01:01:31 -0700539 int cnt = (skb->len > skb->data_len);
540
541 cnt += skb_shinfo(skb)->nr_frags;
542
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700543 /* to account for hdr wrb */
544 cnt++;
Sathya Perlafe6d2a32010-11-21 23:25:50 +0000545 if (lancer_chip(adapter) || !(cnt & 1)) {
546 *dummy = false;
547 } else {
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700548 /* add a dummy to make it an even num */
549 cnt++;
550 *dummy = true;
Sathya Perlafe6d2a32010-11-21 23:25:50 +0000551 }
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700552 BUG_ON(cnt > BE_MAX_TX_FRAG_COUNT);
553 return cnt;
554}
555
556static inline void wrb_fill(struct be_eth_wrb *wrb, u64 addr, int len)
557{
558 wrb->frag_pa_hi = upper_32_bits(addr);
559 wrb->frag_pa_lo = addr & 0xFFFFFFFF;
560 wrb->frag_len = len & ETH_WRB_FRAG_LEN_MASK;
Somnath Kotur89b1f492012-06-24 19:40:55 +0000561 wrb->rsvd0 = 0;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700562}
563
Ajit Khaparde1ded1322011-12-09 13:53:17 +0000564static inline u16 be_get_tx_vlan_tag(struct be_adapter *adapter,
565 struct sk_buff *skb)
566{
567 u8 vlan_prio;
568 u16 vlan_tag;
569
570 vlan_tag = vlan_tx_tag_get(skb);
571 vlan_prio = (vlan_tag & VLAN_PRIO_MASK) >> VLAN_PRIO_SHIFT;
572 /* If vlan priority provided by OS is NOT in available bmap */
573 if (!(adapter->vlan_prio_bmap & (1 << vlan_prio)))
574 vlan_tag = (vlan_tag & ~VLAN_PRIO_MASK) |
575 adapter->recommended_prio;
576
577 return vlan_tag;
578}
579
Somnath Kotur93040ae2012-06-26 22:32:10 +0000580static int be_vlan_tag_chk(struct be_adapter *adapter, struct sk_buff *skb)
581{
582 return vlan_tx_tag_present(skb) || adapter->pvid;
583}
584
/* Fill the tx header wrb for one transmit request: checksum-offload and
 * LSO bits, vlan tag, completion/event flags, total wrb count and
 * payload length.
 */
static void wrb_fill_hdr(struct be_adapter *adapter, struct be_eth_hdr_wrb *hdr,
		struct sk_buff *skb, u32 wrb_cnt, u32 len)
{
	u16 vlan_tag;

	memset(hdr, 0, sizeof(*hdr));

	AMAP_SET_BITS(struct amap_eth_hdr_wrb, crc, hdr, 1);

	if (skb_is_gso(skb)) {
		AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso, hdr, 1);
		AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso_mss,
			hdr, skb_shinfo(skb)->gso_size);
		/* lso6 is not set for Lancer even for GSOv6 */
		if (skb_is_gso_v6(skb) && !lancer_chip(adapter))
			AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso6, hdr, 1);
		/* Lancer A0 silicon needs explicit csum bits even with LSO */
		if (lancer_chip(adapter) && adapter->sli_family ==
			LANCER_A0_SLI_FAMILY) {
			AMAP_SET_BITS(struct amap_eth_hdr_wrb, ipcs, hdr, 1);
			if (is_tcp_pkt(skb))
				AMAP_SET_BITS(struct amap_eth_hdr_wrb,
								tcpcs, hdr, 1);
			else if (is_udp_pkt(skb))
				AMAP_SET_BITS(struct amap_eth_hdr_wrb,
								udpcs, hdr, 1);
		}
	} else if (skb->ip_summed == CHECKSUM_PARTIAL) {
		/* non-GSO checksum offload: set tcp/udp csum bit as needed */
		if (is_tcp_pkt(skb))
			AMAP_SET_BITS(struct amap_eth_hdr_wrb, tcpcs, hdr, 1);
		else if (is_udp_pkt(skb))
			AMAP_SET_BITS(struct amap_eth_hdr_wrb, udpcs, hdr, 1);
	}

	if (vlan_tx_tag_present(skb)) {
		AMAP_SET_BITS(struct amap_eth_hdr_wrb, vlan, hdr, 1);
		vlan_tag = be_get_tx_vlan_tag(adapter, skb);
		AMAP_SET_BITS(struct amap_eth_hdr_wrb, vlan_tag, hdr, vlan_tag);
	}

	AMAP_SET_BITS(struct amap_eth_hdr_wrb, event, hdr, 1);
	AMAP_SET_BITS(struct amap_eth_hdr_wrb, complete, hdr, 1);
	AMAP_SET_BITS(struct amap_eth_hdr_wrb, num_wrb, hdr, wrb_cnt);
	AMAP_SET_BITS(struct amap_eth_hdr_wrb, len, hdr, len);
}
628
Ivan Vecera2b7bceb2011-02-02 08:05:12 +0000629static void unmap_tx_frag(struct device *dev, struct be_eth_wrb *wrb,
Sathya Perla7101e112010-03-22 20:41:12 +0000630 bool unmap_single)
631{
632 dma_addr_t dma;
633
634 be_dws_le_to_cpu(wrb, sizeof(*wrb));
635
636 dma = (u64)wrb->frag_pa_hi << 32 | (u64)wrb->frag_pa_lo;
FUJITA Tomonorib681ee72010-04-04 21:40:18 +0000637 if (wrb->frag_len) {
Sathya Perla7101e112010-03-22 20:41:12 +0000638 if (unmap_single)
Ivan Vecera2b7bceb2011-02-02 08:05:12 +0000639 dma_unmap_single(dev, dma, wrb->frag_len,
640 DMA_TO_DEVICE);
Sathya Perla7101e112010-03-22 20:41:12 +0000641 else
Ivan Vecera2b7bceb2011-02-02 08:05:12 +0000642 dma_unmap_page(dev, dma, wrb->frag_len, DMA_TO_DEVICE);
Sathya Perla7101e112010-03-22 20:41:12 +0000643 }
644}
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700645
Sathya Perla3c8def92011-06-12 20:01:58 +0000646static int make_tx_wrbs(struct be_adapter *adapter, struct be_queue_info *txq,
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700647 struct sk_buff *skb, u32 wrb_cnt, bool dummy_wrb)
648{
Sathya Perla7101e112010-03-22 20:41:12 +0000649 dma_addr_t busaddr;
650 int i, copied = 0;
Ivan Vecera2b7bceb2011-02-02 08:05:12 +0000651 struct device *dev = &adapter->pdev->dev;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700652 struct sk_buff *first_skb = skb;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700653 struct be_eth_wrb *wrb;
654 struct be_eth_hdr_wrb *hdr;
Sathya Perla7101e112010-03-22 20:41:12 +0000655 bool map_single = false;
656 u16 map_head;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700657
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700658 hdr = queue_head_node(txq);
659 queue_head_inc(txq);
Sathya Perla7101e112010-03-22 20:41:12 +0000660 map_head = txq->head;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700661
David S. Millerebc8d2a2009-06-09 01:01:31 -0700662 if (skb->len > skb->data_len) {
Eric Dumazete743d312010-04-14 15:59:40 -0700663 int len = skb_headlen(skb);
Ivan Vecera2b7bceb2011-02-02 08:05:12 +0000664 busaddr = dma_map_single(dev, skb->data, len, DMA_TO_DEVICE);
665 if (dma_mapping_error(dev, busaddr))
Sathya Perla7101e112010-03-22 20:41:12 +0000666 goto dma_err;
667 map_single = true;
David S. Millerebc8d2a2009-06-09 01:01:31 -0700668 wrb = queue_head_node(txq);
669 wrb_fill(wrb, busaddr, len);
670 be_dws_cpu_to_le(wrb, sizeof(*wrb));
671 queue_head_inc(txq);
672 copied += len;
673 }
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700674
David S. Millerebc8d2a2009-06-09 01:01:31 -0700675 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
Eric Dumazet9e903e02011-10-18 21:00:24 +0000676 const struct skb_frag_struct *frag =
David S. Millerebc8d2a2009-06-09 01:01:31 -0700677 &skb_shinfo(skb)->frags[i];
Ian Campbellb061b392011-08-29 23:18:23 +0000678 busaddr = skb_frag_dma_map(dev, frag, 0,
Eric Dumazet9e903e02011-10-18 21:00:24 +0000679 skb_frag_size(frag), DMA_TO_DEVICE);
Ivan Vecera2b7bceb2011-02-02 08:05:12 +0000680 if (dma_mapping_error(dev, busaddr))
Sathya Perla7101e112010-03-22 20:41:12 +0000681 goto dma_err;
David S. Millerebc8d2a2009-06-09 01:01:31 -0700682 wrb = queue_head_node(txq);
Eric Dumazet9e903e02011-10-18 21:00:24 +0000683 wrb_fill(wrb, busaddr, skb_frag_size(frag));
David S. Millerebc8d2a2009-06-09 01:01:31 -0700684 be_dws_cpu_to_le(wrb, sizeof(*wrb));
685 queue_head_inc(txq);
Eric Dumazet9e903e02011-10-18 21:00:24 +0000686 copied += skb_frag_size(frag);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700687 }
688
689 if (dummy_wrb) {
690 wrb = queue_head_node(txq);
691 wrb_fill(wrb, 0, 0);
692 be_dws_cpu_to_le(wrb, sizeof(*wrb));
693 queue_head_inc(txq);
694 }
695
Somnath Koturcc4ce022010-10-21 07:11:14 -0700696 wrb_fill_hdr(adapter, hdr, first_skb, wrb_cnt, copied);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700697 be_dws_cpu_to_le(hdr, sizeof(*hdr));
698
699 return copied;
Sathya Perla7101e112010-03-22 20:41:12 +0000700dma_err:
701 txq->head = map_head;
702 while (copied) {
703 wrb = queue_head_node(txq);
Ivan Vecera2b7bceb2011-02-02 08:05:12 +0000704 unmap_tx_frag(dev, wrb, map_single);
Sathya Perla7101e112010-03-22 20:41:12 +0000705 map_single = false;
706 copied -= wrb->frag_len;
707 queue_head_inc(txq);
708 }
709 return 0;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700710}
711
Somnath Kotur93040ae2012-06-26 22:32:10 +0000712static struct sk_buff *be_insert_vlan_in_pkt(struct be_adapter *adapter,
713 struct sk_buff *skb)
714{
715 u16 vlan_tag = 0;
716
717 skb = skb_share_check(skb, GFP_ATOMIC);
718 if (unlikely(!skb))
719 return skb;
720
721 if (vlan_tx_tag_present(skb)) {
722 vlan_tag = be_get_tx_vlan_tag(adapter, skb);
723 __vlan_put_tag(skb, vlan_tag);
724 skb->vlan_tci = 0;
725 }
726
727 return skb;
728}
729
Stephen Hemminger613573252009-08-31 19:50:58 +0000730static netdev_tx_t be_xmit(struct sk_buff *skb,
Sathya Perlab31c50a2009-09-17 10:30:13 -0700731 struct net_device *netdev)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700732{
733 struct be_adapter *adapter = netdev_priv(netdev);
Sathya Perla3c8def92011-06-12 20:01:58 +0000734 struct be_tx_obj *txo = &adapter->tx_obj[skb_get_queue_mapping(skb)];
735 struct be_queue_info *txq = &txo->q;
Somnath Kotur93040ae2012-06-26 22:32:10 +0000736 struct iphdr *ip = NULL;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700737 u32 wrb_cnt = 0, copied = 0;
Somnath Kotur93040ae2012-06-26 22:32:10 +0000738 u32 start = txq->head, eth_hdr_len;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700739 bool dummy_wrb, stopped = false;
740
Somnath Kotur93040ae2012-06-26 22:32:10 +0000741 eth_hdr_len = ntohs(skb->protocol) == ETH_P_8021Q ?
742 VLAN_ETH_HLEN : ETH_HLEN;
743
744 /* HW has a bug which considers padding bytes as legal
745 * and modifies the IPv4 hdr's 'tot_len' field
Ajit Khaparde1ded1322011-12-09 13:53:17 +0000746 */
Somnath Kotur93040ae2012-06-26 22:32:10 +0000747 if (skb->len <= 60 && be_vlan_tag_chk(adapter, skb) &&
748 is_ipv4_pkt(skb)) {
749 ip = (struct iphdr *)ip_hdr(skb);
750 pskb_trim(skb, eth_hdr_len + ntohs(ip->tot_len));
751 }
752
753 /* HW has a bug wherein it will calculate CSUM for VLAN
754 * pkts even though it is disabled.
755 * Manually insert VLAN in pkt.
756 */
757 if (skb->ip_summed != CHECKSUM_PARTIAL &&
758 be_vlan_tag_chk(adapter, skb)) {
759 skb = be_insert_vlan_in_pkt(adapter, skb);
Ajit Khaparde1ded1322011-12-09 13:53:17 +0000760 if (unlikely(!skb))
761 goto tx_drop;
Ajit Khaparde1ded1322011-12-09 13:53:17 +0000762 }
763
Sathya Perlafe6d2a32010-11-21 23:25:50 +0000764 wrb_cnt = wrb_cnt_for_skb(adapter, skb, &dummy_wrb);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700765
Sathya Perla3c8def92011-06-12 20:01:58 +0000766 copied = make_tx_wrbs(adapter, txq, skb, wrb_cnt, dummy_wrb);
Ajit Khapardec190e3c2009-09-04 03:12:29 +0000767 if (copied) {
Eric Dumazetcd8f76c2012-06-07 22:59:59 +0000768 int gso_segs = skb_shinfo(skb)->gso_segs;
769
Ajit Khapardec190e3c2009-09-04 03:12:29 +0000770 /* record the sent skb in the sent_skb table */
Sathya Perla3c8def92011-06-12 20:01:58 +0000771 BUG_ON(txo->sent_skb_list[start]);
772 txo->sent_skb_list[start] = skb;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700773
Ajit Khapardec190e3c2009-09-04 03:12:29 +0000774 /* Ensure txq has space for the next skb; Else stop the queue
775 * *BEFORE* ringing the tx doorbell, so that we serialze the
776 * tx compls of the current transmit which'll wake up the queue
777 */
Sathya Perla7101e112010-03-22 20:41:12 +0000778 atomic_add(wrb_cnt, &txq->used);
Ajit Khapardec190e3c2009-09-04 03:12:29 +0000779 if ((BE_MAX_TX_FRAG_COUNT + atomic_read(&txq->used)) >=
780 txq->len) {
Sathya Perla3c8def92011-06-12 20:01:58 +0000781 netif_stop_subqueue(netdev, skb_get_queue_mapping(skb));
Ajit Khapardec190e3c2009-09-04 03:12:29 +0000782 stopped = true;
783 }
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700784
Ajit Khapardec190e3c2009-09-04 03:12:29 +0000785 be_txq_notify(adapter, txq->id, wrb_cnt);
786
Eric Dumazetcd8f76c2012-06-07 22:59:59 +0000787 be_tx_stats_update(txo, wrb_cnt, copied, gso_segs, stopped);
Ajit Khapardec190e3c2009-09-04 03:12:29 +0000788 } else {
789 txq->head = start;
790 dev_kfree_skb_any(skb);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700791 }
Ajit Khaparde1ded1322011-12-09 13:53:17 +0000792tx_drop:
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700793 return NETDEV_TX_OK;
794}
795
796static int be_change_mtu(struct net_device *netdev, int new_mtu)
797{
798 struct be_adapter *adapter = netdev_priv(netdev);
799 if (new_mtu < BE_MIN_MTU ||
Ajit Khaparde34a89b82010-02-09 01:32:43 +0000800 new_mtu > (BE_MAX_JUMBO_FRAME_SIZE -
801 (ETH_HLEN + ETH_FCS_LEN))) {
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700802 dev_info(&adapter->pdev->dev,
803 "MTU must be between %d and %d bytes\n",
Ajit Khaparde34a89b82010-02-09 01:32:43 +0000804 BE_MIN_MTU,
805 (BE_MAX_JUMBO_FRAME_SIZE - (ETH_HLEN + ETH_FCS_LEN)));
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700806 return -EINVAL;
807 }
808 dev_info(&adapter->pdev->dev, "MTU changed from %d to %d bytes\n",
809 netdev->mtu, new_mtu);
810 netdev->mtu = new_mtu;
811 return 0;
812}
813
814/*
Ajit Khaparde82903e42010-02-09 01:34:57 +0000815 * A max of 64 (BE_NUM_VLANS_SUPPORTED) vlans can be configured in BE.
816 * If the user configures more, place BE in vlan promiscuous mode.
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700817 */
Sathya Perla10329df2012-06-05 19:37:18 +0000818static int be_vid_config(struct be_adapter *adapter)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700819{
Sathya Perla10329df2012-06-05 19:37:18 +0000820 u16 vids[BE_NUM_VLANS_SUPPORTED];
821 u16 num = 0, i;
Ajit Khaparde82903e42010-02-09 01:34:57 +0000822 int status = 0;
Ajit Khaparde1da87b72010-07-23 01:51:22 +0000823
Sathya Perlac0e64ef2011-08-02 19:57:43 +0000824 /* No need to further configure vids if in promiscuous mode */
825 if (adapter->promiscuous)
826 return 0;
827
Padmanabh Ratnakar0fc16eb2012-04-25 01:46:06 +0000828 if (adapter->vlans_added > adapter->max_vlans)
829 goto set_vlan_promisc;
830
831 /* Construct VLAN Table to give to HW */
832 for (i = 0; i < VLAN_N_VID; i++)
833 if (adapter->vlan_tag[i])
Sathya Perla10329df2012-06-05 19:37:18 +0000834 vids[num++] = cpu_to_le16(i);
Padmanabh Ratnakar0fc16eb2012-04-25 01:46:06 +0000835
836 status = be_cmd_vlan_config(adapter, adapter->if_handle,
Sathya Perla10329df2012-06-05 19:37:18 +0000837 vids, num, 1, 0);
Padmanabh Ratnakar0fc16eb2012-04-25 01:46:06 +0000838
839 /* Set to VLAN promisc mode as setting VLAN filter failed */
840 if (status) {
841 dev_info(&adapter->pdev->dev, "Exhausted VLAN HW filters.\n");
842 dev_info(&adapter->pdev->dev, "Disabling HW VLAN filtering.\n");
843 goto set_vlan_promisc;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700844 }
Ajit Khaparde1da87b72010-07-23 01:51:22 +0000845
Sathya Perlab31c50a2009-09-17 10:30:13 -0700846 return status;
Padmanabh Ratnakar0fc16eb2012-04-25 01:46:06 +0000847
848set_vlan_promisc:
849 status = be_cmd_vlan_config(adapter, adapter->if_handle,
850 NULL, 0, 1, 1);
851 return status;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700852}
853
Jiri Pirko8e586132011-12-08 19:52:37 -0500854static int be_vlan_add_vid(struct net_device *netdev, u16 vid)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700855{
856 struct be_adapter *adapter = netdev_priv(netdev);
Ajit Khaparde80817cb2011-12-30 12:15:12 +0000857 int status = 0;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700858
Ajit Khaparde80817cb2011-12-30 12:15:12 +0000859 if (!be_physfn(adapter)) {
860 status = -EINVAL;
861 goto ret;
862 }
Sarveshwar Bandiba343c72010-03-31 02:56:12 +0000863
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700864 adapter->vlan_tag[vid] = 1;
Ajit Khaparde82903e42010-02-09 01:34:57 +0000865 if (adapter->vlans_added <= (adapter->max_vlans + 1))
Sathya Perla10329df2012-06-05 19:37:18 +0000866 status = be_vid_config(adapter);
Jiri Pirko8e586132011-12-08 19:52:37 -0500867
Ajit Khaparde80817cb2011-12-30 12:15:12 +0000868 if (!status)
869 adapter->vlans_added++;
870 else
871 adapter->vlan_tag[vid] = 0;
872ret:
873 return status;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700874}
875
Jiri Pirko8e586132011-12-08 19:52:37 -0500876static int be_vlan_rem_vid(struct net_device *netdev, u16 vid)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700877{
878 struct be_adapter *adapter = netdev_priv(netdev);
Ajit Khaparde80817cb2011-12-30 12:15:12 +0000879 int status = 0;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700880
Ajit Khaparde80817cb2011-12-30 12:15:12 +0000881 if (!be_physfn(adapter)) {
882 status = -EINVAL;
883 goto ret;
884 }
Sarveshwar Bandiba343c72010-03-31 02:56:12 +0000885
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700886 adapter->vlan_tag[vid] = 0;
Ajit Khaparde82903e42010-02-09 01:34:57 +0000887 if (adapter->vlans_added <= adapter->max_vlans)
Sathya Perla10329df2012-06-05 19:37:18 +0000888 status = be_vid_config(adapter);
Jiri Pirko8e586132011-12-08 19:52:37 -0500889
Ajit Khaparde80817cb2011-12-30 12:15:12 +0000890 if (!status)
891 adapter->vlans_added--;
892 else
893 adapter->vlan_tag[vid] = 1;
894ret:
895 return status;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700896}
897
Sathya Perlaa54769f2011-10-24 02:45:00 +0000898static void be_set_rx_mode(struct net_device *netdev)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700899{
900 struct be_adapter *adapter = netdev_priv(netdev);
Padmanabh Ratnakar0fc16eb2012-04-25 01:46:06 +0000901 int status;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700902
903 if (netdev->flags & IFF_PROMISC) {
Sathya Perla5b8821b2011-08-02 19:57:44 +0000904 be_cmd_rx_filter(adapter, IFF_PROMISC, ON);
Sathya Perla24307ee2009-06-18 00:09:25 +0000905 adapter->promiscuous = true;
906 goto done;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700907 }
Sathya Perla24307ee2009-06-18 00:09:25 +0000908
Lucas De Marchi25985ed2011-03-30 22:57:33 -0300909 /* BE was previously in promiscuous mode; disable it */
Sathya Perla24307ee2009-06-18 00:09:25 +0000910 if (adapter->promiscuous) {
911 adapter->promiscuous = false;
Sathya Perla5b8821b2011-08-02 19:57:44 +0000912 be_cmd_rx_filter(adapter, IFF_PROMISC, OFF);
Sathya Perlac0e64ef2011-08-02 19:57:43 +0000913
914 if (adapter->vlans_added)
Sathya Perla10329df2012-06-05 19:37:18 +0000915 be_vid_config(adapter);
Sathya Perla24307ee2009-06-18 00:09:25 +0000916 }
917
Sathya Perlae7b909a2009-11-22 22:01:10 +0000918 /* Enable multicast promisc if num configured exceeds what we support */
Jiri Pirko4cd24ea2010-02-08 04:30:35 +0000919 if (netdev->flags & IFF_ALLMULTI ||
Padmanabh Ratnakarabb93952012-10-20 06:01:41 +0000920 netdev_mc_count(netdev) > adapter->max_mcast_mac) {
Sathya Perla5b8821b2011-08-02 19:57:44 +0000921 be_cmd_rx_filter(adapter, IFF_ALLMULTI, ON);
Sathya Perla24307ee2009-06-18 00:09:25 +0000922 goto done;
923 }
924
Ajit Khapardefbc13f02012-03-18 06:23:21 +0000925 if (netdev_uc_count(netdev) != adapter->uc_macs) {
926 struct netdev_hw_addr *ha;
927 int i = 1; /* First slot is claimed by the Primary MAC */
928
929 for (; adapter->uc_macs > 0; adapter->uc_macs--, i++) {
930 be_cmd_pmac_del(adapter, adapter->if_handle,
931 adapter->pmac_id[i], 0);
932 }
933
934 if (netdev_uc_count(netdev) > adapter->max_pmac_cnt) {
935 be_cmd_rx_filter(adapter, IFF_PROMISC, ON);
936 adapter->promiscuous = true;
937 goto done;
938 }
939
940 netdev_for_each_uc_addr(ha, adapter->netdev) {
941 adapter->uc_macs++; /* First slot is for Primary MAC */
942 be_cmd_pmac_add(adapter, (u8 *)ha->addr,
943 adapter->if_handle,
944 &adapter->pmac_id[adapter->uc_macs], 0);
945 }
946 }
947
Padmanabh Ratnakar0fc16eb2012-04-25 01:46:06 +0000948 status = be_cmd_rx_filter(adapter, IFF_MULTICAST, ON);
949
950 /* Set to MCAST promisc mode if setting MULTICAST address fails */
951 if (status) {
952 dev_info(&adapter->pdev->dev, "Exhausted multicast HW filters.\n");
953 dev_info(&adapter->pdev->dev, "Disabling HW multicast filtering.\n");
954 be_cmd_rx_filter(adapter, IFF_ALLMULTI, ON);
955 }
Sathya Perla24307ee2009-06-18 00:09:25 +0000956done:
957 return;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700958}
959
Sarveshwar Bandiba343c72010-03-31 02:56:12 +0000960static int be_set_vf_mac(struct net_device *netdev, int vf, u8 *mac)
961{
962 struct be_adapter *adapter = netdev_priv(netdev);
Sathya Perla11ac75e2011-12-13 00:58:50 +0000963 struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf];
Sarveshwar Bandiba343c72010-03-31 02:56:12 +0000964 int status;
965
Sathya Perla11ac75e2011-12-13 00:58:50 +0000966 if (!sriov_enabled(adapter))
Sarveshwar Bandiba343c72010-03-31 02:56:12 +0000967 return -EPERM;
968
Sathya Perla11ac75e2011-12-13 00:58:50 +0000969 if (!is_valid_ether_addr(mac) || vf >= adapter->num_vfs)
Sarveshwar Bandiba343c72010-03-31 02:56:12 +0000970 return -EINVAL;
971
Padmanabh Ratnakar590c3912011-11-25 05:47:26 +0000972 if (lancer_chip(adapter)) {
973 status = be_cmd_set_mac_list(adapter, mac, 1, vf + 1);
974 } else {
Sathya Perla11ac75e2011-12-13 00:58:50 +0000975 status = be_cmd_pmac_del(adapter, vf_cfg->if_handle,
976 vf_cfg->pmac_id, vf + 1);
Sarveshwar Bandiba343c72010-03-31 02:56:12 +0000977
Sathya Perla11ac75e2011-12-13 00:58:50 +0000978 status = be_cmd_pmac_add(adapter, mac, vf_cfg->if_handle,
979 &vf_cfg->pmac_id, vf + 1);
Padmanabh Ratnakar590c3912011-11-25 05:47:26 +0000980 }
981
Ajit Khaparde64600ea2010-07-23 01:50:34 +0000982 if (status)
Sarveshwar Bandiba343c72010-03-31 02:56:12 +0000983 dev_err(&adapter->pdev->dev, "MAC %pM set on VF %d Failed\n",
984 mac, vf);
Ajit Khaparde64600ea2010-07-23 01:50:34 +0000985 else
Sathya Perla11ac75e2011-12-13 00:58:50 +0000986 memcpy(vf_cfg->mac_addr, mac, ETH_ALEN);
Ajit Khaparde64600ea2010-07-23 01:50:34 +0000987
Sarveshwar Bandiba343c72010-03-31 02:56:12 +0000988 return status;
989}
990
Ajit Khaparde64600ea2010-07-23 01:50:34 +0000991static int be_get_vf_config(struct net_device *netdev, int vf,
992 struct ifla_vf_info *vi)
993{
994 struct be_adapter *adapter = netdev_priv(netdev);
Sathya Perla11ac75e2011-12-13 00:58:50 +0000995 struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf];
Ajit Khaparde64600ea2010-07-23 01:50:34 +0000996
Sathya Perla11ac75e2011-12-13 00:58:50 +0000997 if (!sriov_enabled(adapter))
Ajit Khaparde64600ea2010-07-23 01:50:34 +0000998 return -EPERM;
999
Sathya Perla11ac75e2011-12-13 00:58:50 +00001000 if (vf >= adapter->num_vfs)
Ajit Khaparde64600ea2010-07-23 01:50:34 +00001001 return -EINVAL;
1002
1003 vi->vf = vf;
Sathya Perla11ac75e2011-12-13 00:58:50 +00001004 vi->tx_rate = vf_cfg->tx_rate;
1005 vi->vlan = vf_cfg->vlan_tag;
Ajit Khaparde64600ea2010-07-23 01:50:34 +00001006 vi->qos = 0;
Sathya Perla11ac75e2011-12-13 00:58:50 +00001007 memcpy(&vi->mac, vf_cfg->mac_addr, ETH_ALEN);
Ajit Khaparde64600ea2010-07-23 01:50:34 +00001008
1009 return 0;
1010}
1011
Ajit Khaparde1da87b72010-07-23 01:51:22 +00001012static int be_set_vf_vlan(struct net_device *netdev,
1013 int vf, u16 vlan, u8 qos)
1014{
1015 struct be_adapter *adapter = netdev_priv(netdev);
1016 int status = 0;
1017
Sathya Perla11ac75e2011-12-13 00:58:50 +00001018 if (!sriov_enabled(adapter))
Ajit Khaparde1da87b72010-07-23 01:51:22 +00001019 return -EPERM;
1020
Sathya Perla11ac75e2011-12-13 00:58:50 +00001021 if (vf >= adapter->num_vfs || vlan > 4095)
Ajit Khaparde1da87b72010-07-23 01:51:22 +00001022 return -EINVAL;
1023
1024 if (vlan) {
Ajit Khapardef1f3ee12012-03-18 06:23:41 +00001025 if (adapter->vf_cfg[vf].vlan_tag != vlan) {
1026 /* If this is new value, program it. Else skip. */
1027 adapter->vf_cfg[vf].vlan_tag = vlan;
1028
1029 status = be_cmd_set_hsw_config(adapter, vlan,
1030 vf + 1, adapter->vf_cfg[vf].if_handle);
1031 }
Ajit Khaparde1da87b72010-07-23 01:51:22 +00001032 } else {
Ajit Khapardef1f3ee12012-03-18 06:23:41 +00001033 /* Reset Transparent Vlan Tagging. */
Sathya Perla11ac75e2011-12-13 00:58:50 +00001034 adapter->vf_cfg[vf].vlan_tag = 0;
Ajit Khapardef1f3ee12012-03-18 06:23:41 +00001035 vlan = adapter->vf_cfg[vf].def_vid;
1036 status = be_cmd_set_hsw_config(adapter, vlan, vf + 1,
1037 adapter->vf_cfg[vf].if_handle);
Ajit Khaparde1da87b72010-07-23 01:51:22 +00001038 }
1039
Ajit Khaparde1da87b72010-07-23 01:51:22 +00001040
1041 if (status)
1042 dev_info(&adapter->pdev->dev,
1043 "VLAN %d config on VF %d failed\n", vlan, vf);
1044 return status;
1045}
1046
Ajit Khapardee1d18732010-07-23 01:52:13 +00001047static int be_set_vf_tx_rate(struct net_device *netdev,
1048 int vf, int rate)
1049{
1050 struct be_adapter *adapter = netdev_priv(netdev);
1051 int status = 0;
1052
Sathya Perla11ac75e2011-12-13 00:58:50 +00001053 if (!sriov_enabled(adapter))
Ajit Khapardee1d18732010-07-23 01:52:13 +00001054 return -EPERM;
1055
Ajit Khaparde94f434c2011-12-30 12:15:30 +00001056 if (vf >= adapter->num_vfs)
Ajit Khapardee1d18732010-07-23 01:52:13 +00001057 return -EINVAL;
1058
Ajit Khaparde94f434c2011-12-30 12:15:30 +00001059 if (rate < 100 || rate > 10000) {
1060 dev_err(&adapter->pdev->dev,
1061 "tx rate must be between 100 and 10000 Mbps\n");
1062 return -EINVAL;
1063 }
Ajit Khapardee1d18732010-07-23 01:52:13 +00001064
Ajit Khaparde856c4012011-02-11 13:32:32 +00001065 status = be_cmd_set_qos(adapter, rate / 10, vf + 1);
Ajit Khapardee1d18732010-07-23 01:52:13 +00001066
1067 if (status)
Ajit Khaparde94f434c2011-12-30 12:15:30 +00001068 dev_err(&adapter->pdev->dev,
Ajit Khapardee1d18732010-07-23 01:52:13 +00001069 "tx rate %d on VF %d failed\n", rate, vf);
Ajit Khaparde94f434c2011-12-30 12:15:30 +00001070 else
1071 adapter->vf_cfg[vf].tx_rate = rate;
Ajit Khapardee1d18732010-07-23 01:52:13 +00001072 return status;
1073}
1074
Sathya Perla39f1d942012-05-08 19:41:24 +00001075static int be_find_vfs(struct be_adapter *adapter, int vf_state)
1076{
1077 struct pci_dev *dev, *pdev = adapter->pdev;
Ivan Vecera2f6a0262012-10-01 01:56:55 +00001078 int vfs = 0, assigned_vfs = 0, pos;
Sathya Perla39f1d942012-05-08 19:41:24 +00001079 u16 offset, stride;
1080
1081 pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_SRIOV);
Sathya Perlad79c0a22012-06-05 19:37:22 +00001082 if (!pos)
1083 return 0;
Sathya Perla39f1d942012-05-08 19:41:24 +00001084 pci_read_config_word(pdev, pos + PCI_SRIOV_VF_OFFSET, &offset);
1085 pci_read_config_word(pdev, pos + PCI_SRIOV_VF_STRIDE, &stride);
1086
1087 dev = pci_get_device(pdev->vendor, PCI_ANY_ID, NULL);
1088 while (dev) {
Ivan Vecera2f6a0262012-10-01 01:56:55 +00001089 if (dev->is_virtfn && pci_physfn(dev) == pdev) {
Sathya Perla39f1d942012-05-08 19:41:24 +00001090 vfs++;
1091 if (dev->dev_flags & PCI_DEV_FLAGS_ASSIGNED)
1092 assigned_vfs++;
1093 }
1094 dev = pci_get_device(pdev->vendor, PCI_ANY_ID, dev);
1095 }
1096 return (vf_state == ASSIGNED) ? assigned_vfs : vfs;
1097}
1098
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001099static void be_eqd_update(struct be_adapter *adapter, struct be_eq_obj *eqo)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001100{
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001101 struct be_rx_stats *stats = rx_stats(&adapter->rx_obj[eqo->idx]);
Sathya Perla4097f662009-03-24 16:40:13 -07001102 ulong now = jiffies;
Sathya Perlaac124ff2011-07-25 19:10:14 +00001103 ulong delta = now - stats->rx_jiffies;
Sathya Perlaab1594e2011-07-25 19:10:15 +00001104 u64 pkts;
1105 unsigned int start, eqd;
Sathya Perlaac124ff2011-07-25 19:10:14 +00001106
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001107 if (!eqo->enable_aic) {
1108 eqd = eqo->eqd;
1109 goto modify_eqd;
1110 }
1111
1112 if (eqo->idx >= adapter->num_rx_qs)
Sathya Perlaac124ff2011-07-25 19:10:14 +00001113 return;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001114
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001115 stats = rx_stats(&adapter->rx_obj[eqo->idx]);
1116
Sathya Perla4097f662009-03-24 16:40:13 -07001117 /* Wrapped around */
Sathya Perla3abcded2010-10-03 22:12:27 -07001118 if (time_before(now, stats->rx_jiffies)) {
1119 stats->rx_jiffies = now;
Sathya Perla4097f662009-03-24 16:40:13 -07001120 return;
1121 }
1122
Sathya Perlaac124ff2011-07-25 19:10:14 +00001123 /* Update once a second */
1124 if (delta < HZ)
Sathya Perla4097f662009-03-24 16:40:13 -07001125 return;
1126
Sathya Perlaab1594e2011-07-25 19:10:15 +00001127 do {
1128 start = u64_stats_fetch_begin_bh(&stats->sync);
1129 pkts = stats->rx_pkts;
1130 } while (u64_stats_fetch_retry_bh(&stats->sync, start));
1131
Eric Dumazet68c3e5a2011-08-09 06:23:07 +00001132 stats->rx_pps = (unsigned long)(pkts - stats->rx_pkts_prev) / (delta / HZ);
Sathya Perlaab1594e2011-07-25 19:10:15 +00001133 stats->rx_pkts_prev = pkts;
Sathya Perla3abcded2010-10-03 22:12:27 -07001134 stats->rx_jiffies = now;
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001135 eqd = (stats->rx_pps / 110000) << 3;
1136 eqd = min(eqd, eqo->max_eqd);
1137 eqd = max(eqd, eqo->min_eqd);
Sathya Perlaac124ff2011-07-25 19:10:14 +00001138 if (eqd < 10)
1139 eqd = 0;
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001140
1141modify_eqd:
1142 if (eqd != eqo->cur_eqd) {
1143 be_cmd_modify_eqd(adapter, eqo->q.id, eqd);
1144 eqo->cur_eqd = eqd;
Sathya Perlaac124ff2011-07-25 19:10:14 +00001145 }
Sathya Perla4097f662009-03-24 16:40:13 -07001146}
1147
Sathya Perla3abcded2010-10-03 22:12:27 -07001148static void be_rx_stats_update(struct be_rx_obj *rxo,
Sathya Perla2e588f82011-03-11 02:49:26 +00001149 struct be_rx_compl_info *rxcp)
Sathya Perla4097f662009-03-24 16:40:13 -07001150{
Sathya Perlaac124ff2011-07-25 19:10:14 +00001151 struct be_rx_stats *stats = rx_stats(rxo);
Sathya Perla4097f662009-03-24 16:40:13 -07001152
Sathya Perlaab1594e2011-07-25 19:10:15 +00001153 u64_stats_update_begin(&stats->sync);
Sathya Perla3abcded2010-10-03 22:12:27 -07001154 stats->rx_compl++;
Sathya Perla2e588f82011-03-11 02:49:26 +00001155 stats->rx_bytes += rxcp->pkt_size;
Sathya Perla3abcded2010-10-03 22:12:27 -07001156 stats->rx_pkts++;
Sathya Perla2e588f82011-03-11 02:49:26 +00001157 if (rxcp->pkt_type == BE_MULTICAST_PACKET)
Sathya Perla3abcded2010-10-03 22:12:27 -07001158 stats->rx_mcast_pkts++;
Sathya Perla2e588f82011-03-11 02:49:26 +00001159 if (rxcp->err)
Sathya Perlaac124ff2011-07-25 19:10:14 +00001160 stats->rx_compl_err++;
Sathya Perlaab1594e2011-07-25 19:10:15 +00001161 u64_stats_update_end(&stats->sync);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001162}
1163
Sathya Perla2e588f82011-03-11 02:49:26 +00001164static inline bool csum_passed(struct be_rx_compl_info *rxcp)
Ajit Khaparde728a9972009-04-13 15:41:22 -07001165{
Padmanabh Ratnakar19fad862011-03-07 03:08:16 +00001166 /* L4 checksum is not reliable for non TCP/UDP packets.
1167 * Also ignore ipcksm for ipv6 pkts */
Sathya Perla2e588f82011-03-11 02:49:26 +00001168 return (rxcp->tcpf || rxcp->udpf) && rxcp->l4_csum &&
1169 (rxcp->ip_csum || rxcp->ipv6);
Ajit Khaparde728a9972009-04-13 15:41:22 -07001170}
1171
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001172static struct be_rx_page_info *get_rx_page_info(struct be_rx_obj *rxo,
1173 u16 frag_idx)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001174{
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001175 struct be_adapter *adapter = rxo->adapter;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001176 struct be_rx_page_info *rx_page_info;
Sathya Perla3abcded2010-10-03 22:12:27 -07001177 struct be_queue_info *rxq = &rxo->q;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001178
Sathya Perla3abcded2010-10-03 22:12:27 -07001179 rx_page_info = &rxo->page_info_tbl[frag_idx];
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001180 BUG_ON(!rx_page_info->page);
1181
Ajit Khaparde205859a2010-02-09 01:34:21 +00001182 if (rx_page_info->last_page_user) {
Ivan Vecera2b7bceb2011-02-02 08:05:12 +00001183 dma_unmap_page(&adapter->pdev->dev,
1184 dma_unmap_addr(rx_page_info, bus),
1185 adapter->big_page_size, DMA_FROM_DEVICE);
Ajit Khaparde205859a2010-02-09 01:34:21 +00001186 rx_page_info->last_page_user = false;
1187 }
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001188
1189 atomic_dec(&rxq->used);
1190 return rx_page_info;
1191}
1192
1193/* Throwaway the data in the Rx completion */
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001194static void be_rx_compl_discard(struct be_rx_obj *rxo,
1195 struct be_rx_compl_info *rxcp)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001196{
Sathya Perla3abcded2010-10-03 22:12:27 -07001197 struct be_queue_info *rxq = &rxo->q;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001198 struct be_rx_page_info *page_info;
Sathya Perla2e588f82011-03-11 02:49:26 +00001199 u16 i, num_rcvd = rxcp->num_rcvd;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001200
Padmanabh Ratnakare80d9da2011-03-07 03:07:58 +00001201 for (i = 0; i < num_rcvd; i++) {
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001202 page_info = get_rx_page_info(rxo, rxcp->rxq_idx);
Padmanabh Ratnakare80d9da2011-03-07 03:07:58 +00001203 put_page(page_info->page);
1204 memset(page_info, 0, sizeof(*page_info));
Sathya Perla2e588f82011-03-11 02:49:26 +00001205 index_inc(&rxcp->rxq_idx, rxq->len);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001206 }
1207}
1208
1209/*
1210 * skb_fill_rx_data forms a complete skb for an ether frame
1211 * indicated by rxcp.
1212 */
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001213static void skb_fill_rx_data(struct be_rx_obj *rxo, struct sk_buff *skb,
1214 struct be_rx_compl_info *rxcp)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001215{
Sathya Perla3abcded2010-10-03 22:12:27 -07001216 struct be_queue_info *rxq = &rxo->q;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001217 struct be_rx_page_info *page_info;
Sathya Perla2e588f82011-03-11 02:49:26 +00001218 u16 i, j;
1219 u16 hdr_len, curr_frag_len, remaining;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001220 u8 *start;
1221
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001222 page_info = get_rx_page_info(rxo, rxcp->rxq_idx);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001223 start = page_address(page_info->page) + page_info->page_offset;
1224 prefetch(start);
1225
1226 /* Copy data in the first descriptor of this completion */
Sathya Perla2e588f82011-03-11 02:49:26 +00001227 curr_frag_len = min(rxcp->pkt_size, rx_frag_size);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001228
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001229 skb->len = curr_frag_len;
1230 if (curr_frag_len <= BE_HDR_LEN) { /* tiny packet */
Eric Dumazetac1ae5f2012-07-13 03:19:41 +00001231 memcpy(skb->data, start, curr_frag_len);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001232 /* Complete packet has now been moved to data */
1233 put_page(page_info->page);
1234 skb->data_len = 0;
1235 skb->tail += curr_frag_len;
1236 } else {
Eric Dumazetac1ae5f2012-07-13 03:19:41 +00001237 hdr_len = ETH_HLEN;
1238 memcpy(skb->data, start, hdr_len);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001239 skb_shinfo(skb)->nr_frags = 1;
Ian Campbellb061b392011-08-29 23:18:23 +00001240 skb_frag_set_page(skb, 0, page_info->page);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001241 skb_shinfo(skb)->frags[0].page_offset =
1242 page_info->page_offset + hdr_len;
Eric Dumazet9e903e02011-10-18 21:00:24 +00001243 skb_frag_size_set(&skb_shinfo(skb)->frags[0], curr_frag_len - hdr_len);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001244 skb->data_len = curr_frag_len - hdr_len;
Eric Dumazetbdb28a92011-10-13 06:31:02 +00001245 skb->truesize += rx_frag_size;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001246 skb->tail += hdr_len;
1247 }
Ajit Khaparde205859a2010-02-09 01:34:21 +00001248 page_info->page = NULL;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001249
Sathya Perla2e588f82011-03-11 02:49:26 +00001250 if (rxcp->pkt_size <= rx_frag_size) {
1251 BUG_ON(rxcp->num_rcvd != 1);
1252 return;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001253 }
1254
1255 /* More frags present for this completion */
Sathya Perla2e588f82011-03-11 02:49:26 +00001256 index_inc(&rxcp->rxq_idx, rxq->len);
1257 remaining = rxcp->pkt_size - curr_frag_len;
1258 for (i = 1, j = 0; i < rxcp->num_rcvd; i++) {
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001259 page_info = get_rx_page_info(rxo, rxcp->rxq_idx);
Sathya Perla2e588f82011-03-11 02:49:26 +00001260 curr_frag_len = min(remaining, rx_frag_size);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001261
Ajit Khapardebd46cb62009-06-26 02:51:07 +00001262 /* Coalesce all frags from the same physical page in one slot */
1263 if (page_info->page_offset == 0) {
1264 /* Fresh page */
1265 j++;
Ian Campbellb061b392011-08-29 23:18:23 +00001266 skb_frag_set_page(skb, j, page_info->page);
Ajit Khapardebd46cb62009-06-26 02:51:07 +00001267 skb_shinfo(skb)->frags[j].page_offset =
1268 page_info->page_offset;
Eric Dumazet9e903e02011-10-18 21:00:24 +00001269 skb_frag_size_set(&skb_shinfo(skb)->frags[j], 0);
Ajit Khapardebd46cb62009-06-26 02:51:07 +00001270 skb_shinfo(skb)->nr_frags++;
1271 } else {
1272 put_page(page_info->page);
1273 }
1274
Eric Dumazet9e903e02011-10-18 21:00:24 +00001275 skb_frag_size_add(&skb_shinfo(skb)->frags[j], curr_frag_len);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001276 skb->len += curr_frag_len;
1277 skb->data_len += curr_frag_len;
Eric Dumazetbdb28a92011-10-13 06:31:02 +00001278 skb->truesize += rx_frag_size;
Sathya Perla2e588f82011-03-11 02:49:26 +00001279 remaining -= curr_frag_len;
1280 index_inc(&rxcp->rxq_idx, rxq->len);
Ajit Khaparde205859a2010-02-09 01:34:21 +00001281 page_info->page = NULL;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001282 }
Ajit Khapardebd46cb62009-06-26 02:51:07 +00001283 BUG_ON(j > MAX_SKB_FRAGS);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001284}
1285
/* Process the RX completion indicated by rxcp when GRO is disabled.
 * Copies/attaches the received frags to a freshly allocated skb and hands
 * it to the stack via netif_receive_skb().
 */
static void be_rx_compl_process(struct be_rx_obj *rxo,
				struct be_rx_compl_info *rxcp)
{
	struct be_adapter *adapter = rxo->adapter;
	struct net_device *netdev = adapter->netdev;
	struct sk_buff *skb;

	skb = netdev_alloc_skb_ip_align(netdev, BE_RX_SKB_ALLOC_SIZE);
	if (unlikely(!skb)) {
		/* No skb memory: drop the completion and recycle its buffers */
		rx_stats(rxo)->rx_drops_no_skbs++;
		be_rx_compl_discard(rxo, rxcp);
		return;
	}

	skb_fill_rx_data(rxo, skb, rxcp);

	/* Trust the HW checksum only when the netdev has RXCSUM enabled */
	if (likely((netdev->features & NETIF_F_RXCSUM) && csum_passed(rxcp)))
		skb->ip_summed = CHECKSUM_UNNECESSARY;
	else
		skb_checksum_none_assert(skb);

	skb->protocol = eth_type_trans(skb, netdev);
	/* rx queue index = offset of this rxo within the adapter's rxo array */
	skb_record_rx_queue(skb, rxo - &adapter->rx_obj[0]);
	if (netdev->features & NETIF_F_RXHASH)
		skb->rxhash = rxcp->rss_hash;


	if (rxcp->vlanf)
		__vlan_hwaccel_put_tag(skb, rxcp->vlan_tag);

	netif_receive_skb(skb);
}
1319
/* Process the RX completion indicated by rxcp when GRO is enabled.
 * Attaches the received page frags directly to the napi skb (zero-copy)
 * and feeds it to the GRO engine.
 */
void be_rx_compl_process_gro(struct be_rx_obj *rxo, struct napi_struct *napi,
			     struct be_rx_compl_info *rxcp)
{
	struct be_adapter *adapter = rxo->adapter;
	struct be_rx_page_info *page_info;
	struct sk_buff *skb = NULL;
	struct be_queue_info *rxq = &rxo->q;
	u16 remaining, curr_frag_len;
	u16 i, j;

	skb = napi_get_frags(napi);
	if (!skb) {
		be_rx_compl_discard(rxo, rxcp);
		return;
	}

	remaining = rxcp->pkt_size;
	/* j is u16; starting it at -1 (i.e. 0xffff) is intentional - the
	 * i == 0 branch below increments it to 0 before first use */
	for (i = 0, j = -1; i < rxcp->num_rcvd; i++) {
		page_info = get_rx_page_info(rxo, rxcp->rxq_idx);

		curr_frag_len = min(remaining, rx_frag_size);

		/* Coalesce all frags from the same physical page in one slot */
		if (i == 0 || page_info->page_offset == 0) {
			/* First frag or Fresh page */
			j++;
			skb_frag_set_page(skb, j, page_info->page);
			skb_shinfo(skb)->frags[j].page_offset =
							page_info->page_offset;
			skb_frag_size_set(&skb_shinfo(skb)->frags[j], 0);
		} else {
			/* Same page as previous frag: drop the extra ref
			 * taken when the buffer was posted */
			put_page(page_info->page);
		}
		skb_frag_size_add(&skb_shinfo(skb)->frags[j], curr_frag_len);
		skb->truesize += rx_frag_size;
		remaining -= curr_frag_len;
		index_inc(&rxcp->rxq_idx, rxq->len);
		memset(page_info, 0, sizeof(*page_info));
	}
	BUG_ON(j > MAX_SKB_FRAGS);

	skb_shinfo(skb)->nr_frags = j + 1;
	skb->len = rxcp->pkt_size;
	skb->data_len = rxcp->pkt_size;
	/* GRO path is only taken for packets whose csum passed in HW */
	skb->ip_summed = CHECKSUM_UNNECESSARY;
	skb_record_rx_queue(skb, rxo - &adapter->rx_obj[0]);
	if (adapter->netdev->features & NETIF_F_RXHASH)
		skb->rxhash = rxcp->rss_hash;

	if (rxcp->vlanf)
		__vlan_hwaccel_put_tag(skb, rxcp->vlan_tag);

	napi_gro_frags(napi);
}
1375
/* Decode a v1 (BE3-native) RX completion descriptor into the sw rxcp struct */
static void be_parse_rx_compl_v1(struct be_eth_rx_compl *compl,
				 struct be_rx_compl_info *rxcp)
{
	rxcp->pkt_size =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v1, pktsize, compl);
	rxcp->vlanf = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, vtp, compl);
	rxcp->err = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, err, compl);
	rxcp->tcpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, tcpf, compl);
	rxcp->udpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, udpf, compl);
	rxcp->ip_csum =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v1, ipcksm, compl);
	rxcp->l4_csum =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v1, l4_cksm, compl);
	rxcp->ipv6 =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v1, ip_version, compl);
	rxcp->rxq_idx =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v1, fragndx, compl);
	rxcp->num_rcvd =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v1, numfrags, compl);
	rxcp->pkt_type =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v1, cast_enc, compl);
	rxcp->rss_hash =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v1, rsshash, compl);
	/* vlan fields are only valid when the vtp bit is set */
	if (rxcp->vlanf) {
		rxcp->vtm = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, vtm,
					  compl);
		rxcp->vlan_tag = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, vlan_tag,
					       compl);
	}
	rxcp->port = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, port, compl);
}
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001407
/* Decode a v0 (legacy) RX completion descriptor into the sw rxcp struct.
 * Field-for-field twin of be_parse_rx_compl_v1() over the v0 layout.
 */
static void be_parse_rx_compl_v0(struct be_eth_rx_compl *compl,
				 struct be_rx_compl_info *rxcp)
{
	rxcp->pkt_size =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v0, pktsize, compl);
	rxcp->vlanf = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, vtp, compl);
	rxcp->err = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, err, compl);
	rxcp->tcpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, tcpf, compl);
	rxcp->udpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, udpf, compl);
	rxcp->ip_csum =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v0, ipcksm, compl);
	rxcp->l4_csum =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v0, l4_cksm, compl);
	rxcp->ipv6 =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v0, ip_version, compl);
	rxcp->rxq_idx =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v0, fragndx, compl);
	rxcp->num_rcvd =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v0, numfrags, compl);
	rxcp->pkt_type =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v0, cast_enc, compl);
	rxcp->rss_hash =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v0, rsshash, compl);
	/* vlan fields are only valid when the vtp bit is set */
	if (rxcp->vlanf) {
		rxcp->vtm = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, vtm,
					  compl);
		rxcp->vlan_tag = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, vlan_tag,
					       compl);
	}
	rxcp->port = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, port, compl);
}
1439
/* Fetch and parse the next valid RX completion from rxo's CQ, or return
 * NULL if none is pending. The returned rxcp is rxo->rxcp, valid until the
 * next call for this rxo.
 */
static struct be_rx_compl_info *be_rx_compl_get(struct be_rx_obj *rxo)
{
	struct be_eth_rx_compl *compl = queue_tail_node(&rxo->cq);
	struct be_rx_compl_info *rxcp = &rxo->rxcp;
	struct be_adapter *adapter = rxo->adapter;

	/* For checking the valid bit it is Ok to use either definition as the
	 * valid bit is at the same position in both v0 and v1 Rx compl */
	if (compl->dw[offsetof(struct amap_eth_rx_compl_v1, valid) / 32] == 0)
		return NULL;

	/* Ensure reads of the compl body are not reordered before the
	 * valid-bit check above */
	rmb();
	be_dws_le_to_cpu(compl, sizeof(*compl));

	if (adapter->be3_native)
		be_parse_rx_compl_v1(compl, rxcp);
	else
		be_parse_rx_compl_v0(compl, rxcp);

	if (rxcp->vlanf) {
		/* vlanf could be wrongly set in some cards.
		 * ignore if vtm is not set */
		if ((adapter->function_mode & FLEX10_MODE) && !rxcp->vtm)
			rxcp->vlanf = 0;

		if (!lancer_chip(adapter))
			rxcp->vlan_tag = swab16(rxcp->vlan_tag);

		/* Strip the vlan flag for packets tagged with the port's
		 * pvid unless that vlan was explicitly configured */
		if (adapter->pvid == (rxcp->vlan_tag & VLAN_VID_MASK) &&
		    !adapter->vlan_tag[rxcp->vlan_tag])
			rxcp->vlanf = 0;
	}

	/* As the compl has been parsed, reset it; we wont touch it again */
	compl->dw[offsetof(struct amap_eth_rx_compl_v1, valid) / 32] = 0;

	queue_tail_inc(&rxo->cq);
	return rxcp;
}
1479
Eric Dumazet1829b082011-03-01 05:48:12 +00001480static inline struct page *be_alloc_pages(u32 size, gfp_t gfp)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001481{
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001482 u32 order = get_order(size);
Eric Dumazet1829b082011-03-01 05:48:12 +00001483
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001484 if (order > 0)
Eric Dumazet1829b082011-03-01 05:48:12 +00001485 gfp |= __GFP_COMP;
1486 return alloc_pages(gfp, order);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001487}
1488
1489/*
1490 * Allocate a page, split it to fragments of size rx_frag_size and post as
1491 * receive buffers to BE
1492 */
Eric Dumazet1829b082011-03-01 05:48:12 +00001493static void be_post_rx_frags(struct be_rx_obj *rxo, gfp_t gfp)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001494{
Sathya Perla3abcded2010-10-03 22:12:27 -07001495 struct be_adapter *adapter = rxo->adapter;
Sathya Perla26d92f92010-01-21 22:52:08 -08001496 struct be_rx_page_info *page_info = NULL, *prev_page_info = NULL;
Sathya Perla3abcded2010-10-03 22:12:27 -07001497 struct be_queue_info *rxq = &rxo->q;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001498 struct page *pagep = NULL;
1499 struct be_eth_rx_d *rxd;
1500 u64 page_dmaaddr = 0, frag_dmaaddr;
1501 u32 posted, page_offset = 0;
1502
Sathya Perla3abcded2010-10-03 22:12:27 -07001503 page_info = &rxo->page_info_tbl[rxq->head];
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001504 for (posted = 0; posted < MAX_RX_POST && !page_info->page; posted++) {
1505 if (!pagep) {
Eric Dumazet1829b082011-03-01 05:48:12 +00001506 pagep = be_alloc_pages(adapter->big_page_size, gfp);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001507 if (unlikely(!pagep)) {
Sathya Perlaac124ff2011-07-25 19:10:14 +00001508 rx_stats(rxo)->rx_post_fail++;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001509 break;
1510 }
Ivan Vecera2b7bceb2011-02-02 08:05:12 +00001511 page_dmaaddr = dma_map_page(&adapter->pdev->dev, pagep,
1512 0, adapter->big_page_size,
1513 DMA_FROM_DEVICE);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001514 page_info->page_offset = 0;
1515 } else {
1516 get_page(pagep);
1517 page_info->page_offset = page_offset + rx_frag_size;
1518 }
1519 page_offset = page_info->page_offset;
1520 page_info->page = pagep;
FUJITA Tomonorifac6da52010-04-01 16:53:22 +00001521 dma_unmap_addr_set(page_info, bus, page_dmaaddr);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001522 frag_dmaaddr = page_dmaaddr + page_info->page_offset;
1523
1524 rxd = queue_head_node(rxq);
1525 rxd->fragpa_lo = cpu_to_le32(frag_dmaaddr & 0xFFFFFFFF);
1526 rxd->fragpa_hi = cpu_to_le32(upper_32_bits(frag_dmaaddr));
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001527
1528 /* Any space left in the current big page for another frag? */
1529 if ((page_offset + rx_frag_size + rx_frag_size) >
1530 adapter->big_page_size) {
1531 pagep = NULL;
1532 page_info->last_page_user = true;
1533 }
Sathya Perla26d92f92010-01-21 22:52:08 -08001534
1535 prev_page_info = page_info;
1536 queue_head_inc(rxq);
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001537 page_info = &rxo->page_info_tbl[rxq->head];
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001538 }
1539 if (pagep)
Sathya Perla26d92f92010-01-21 22:52:08 -08001540 prev_page_info->last_page_user = true;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001541
1542 if (posted) {
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001543 atomic_add(posted, &rxq->used);
Sathya Perla8788fdc2009-07-27 22:52:03 +00001544 be_rxq_notify(adapter, rxq->id, posted);
Sathya Perlaea1dae12009-03-19 23:56:20 -07001545 } else if (atomic_read(&rxq->used) == 0) {
1546 /* Let be_worker replenish when memory is available */
Sathya Perla3abcded2010-10-03 22:12:27 -07001547 rxo->rx_post_starved = true;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001548 }
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001549}
1550
/* Fetch the next valid TX completion from tx_cq, or NULL if none pending.
 * The valid bit is cleared so the entry is consumed exactly once.
 */
static struct be_eth_tx_compl *be_tx_compl_get(struct be_queue_info *tx_cq)
{
	struct be_eth_tx_compl *txcp = queue_tail_node(tx_cq);

	if (txcp->dw[offsetof(struct amap_eth_tx_compl, valid) / 32] == 0)
		return NULL;

	/* Don't let reads of the compl body be reordered before the
	 * valid-bit check */
	rmb();
	be_dws_le_to_cpu(txcp, sizeof(*txcp));

	txcp->dw[offsetof(struct amap_eth_tx_compl, valid) / 32] = 0;

	queue_tail_inc(tx_cq);
	return txcp;
}
1566
/* Reclaim one transmitted skb: unmap its wrbs from txq->tail through
 * last_index and free the skb. Returns the number of wrbs consumed
 * (including the header wrb) so the caller can adjust txq->used.
 */
static u16 be_tx_compl_process(struct be_adapter *adapter,
		struct be_tx_obj *txo, u16 last_index)
{
	struct be_queue_info *txq = &txo->q;
	struct be_eth_wrb *wrb;
	struct sk_buff **sent_skbs = txo->sent_skb_list;
	struct sk_buff *sent_skb;
	u16 cur_index, num_wrbs = 1; /* account for hdr wrb */
	bool unmap_skb_hdr = true;

	sent_skb = sent_skbs[txq->tail];
	BUG_ON(!sent_skb);
	sent_skbs[txq->tail] = NULL;

	/* skip header wrb */
	queue_tail_inc(txq);

	do {
		cur_index = txq->tail;
		wrb = queue_tail_node(txq);
		/* The first data wrb may also cover the (linear) skb header;
		 * unmap it only once */
		unmap_tx_frag(&adapter->pdev->dev, wrb,
			      (unmap_skb_hdr && skb_headlen(sent_skb)));
		unmap_skb_hdr = false;

		num_wrbs++;
		queue_tail_inc(txq);
	} while (cur_index != last_index);

	kfree_skb(sent_skb);
	return num_wrbs;
}
1598
/* Return the number of events in the event queue */
static inline int events_get(struct be_eq_obj *eqo)
{
	struct be_eq_entry *eqe;
	int num = 0;

	/* Consume valid EQ entries until an unwritten (evt == 0) one is hit */
	do {
		eqe = queue_tail_node(&eqo->q);
		if (eqe->evt == 0)
			break;

		/* Order subsequent accesses after the evt validity check */
		rmb();
		eqe->evt = 0;
		num++;
		queue_tail_inc(&eqo->q);
	} while (true);

	return num;
}
1618
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001619static int event_handle(struct be_eq_obj *eqo)
Sathya Perla859b1e42009-08-10 03:43:51 +00001620{
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001621 bool rearm = false;
1622 int num = events_get(eqo);
Sathya Perla859b1e42009-08-10 03:43:51 +00001623
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001624 /* Deal with any spurious interrupts that come without events */
1625 if (!num)
1626 rearm = true;
Sathya Perla859b1e42009-08-10 03:43:51 +00001627
Padmanabh Ratnakaraf311fe2012-04-25 01:46:39 +00001628 if (num || msix_enabled(eqo->adapter))
1629 be_eq_notify(eqo->adapter, eqo->q.id, rearm, true, num);
1630
Sathya Perla859b1e42009-08-10 03:43:51 +00001631 if (num)
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001632 napi_schedule(&eqo->napi);
1633
1634 return num;
Sathya Perla859b1e42009-08-10 03:43:51 +00001635}
1636
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001637/* Leaves the EQ is disarmed state */
1638static void be_eq_clean(struct be_eq_obj *eqo)
1639{
1640 int num = events_get(eqo);
1641
1642 be_eq_notify(eqo->adapter, eqo->q.id, false, true, num);
1643}
1644
/* Flush an RX ring for teardown: discard all pending completions, then
 * release every posted-but-unused buffer and reset the ring indices.
 */
static void be_rx_cq_clean(struct be_rx_obj *rxo)
{
	struct be_rx_page_info *page_info;
	struct be_queue_info *rxq = &rxo->q;
	struct be_queue_info *rx_cq = &rxo->cq;
	struct be_rx_compl_info *rxcp;
	u16 tail;

	/* First cleanup pending rx completions */
	while ((rxcp = be_rx_compl_get(rxo)) != NULL) {
		be_rx_compl_discard(rxo, rxcp);
		be_cq_notify(rxo->adapter, rx_cq->id, false, 1);
	}

	/* Then free posted rx buffer that were not used */
	/* Oldest posted buffer sits rxq->used slots behind rxq->head */
	tail = (rxq->head + rxq->len - atomic_read(&rxq->used)) % rxq->len;
	for (; atomic_read(&rxq->used) > 0; index_inc(&tail, rxq->len)) {
		page_info = get_rx_page_info(rxo, tail);
		put_page(page_info->page);
		memset(page_info, 0, sizeof(*page_info));
	}
	BUG_ON(atomic_read(&rxq->used));
	rxq->tail = rxq->head = 0;
}
1669
/* Drain all TX queues for teardown: poll up to ~200ms for outstanding
 * completions, then forcibly unmap and free any skbs whose completions
 * never arrived.
 */
static void be_tx_compl_clean(struct be_adapter *adapter)
{
	struct be_tx_obj *txo;
	struct be_queue_info *txq;
	struct be_eth_tx_compl *txcp;
	u16 end_idx, cmpl = 0, timeo = 0, num_wrbs = 0;
	struct sk_buff *sent_skb;
	bool dummy_wrb;
	int i, pending_txqs;

	/* Wait for a max of 200ms for all the tx-completions to arrive. */
	do {
		pending_txqs = adapter->num_tx_qs;

		for_all_tx_queues(adapter, txo, i) {
			txq = &txo->q;
			while ((txcp = be_tx_compl_get(&txo->cq))) {
				end_idx =
					AMAP_GET_BITS(struct amap_eth_tx_compl,
						      wrb_index, txcp);
				num_wrbs += be_tx_compl_process(adapter, txo,
								end_idx);
				cmpl++;
			}
			if (cmpl) {
				/* Ack processed compls and release the wrbs;
				 * reset counters for the next queue */
				be_cq_notify(adapter, txo->cq.id, false, cmpl);
				atomic_sub(num_wrbs, &txq->used);
				cmpl = 0;
				num_wrbs = 0;
			}
			if (atomic_read(&txq->used) == 0)
				pending_txqs--;
		}

		if (pending_txqs == 0 || ++timeo > 200)
			break;

		mdelay(1);
	} while (true);

	for_all_tx_queues(adapter, txo, i) {
		txq = &txo->q;
		if (atomic_read(&txq->used))
			dev_err(&adapter->pdev->dev, "%d pending tx-compls\n",
				atomic_read(&txq->used));

		/* free posted tx for which compls will never arrive */
		while (atomic_read(&txq->used)) {
			sent_skb = txo->sent_skb_list[txq->tail];
			end_idx = txq->tail;
			/* Recompute this skb's wrb span to know where its
			 * completion would have pointed */
			num_wrbs = wrb_cnt_for_skb(adapter, sent_skb,
						   &dummy_wrb);
			index_adv(&end_idx, num_wrbs - 1, txq->len);
			num_wrbs = be_tx_compl_process(adapter, txo, end_idx);
			atomic_sub(num_wrbs, &txq->used);
		}
	}
}
1728
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001729static void be_evt_queues_destroy(struct be_adapter *adapter)
1730{
1731 struct be_eq_obj *eqo;
1732 int i;
1733
1734 for_all_evt_queues(adapter, eqo, i) {
Padmanabh Ratnakar19d59aa2012-07-12 03:57:21 +00001735 if (eqo->q.created) {
1736 be_eq_clean(eqo);
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001737 be_cmd_q_destroy(adapter, &eqo->q, QTYPE_EQ);
Padmanabh Ratnakar19d59aa2012-07-12 03:57:21 +00001738 }
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001739 be_queue_free(adapter, &eqo->q);
1740 }
1741}
1742
/* Create one event queue per irq. Returns 0 or the first failure code;
 * on failure the caller is expected to unwind via be_evt_queues_destroy(),
 * which copes with partially created queues.
 */
static int be_evt_queues_create(struct be_adapter *adapter)
{
	struct be_queue_info *eq;
	struct be_eq_obj *eqo;
	int i, rc;

	adapter->num_evt_qs = num_irqs(adapter);

	for_all_evt_queues(adapter, eqo, i) {
		eqo->adapter = adapter;
		eqo->tx_budget = BE_TX_BUDGET;
		eqo->idx = i;
		eqo->max_eqd = BE_MAX_EQD;
		eqo->enable_aic = true;	/* adaptive interrupt coalescing */

		eq = &eqo->q;
		rc = be_queue_alloc(adapter, eq, EVNT_Q_LEN,
					sizeof(struct be_eq_entry));
		if (rc)
			return rc;

		rc = be_cmd_eq_create(adapter, eq, eqo->cur_eqd);
		if (rc)
			return rc;
	}
	return 0;
}
1770
Sathya Perla5fb379e2009-06-18 00:02:59 +00001771static void be_mcc_queues_destroy(struct be_adapter *adapter)
1772{
1773 struct be_queue_info *q;
Sathya Perla5fb379e2009-06-18 00:02:59 +00001774
Sathya Perla8788fdc2009-07-27 22:52:03 +00001775 q = &adapter->mcc_obj.q;
Sathya Perla5fb379e2009-06-18 00:02:59 +00001776 if (q->created)
Sathya Perla8788fdc2009-07-27 22:52:03 +00001777 be_cmd_q_destroy(adapter, q, QTYPE_MCCQ);
Sathya Perla5fb379e2009-06-18 00:02:59 +00001778 be_queue_free(adapter, q);
1779
Sathya Perla8788fdc2009-07-27 22:52:03 +00001780 q = &adapter->mcc_obj.cq;
Sathya Perla5fb379e2009-06-18 00:02:59 +00001781 if (q->created)
Sathya Perla8788fdc2009-07-27 22:52:03 +00001782 be_cmd_q_destroy(adapter, q, QTYPE_CQ);
Sathya Perla5fb379e2009-06-18 00:02:59 +00001783 be_queue_free(adapter, q);
1784}
1785
/* Must be called only after TX qs are created as MCC shares TX EQ */
/* Creates the MCC completion queue then the MCC WRB queue, unwinding via
 * the goto chain on any failure. Returns 0 on success, -1 on failure.
 */
static int be_mcc_queues_create(struct be_adapter *adapter)
{
	struct be_queue_info *q, *cq;

	cq = &adapter->mcc_obj.cq;
	if (be_queue_alloc(adapter, cq, MCC_CQ_LEN,
			sizeof(struct be_mcc_compl)))
		goto err;

	/* Use the default EQ for MCC completions */
	if (be_cmd_cq_create(adapter, cq, &mcc_eqo(adapter)->q, true, 0))
		goto mcc_cq_free;

	q = &adapter->mcc_obj.q;
	if (be_queue_alloc(adapter, q, MCC_Q_LEN, sizeof(struct be_mcc_wrb)))
		goto mcc_cq_destroy;

	if (be_cmd_mccq_create(adapter, q, cq))
		goto mcc_q_free;

	return 0;

mcc_q_free:
	be_queue_free(adapter, q);
mcc_cq_destroy:
	be_cmd_q_destroy(adapter, cq, QTYPE_CQ);
mcc_cq_free:
	be_queue_free(adapter, cq);
err:
	return -1;
}
1818
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001819static void be_tx_queues_destroy(struct be_adapter *adapter)
1820{
1821 struct be_queue_info *q;
Sathya Perla3c8def92011-06-12 20:01:58 +00001822 struct be_tx_obj *txo;
1823 u8 i;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001824
Sathya Perla3c8def92011-06-12 20:01:58 +00001825 for_all_tx_queues(adapter, txo, i) {
1826 q = &txo->q;
1827 if (q->created)
1828 be_cmd_q_destroy(adapter, q, QTYPE_TXQ);
1829 be_queue_free(adapter, q);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001830
Sathya Perla3c8def92011-06-12 20:01:58 +00001831 q = &txo->cq;
1832 if (q->created)
1833 be_cmd_q_destroy(adapter, q, QTYPE_CQ);
1834 be_queue_free(adapter, q);
1835 }
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001836}
1837
Sathya Perladafc0fe2011-10-24 02:45:02 +00001838static int be_num_txqs_want(struct be_adapter *adapter)
1839{
Padmanabh Ratnakarabb93952012-10-20 06:01:41 +00001840 if ((!lancer_chip(adapter) && sriov_want(adapter)) ||
1841 be_is_mc(adapter) ||
1842 (!lancer_chip(adapter) && !be_physfn(adapter)) ||
Sathya Perla39f1d942012-05-08 19:41:24 +00001843 adapter->generation == BE_GEN2)
Sathya Perladafc0fe2011-10-24 02:45:02 +00001844 return 1;
1845 else
Padmanabh Ratnakarabb93952012-10-20 06:01:41 +00001846 return adapter->max_tx_queues;
Sathya Perladafc0fe2011-10-24 02:45:02 +00001847}
1848
/* Size the TX queue count and create a completion queue for each TX ring.
 * Returns 0 or the first failure code (caller unwinds).
 */
static int be_tx_cqs_create(struct be_adapter *adapter)
{
	struct be_queue_info *cq, *eq;
	int status;
	struct be_tx_obj *txo;
	u8 i;

	adapter->num_tx_qs = be_num_txqs_want(adapter);
	if (adapter->num_tx_qs != MAX_TX_QS) {
		/* netif_set_real_num_tx_queues() must run under rtnl */
		rtnl_lock();
		netif_set_real_num_tx_queues(adapter->netdev,
			adapter->num_tx_qs);
		rtnl_unlock();
	}

	for_all_tx_queues(adapter, txo, i) {
		cq = &txo->cq;
		status = be_queue_alloc(adapter, cq, TX_CQ_LEN,
					sizeof(struct be_eth_tx_compl));
		if (status)
			return status;

		/* If num_evt_qs is less than num_tx_qs, then more than
		 * one txq share an eq
		 */
		eq = &adapter->eq_obj[i % adapter->num_evt_qs].q;
		status = be_cmd_cq_create(adapter, cq, eq, false, 3);
		if (status)
			return status;
	}
	return 0;
}
1881
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001882static int be_tx_qs_create(struct be_adapter *adapter)
1883{
1884 struct be_tx_obj *txo;
1885 int i, status;
1886
1887 for_all_tx_queues(adapter, txo, i) {
1888 status = be_queue_alloc(adapter, &txo->q, TX_Q_LEN,
1889 sizeof(struct be_eth_wrb));
1890 if (status)
1891 return status;
1892
1893 status = be_cmd_txq_create(adapter, &txo->q, &txo->cq);
1894 if (status)
1895 return status;
1896 }
1897
Sathya Perlad3791422012-09-28 04:39:44 +00001898 dev_info(&adapter->pdev->dev, "created %d TX queue(s)\n",
1899 adapter->num_tx_qs);
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001900 return 0;
1901}
1902
1903static void be_rx_cqs_destroy(struct be_adapter *adapter)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001904{
1905 struct be_queue_info *q;
Sathya Perla3abcded2010-10-03 22:12:27 -07001906 struct be_rx_obj *rxo;
1907 int i;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001908
Sathya Perla3abcded2010-10-03 22:12:27 -07001909 for_all_rx_queues(adapter, rxo, i) {
Sathya Perla3abcded2010-10-03 22:12:27 -07001910 q = &rxo->cq;
1911 if (q->created)
1912 be_cmd_q_destroy(adapter, q, QTYPE_CQ);
1913 be_queue_free(adapter, q);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001914 }
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001915}
1916
/* Size the RX queue count (RSS rings + 1 default queue when multiple irqs
 * are available) and create a completion queue per RX ring.
 * Returns 0 or the first failure code (caller unwinds).
 */
static int be_rx_cqs_create(struct be_adapter *adapter)
{
	struct be_queue_info *eq, *cq;
	struct be_rx_obj *rxo;
	int rc, i;

	/* We'll create as many RSS rings as there are irqs.
	 * But when there's only one irq there's no use creating RSS rings
	 */
	adapter->num_rx_qs = (num_irqs(adapter) > 1) ?
				num_irqs(adapter) + 1 : 1;
	if (adapter->num_rx_qs != MAX_RX_QS) {
		/* netif_set_real_num_rx_queues() must run under rtnl */
		rtnl_lock();
		netif_set_real_num_rx_queues(adapter->netdev,
					     adapter->num_rx_qs);
		rtnl_unlock();
	}

	adapter->big_page_size = (1 << get_order(rx_frag_size)) * PAGE_SIZE;
	for_all_rx_queues(adapter, rxo, i) {
		rxo->adapter = adapter;
		cq = &rxo->cq;
		rc = be_queue_alloc(adapter, cq, RX_CQ_LEN,
				sizeof(struct be_eth_rx_compl));
		if (rc)
			return rc;

		/* RX cqs may share event queues when there are more RX qs
		 * than event qs */
		eq = &adapter->eq_obj[i % adapter->num_evt_qs].q;
		rc = be_cmd_cq_create(adapter, cq, eq, false, 3);
		if (rc)
			return rc;
	}

	dev_info(&adapter->pdev->dev,
		 "created %d RSS queue(s) and 1 default RX queue\n",
		 adapter->num_rx_qs - 1);
	return 0;
}
1955
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001956static irqreturn_t be_intx(int irq, void *dev)
1957{
1958 struct be_adapter *adapter = dev;
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001959 int num_evts;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001960
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001961 /* With INTx only one EQ is used */
1962 num_evts = event_handle(&adapter->eq_obj[0]);
1963 if (num_evts)
1964 return IRQ_HANDLED;
1965 else
1966 return IRQ_NONE;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001967}
1968
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001969static irqreturn_t be_msix(int irq, void *dev)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001970{
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001971 struct be_eq_obj *eqo = dev;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001972
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001973 event_handle(eqo);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001974 return IRQ_HANDLED;
1975}
1976
Sathya Perla2e588f82011-03-11 02:49:26 +00001977static inline bool do_gro(struct be_rx_compl_info *rxcp)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001978{
Sathya Perla2e588f82011-03-11 02:49:26 +00001979 return (rxcp->tcpf && !rxcp->err) ? true : false;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001980}
1981
/* NAPI RX worker for one RX ring: drain up to @budget completions from
 * the ring's CQ, discarding flush/partial/mis-filtered completions and
 * handing good pkts to GRO or the regular RX path. Returns the number
 * of completions consumed.
 */
static int be_process_rx(struct be_rx_obj *rxo, struct napi_struct *napi,
			int budget)
{
	struct be_adapter *adapter = rxo->adapter;
	struct be_queue_info *rx_cq = &rxo->cq;
	struct be_rx_compl_info *rxcp;
	u32 work_done;

	for (work_done = 0; work_done < budget; work_done++) {
		rxcp = be_rx_compl_get(rxo);
		if (!rxcp)
			break;

		/* Is it a flush compl that has no data */
		if (unlikely(rxcp->num_rcvd == 0))
			goto loop_continue;

		/* Discard compl with partial DMA Lancer B0 */
		if (unlikely(!rxcp->pkt_size)) {
			be_rx_compl_discard(rxo, rxcp);
			goto loop_continue;
		}

		/* On BE drop pkts that arrive due to imperfect filtering in
		 * promiscuous mode on some skews
		 */
		if (unlikely(rxcp->port != adapter->port_num &&
				!lancer_chip(adapter))) {
			be_rx_compl_discard(rxo, rxcp);
			goto loop_continue;
		}

		/* error-free TCP pkts go through GRO; everything else
		 * takes the regular RX path */
		if (do_gro(rxcp))
			be_rx_compl_process_gro(rxo, napi, rxcp);
		else
			be_rx_compl_process(rxo, rxcp);
loop_continue:
		be_rx_stats_update(rxo, rxcp);
	}

	if (work_done) {
		/* report consumed compls to hw; replenish RX frags when
		 * the RX queue is running low */
		be_cq_notify(adapter, rx_cq->id, true, work_done);

		if (atomic_read(&rxo->q.used) < RX_FRAGS_REFILL_WM)
			be_post_rx_frags(rxo, GFP_ATOMIC);
	}

	return work_done;
}
2031
/* Reap up to @budget TX completions from @txo's CQ, releasing the
 * consumed wrbs and waking netdev subqueue @idx if it was stopped for
 * lack of wrbs. Returns true when fewer than @budget completions were
 * found, i.e. the CQ was fully drained.
 */
static bool be_process_tx(struct be_adapter *adapter, struct be_tx_obj *txo,
			int budget, int idx)
{
	struct be_eth_tx_compl *txcp;
	int num_wrbs = 0, work_done;

	for (work_done = 0; work_done < budget; work_done++) {
		txcp = be_tx_compl_get(&txo->cq);
		if (!txcp)
			break;
		/* the compl carries the index of the pkt's last wrb;
		 * be_tx_compl_process() returns how many wrbs it freed */
		num_wrbs += be_tx_compl_process(adapter, txo,
				AMAP_GET_BITS(struct amap_eth_tx_compl,
					wrb_index, txcp));
	}

	if (work_done) {
		be_cq_notify(adapter, txo->cq.id, true, work_done);
		atomic_sub(num_wrbs, &txo->q.used);

		/* As Tx wrbs have been freed up, wake up netdev queue
		 * if it was stopped due to lack of tx wrbs. */
		if (__netif_subqueue_stopped(adapter->netdev, idx) &&
			atomic_read(&txo->q.used) < txo->q.len / 2) {
			netif_wake_subqueue(adapter->netdev, idx);
		}

		/* u64 stats are updated under a seqcount for 32-bit safety */
		u64_stats_update_begin(&tx_stats(txo)->sync_compl);
		tx_stats(txo)->tx_compl += work_done;
		u64_stats_update_end(&tx_stats(txo)->sync_compl);
	}
	return (work_done < budget); /* Done */
}
Sathya Perla3c8def92011-06-12 20:01:58 +00002064
/* NAPI poll handler shared by all EQs. TX and RX queues are striped
 * across EQs: queue i is serviced by EQ (i % num_evt_qs). Each call
 * drains this EQ's TX and RX rings, handles MCC completions on the
 * MCC EQ, and either completes NAPI and re-arms the EQ or stays in
 * polling mode.
 */
int be_poll(struct napi_struct *napi, int budget)
{
	struct be_eq_obj *eqo = container_of(napi, struct be_eq_obj, napi);
	struct be_adapter *adapter = eqo->adapter;
	int max_work = 0, work, i;
	bool tx_done;

	/* Process all TXQs serviced by this EQ */
	for (i = eqo->idx; i < adapter->num_tx_qs; i += adapter->num_evt_qs) {
		tx_done = be_process_tx(adapter, &adapter->tx_obj[i],
					eqo->tx_budget, i);
		/* any TXQ not fully drained forces another poll round */
		if (!tx_done)
			max_work = budget;
	}

	/* This loop will iterate twice for EQ0 in which
	 * completions of the last RXQ (default one) are also processed
	 * For other EQs the loop iterates only once
	 */
	for (i = eqo->idx; i < adapter->num_rx_qs; i += adapter->num_evt_qs) {
		work = be_process_rx(&adapter->rx_obj[i], napi, budget);
		max_work = max(work, max_work);
	}

	if (is_mcc_eqo(eqo))
		be_process_mcc(adapter);

	if (max_work < budget) {
		/* all queues drained: exit polling and re-enable the EQ */
		napi_complete(napi);
		be_eq_notify(adapter, eqo->q.id, true, false, 0);
	} else {
		/* As we'll continue in polling mode, count and clear events */
		be_eq_notify(adapter, eqo->q.id, false, false, events_get(eqo));
	}
	return max_work;
}
2101
Padmanabh Ratnakarf67ef7b2012-07-12 03:57:09 +00002102void be_detect_error(struct be_adapter *adapter)
Ajit Khaparde7c185272010-07-29 06:16:33 +00002103{
Padmanabh Ratnakare1cfb672011-11-03 01:50:08 +00002104 u32 ue_lo = 0, ue_hi = 0, ue_lo_mask = 0, ue_hi_mask = 0;
2105 u32 sliport_status = 0, sliport_err1 = 0, sliport_err2 = 0;
Ajit Khaparde7c185272010-07-29 06:16:33 +00002106 u32 i;
2107
Padmanabh Ratnakarf67ef7b2012-07-12 03:57:09 +00002108 if (be_crit_error(adapter))
Sathya Perla72f02482011-11-10 19:17:58 +00002109 return;
2110
Padmanabh Ratnakare1cfb672011-11-03 01:50:08 +00002111 if (lancer_chip(adapter)) {
2112 sliport_status = ioread32(adapter->db + SLIPORT_STATUS_OFFSET);
2113 if (sliport_status & SLIPORT_STATUS_ERR_MASK) {
2114 sliport_err1 = ioread32(adapter->db +
2115 SLIPORT_ERROR1_OFFSET);
2116 sliport_err2 = ioread32(adapter->db +
2117 SLIPORT_ERROR2_OFFSET);
2118 }
2119 } else {
2120 pci_read_config_dword(adapter->pdev,
2121 PCICFG_UE_STATUS_LOW, &ue_lo);
2122 pci_read_config_dword(adapter->pdev,
2123 PCICFG_UE_STATUS_HIGH, &ue_hi);
2124 pci_read_config_dword(adapter->pdev,
2125 PCICFG_UE_STATUS_LOW_MASK, &ue_lo_mask);
2126 pci_read_config_dword(adapter->pdev,
2127 PCICFG_UE_STATUS_HI_MASK, &ue_hi_mask);
Ajit Khaparde7c185272010-07-29 06:16:33 +00002128
Padmanabh Ratnakarf67ef7b2012-07-12 03:57:09 +00002129 ue_lo = (ue_lo & ~ue_lo_mask);
2130 ue_hi = (ue_hi & ~ue_hi_mask);
Padmanabh Ratnakare1cfb672011-11-03 01:50:08 +00002131 }
Ajit Khaparde7c185272010-07-29 06:16:33 +00002132
Ajit Khaparde1451ae62012-10-08 18:18:21 +00002133 /* On certain platforms BE hardware can indicate spurious UEs.
2134 * Allow the h/w to stop working completely in case of a real UE.
2135 * Hence not setting the hw_error for UE detection.
2136 */
2137 if (sliport_status & SLIPORT_STATUS_ERR_MASK) {
Padmanabh Ratnakarf67ef7b2012-07-12 03:57:09 +00002138 adapter->hw_error = true;
Sathya Perla434b3642011-11-10 19:17:59 +00002139 dev_err(&adapter->pdev->dev,
Padmanabh Ratnakarf67ef7b2012-07-12 03:57:09 +00002140 "Error detected in the card\n");
2141 }
2142
2143 if (sliport_status & SLIPORT_STATUS_ERR_MASK) {
2144 dev_err(&adapter->pdev->dev,
2145 "ERR: sliport status 0x%x\n", sliport_status);
2146 dev_err(&adapter->pdev->dev,
2147 "ERR: sliport error1 0x%x\n", sliport_err1);
2148 dev_err(&adapter->pdev->dev,
2149 "ERR: sliport error2 0x%x\n", sliport_err2);
Ajit Khaparded053de92010-09-03 06:23:30 +00002150 }
2151
Padmanabh Ratnakare1cfb672011-11-03 01:50:08 +00002152 if (ue_lo) {
2153 for (i = 0; ue_lo; ue_lo >>= 1, i++) {
2154 if (ue_lo & 1)
Ajit Khaparde7c185272010-07-29 06:16:33 +00002155 dev_err(&adapter->pdev->dev,
2156 "UE: %s bit set\n", ue_status_low_desc[i]);
2157 }
2158 }
Padmanabh Ratnakarf67ef7b2012-07-12 03:57:09 +00002159
Padmanabh Ratnakare1cfb672011-11-03 01:50:08 +00002160 if (ue_hi) {
2161 for (i = 0; ue_hi; ue_hi >>= 1, i++) {
2162 if (ue_hi & 1)
Ajit Khaparde7c185272010-07-29 06:16:33 +00002163 dev_err(&adapter->pdev->dev,
2164 "UE: %s bit set\n", ue_status_hi_desc[i]);
2165 }
2166 }
2167
2168}
2169
Sathya Perla8d56ff12009-11-22 22:02:26 +00002170static void be_msix_disable(struct be_adapter *adapter)
2171{
Sathya Perlaac6a0c42011-03-21 20:49:25 +00002172 if (msix_enabled(adapter)) {
Sathya Perla8d56ff12009-11-22 22:02:26 +00002173 pci_disable_msix(adapter->pdev);
Sathya Perlaac6a0c42011-03-21 20:49:25 +00002174 adapter->num_msix_vec = 0;
Sathya Perla3abcded2010-10-03 22:12:27 -07002175 }
2176}
2177
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002178static uint be_num_rss_want(struct be_adapter *adapter)
2179{
Yuval Mintz30e80b52012-07-01 03:19:00 +00002180 u32 num = 0;
Padmanabh Ratnakarabb93952012-10-20 06:01:41 +00002181
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002182 if ((adapter->function_caps & BE_FUNCTION_CAPS_RSS) &&
Padmanabh Ratnakarabb93952012-10-20 06:01:41 +00002183 (lancer_chip(adapter) ||
2184 (!sriov_want(adapter) && be_physfn(adapter)))) {
2185 num = adapter->max_rss_queues;
Yuval Mintz30e80b52012-07-01 03:19:00 +00002186 num = min_t(u32, num, (u32)netif_get_num_default_rss_queues());
2187 }
2188 return num;
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002189}
2190
/* Compute how many MSI-x vectors to request (RSS rings capped by
 * online CPUs, plus RoCE vectors when supported) and try to enable
 * them, retrying once with the count the PCI core reports available.
 * On failure the adapter is left without MSI-x (caller falls back to
 * INTx); only a warning is logged.
 */
static void be_msix_enable(struct be_adapter *adapter)
{
#define BE_MIN_MSIX_VECTORS 1
	int i, status, num_vec, num_roce_vec = 0;
	struct device *dev = &adapter->pdev->dev;

	/* If RSS queues are not used, need a vec for default RX Q */
	num_vec = min(be_num_rss_want(adapter), num_online_cpus());
	if (be_roce_supported(adapter)) {
		num_roce_vec = min_t(u32, MAX_ROCE_MSIX_VECTORS,
					(num_online_cpus() + 1));
		num_roce_vec = min(num_roce_vec, MAX_ROCE_EQS);
		num_vec += num_roce_vec;
		num_vec = min(num_vec, MAX_MSIX_VECTORS);
	}
	num_vec = max(num_vec, BE_MIN_MSIX_VECTORS);

	for (i = 0; i < num_vec; i++)
		adapter->msix_entries[i].entry = i;

	status = pci_enable_msix(adapter->pdev, adapter->msix_entries, num_vec);
	if (status == 0) {
		goto done;
	} else if (status >= BE_MIN_MSIX_VECTORS) {
		/* a positive return from pci_enable_msix() is the number
		 * of vectors actually available: retry with that count */
		num_vec = status;
		if (pci_enable_msix(adapter->pdev, adapter->msix_entries,
				num_vec) == 0)
			goto done;
	}

	dev_warn(dev, "MSIx enable failed\n");
	return;
done:
	/* split the granted vectors between the NIC and RoCE */
	if (be_roce_supported(adapter)) {
		if (num_vec > num_roce_vec) {
			adapter->num_msix_vec = num_vec - num_roce_vec;
			adapter->num_msix_roce_vec =
				num_vec - adapter->num_msix_vec;
		} else {
			/* too few vectors for RoCE: give them all to the NIC */
			adapter->num_msix_vec = num_vec;
			adapter->num_msix_roce_vec = 0;
		}
	} else
		adapter->num_msix_vec = num_vec;
	dev_info(dev, "enabled %d MSI-x vector(s)\n", adapter->num_msix_vec);
	return;
}
2238
Sathya Perlafe6d2a32010-11-21 23:25:50 +00002239static inline int be_msix_vec_get(struct be_adapter *adapter,
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002240 struct be_eq_obj *eqo)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002241{
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002242 return adapter->msix_entries[eqo->idx].vector;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002243}
2244
/* Request one MSI-x IRQ per event queue, naming each "<netdev>-q<N>".
 * On failure, free the IRQs already requested (walking back from the
 * failing index) and disable MSI-x entirely.
 */
static int be_msix_register(struct be_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	struct be_eq_obj *eqo;
	int status, i, vec;

	for_all_evt_queues(adapter, eqo, i) {
		sprintf(eqo->desc, "%s-q%d", netdev->name, i);
		vec = be_msix_vec_get(adapter, eqo);
		status = request_irq(vec, be_msix, 0, eqo->desc, eqo);
		if (status)
			goto err_msix;
	}

	return 0;
err_msix:
	/* unwind: i indexes the EQ whose request_irq() failed, so start
	 * freeing from i-1 down to 0 */
	for (i--, eqo = &adapter->eq_obj[i]; i >= 0; i--, eqo--)
		free_irq(be_msix_vec_get(adapter, eqo), eqo);
	dev_warn(&adapter->pdev->dev, "MSIX Request IRQ failed - err %d\n",
		status);
	be_msix_disable(adapter);
	return status;
}
2268
2269static int be_irq_register(struct be_adapter *adapter)
2270{
2271 struct net_device *netdev = adapter->netdev;
2272 int status;
2273
Sathya Perlaac6a0c42011-03-21 20:49:25 +00002274 if (msix_enabled(adapter)) {
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002275 status = be_msix_register(adapter);
2276 if (status == 0)
2277 goto done;
Sarveshwar Bandiba343c72010-03-31 02:56:12 +00002278 /* INTx is not supported for VF */
2279 if (!be_physfn(adapter))
2280 return status;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002281 }
2282
2283 /* INTx */
2284 netdev->irq = adapter->pdev->irq;
2285 status = request_irq(netdev->irq, be_intx, IRQF_SHARED, netdev->name,
2286 adapter);
2287 if (status) {
2288 dev_err(&adapter->pdev->dev,
2289 "INTx request IRQ failed - err %d\n", status);
2290 return status;
2291 }
2292done:
2293 adapter->isr_registered = true;
2294 return 0;
2295}
2296
2297static void be_irq_unregister(struct be_adapter *adapter)
2298{
2299 struct net_device *netdev = adapter->netdev;
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002300 struct be_eq_obj *eqo;
Sathya Perla3abcded2010-10-03 22:12:27 -07002301 int i;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002302
2303 if (!adapter->isr_registered)
2304 return;
2305
2306 /* INTx */
Sathya Perlaac6a0c42011-03-21 20:49:25 +00002307 if (!msix_enabled(adapter)) {
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002308 free_irq(netdev->irq, adapter);
2309 goto done;
2310 }
2311
2312 /* MSIx */
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002313 for_all_evt_queues(adapter, eqo, i)
2314 free_irq(be_msix_vec_get(adapter, eqo), eqo);
Sathya Perla3abcded2010-10-03 22:12:27 -07002315
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002316done:
2317 adapter->isr_registered = false;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002318}
2319
/* Destroy every created RX queue in hw, drain its CQ, and free the
 * queue memory. A short delay after the hw destroy lets in-flight DMA
 * and the flush completion land before the CQ is cleaned.
 */
static void be_rx_qs_destroy(struct be_adapter *adapter)
{
	struct be_queue_info *q;
	struct be_rx_obj *rxo;
	int i;

	for_all_rx_queues(adapter, rxo, i) {
		q = &rxo->q;
		if (q->created) {
			be_cmd_rxq_destroy(adapter, q);
			/* After the rxq is invalidated, wait for a grace time
			 * of 1ms for all dma to end and the flush compl to
			 * arrive
			 */
			mdelay(1);
			be_rx_cq_clean(rxo);
		}
		be_queue_free(adapter, q);
	}
}
2340
/* ndo_stop: quiesce the device roughly in the reverse order of
 * be_open() - stop RoCE and async MCC, mask interrupts, disable NAPI
 * and clean each EQ, release IRQs, drain TX completions, then tear
 * down the RX queues.
 */
static int be_close(struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_eq_obj *eqo;
	int i;

	be_roce_dev_close(adapter);

	be_async_mcc_disable(adapter);

	/* the global be_intr_set() masking is not used on Lancer */
	if (!lancer_chip(adapter))
		be_intr_set(adapter, false);

	for_all_evt_queues(adapter, eqo, i) {
		napi_disable(&eqo->napi);
		/* make sure no ISR is still running for this EQ */
		if (msix_enabled(adapter))
			synchronize_irq(be_msix_vec_get(adapter, eqo));
		else
			synchronize_irq(netdev->irq);
		be_eq_clean(eqo);
	}

	be_irq_unregister(adapter);

	/* Wait for all pending tx completions to arrive so that
	 * all tx skbs are freed.
	 */
	be_tx_compl_clean(adapter);

	be_rx_qs_destroy(adapter);
	return 0;
}
2373
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002374static int be_rx_qs_create(struct be_adapter *adapter)
Sathya Perla482c9e72011-06-29 23:33:17 +00002375{
2376 struct be_rx_obj *rxo;
Padmanabh Ratnakare9008ee2011-11-25 05:48:53 +00002377 int rc, i, j;
2378 u8 rsstable[128];
Sathya Perla482c9e72011-06-29 23:33:17 +00002379
2380 for_all_rx_queues(adapter, rxo, i) {
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002381 rc = be_queue_alloc(adapter, &rxo->q, RX_Q_LEN,
2382 sizeof(struct be_eth_rx_d));
2383 if (rc)
2384 return rc;
2385 }
2386
2387 /* The FW would like the default RXQ to be created first */
2388 rxo = default_rxo(adapter);
2389 rc = be_cmd_rxq_create(adapter, &rxo->q, rxo->cq.id, rx_frag_size,
2390 adapter->if_handle, false, &rxo->rss_id);
2391 if (rc)
2392 return rc;
2393
2394 for_all_rss_queues(adapter, rxo, i) {
Sathya Perla482c9e72011-06-29 23:33:17 +00002395 rc = be_cmd_rxq_create(adapter, &rxo->q, rxo->cq.id,
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002396 rx_frag_size, adapter->if_handle,
2397 true, &rxo->rss_id);
Sathya Perla482c9e72011-06-29 23:33:17 +00002398 if (rc)
2399 return rc;
2400 }
2401
2402 if (be_multi_rxq(adapter)) {
Padmanabh Ratnakare9008ee2011-11-25 05:48:53 +00002403 for (j = 0; j < 128; j += adapter->num_rx_qs - 1) {
2404 for_all_rss_queues(adapter, rxo, i) {
2405 if ((j + i) >= 128)
2406 break;
2407 rsstable[j + i] = rxo->rss_id;
2408 }
2409 }
2410 rc = be_cmd_rss_config(adapter, rsstable, 128);
Sathya Perla482c9e72011-06-29 23:33:17 +00002411 if (rc)
2412 return rc;
2413 }
2414
2415 /* First time posting */
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002416 for_all_rx_queues(adapter, rxo, i)
Sathya Perla482c9e72011-06-29 23:33:17 +00002417 be_post_rx_frags(rxo, GFP_KERNEL);
Sathya Perla889cd4b2010-05-30 23:33:45 +00002418 return 0;
2419}
2420
/* ndo_open: create the RX queues, register IRQs, unmask interrupts
 * (non-Lancer), arm all RX/TX CQs, enable async MCC and NAPI on every
 * EQ, report link state, and open the RoCE side. Any failure unwinds
 * via be_close().
 */
static int be_open(struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_eq_obj *eqo;
	struct be_rx_obj *rxo;
	struct be_tx_obj *txo;
	u8 link_status;
	int status, i;

	status = be_rx_qs_create(adapter);
	if (status)
		goto err;

	be_irq_register(adapter);

	/* the global be_intr_set() unmasking is not used on Lancer */
	if (!lancer_chip(adapter))
		be_intr_set(adapter, true);

	/* arm all RX and TX completion queues */
	for_all_rx_queues(adapter, rxo, i)
		be_cq_notify(adapter, rxo->cq.id, true, 0);

	for_all_tx_queues(adapter, txo, i)
		be_cq_notify(adapter, txo->cq.id, true, 0);

	be_async_mcc_enable(adapter);

	for_all_evt_queues(adapter, eqo, i) {
		napi_enable(&eqo->napi);
		be_eq_notify(adapter, eqo->q.id, true, false, 0);
	}

	/* link-status query is best effort; only report on success */
	status = be_cmd_link_status_query(adapter, NULL, &link_status, 0);
	if (!status)
		be_link_status_update(adapter, link_status);

	be_roce_dev_open(adapter);
	return 0;
err:
	be_close(adapter->netdev);
	return -EIO;
}
2462
Ajit Khaparde71d8d1b2009-12-03 06:16:59 +00002463static int be_setup_wol(struct be_adapter *adapter, bool enable)
2464{
2465 struct be_dma_mem cmd;
2466 int status = 0;
2467 u8 mac[ETH_ALEN];
2468
2469 memset(mac, 0, ETH_ALEN);
2470
2471 cmd.size = sizeof(struct be_cmd_req_acpi_wol_magic_config);
Ivan Vecera2b7bceb2011-02-02 08:05:12 +00002472 cmd.va = dma_alloc_coherent(&adapter->pdev->dev, cmd.size, &cmd.dma,
2473 GFP_KERNEL);
Ajit Khaparde71d8d1b2009-12-03 06:16:59 +00002474 if (cmd.va == NULL)
2475 return -1;
2476 memset(cmd.va, 0, cmd.size);
2477
2478 if (enable) {
2479 status = pci_write_config_dword(adapter->pdev,
2480 PCICFG_PM_CONTROL_OFFSET, PCICFG_PM_CONTROL_MASK);
2481 if (status) {
2482 dev_err(&adapter->pdev->dev,
Frans Pop2381a552010-03-24 07:57:36 +00002483 "Could not enable Wake-on-lan\n");
Ivan Vecera2b7bceb2011-02-02 08:05:12 +00002484 dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va,
2485 cmd.dma);
Ajit Khaparde71d8d1b2009-12-03 06:16:59 +00002486 return status;
2487 }
2488 status = be_cmd_enable_magic_wol(adapter,
2489 adapter->netdev->dev_addr, &cmd);
2490 pci_enable_wake(adapter->pdev, PCI_D3hot, 1);
2491 pci_enable_wake(adapter->pdev, PCI_D3cold, 1);
2492 } else {
2493 status = be_cmd_enable_magic_wol(adapter, mac, &cmd);
2494 pci_enable_wake(adapter->pdev, PCI_D3hot, 0);
2495 pci_enable_wake(adapter->pdev, PCI_D3cold, 0);
2496 }
2497
Ivan Vecera2b7bceb2011-02-02 08:05:12 +00002498 dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va, cmd.dma);
Ajit Khaparde71d8d1b2009-12-03 06:16:59 +00002499 return status;
2500}
2501
Ajit Khaparde6d87f5c2010-08-25 00:32:33 +00002502/*
2503 * Generate a seed MAC address from the PF MAC Address using jhash.
2504 * MAC Address for VFs are assigned incrementally starting from the seed.
2505 * These addresses are programmed in the ASIC by the PF and the VF driver
2506 * queries for the MAC address during its probe.
2507 */
2508static inline int be_vf_eth_addr_config(struct be_adapter *adapter)
2509{
Sathya Perlaf9449ab2011-10-24 02:45:01 +00002510 u32 vf;
Sathya Perla3abcded2010-10-03 22:12:27 -07002511 int status = 0;
Ajit Khaparde6d87f5c2010-08-25 00:32:33 +00002512 u8 mac[ETH_ALEN];
Sathya Perla11ac75e2011-12-13 00:58:50 +00002513 struct be_vf_cfg *vf_cfg;
Ajit Khaparde6d87f5c2010-08-25 00:32:33 +00002514
2515 be_vf_eth_addr_generate(adapter, mac);
2516
Sathya Perla11ac75e2011-12-13 00:58:50 +00002517 for_all_vfs(adapter, vf_cfg, vf) {
Padmanabh Ratnakar590c3912011-11-25 05:47:26 +00002518 if (lancer_chip(adapter)) {
2519 status = be_cmd_set_mac_list(adapter, mac, 1, vf + 1);
2520 } else {
2521 status = be_cmd_pmac_add(adapter, mac,
Sathya Perla11ac75e2011-12-13 00:58:50 +00002522 vf_cfg->if_handle,
2523 &vf_cfg->pmac_id, vf + 1);
Padmanabh Ratnakar590c3912011-11-25 05:47:26 +00002524 }
2525
Ajit Khaparde6d87f5c2010-08-25 00:32:33 +00002526 if (status)
2527 dev_err(&adapter->pdev->dev,
Padmanabh Ratnakar590c3912011-11-25 05:47:26 +00002528 "Mac address assignment failed for VF %d\n", vf);
Ajit Khaparde6d87f5c2010-08-25 00:32:33 +00002529 else
Sathya Perla11ac75e2011-12-13 00:58:50 +00002530 memcpy(vf_cfg->mac_addr, mac, ETH_ALEN);
Ajit Khaparde6d87f5c2010-08-25 00:32:33 +00002531
2532 mac[5] += 1;
2533 }
2534 return status;
2535}
2536
/* Tear down per-VF state: remove programmed MACs, destroy the VF
 * interfaces and disable SR-IOV. If any VF is still assigned to a VM,
 * hw cleanup is skipped and only the host-side bookkeeping is freed.
 */
static void be_vf_clear(struct be_adapter *adapter)
{
	struct be_vf_cfg *vf_cfg;
	u32 vf;

	if (be_find_vfs(adapter, ASSIGNED)) {
		dev_warn(&adapter->pdev->dev, "VFs are assigned to VMs\n");
		goto done;
	}

	for_all_vfs(adapter, vf_cfg, vf) {
		/* Lancer clears the MAC via the MAC-list cmd; BE deletes
		 * the pmac entry */
		if (lancer_chip(adapter))
			be_cmd_set_mac_list(adapter, NULL, 0, vf + 1);
		else
			be_cmd_pmac_del(adapter, vf_cfg->if_handle,
					vf_cfg->pmac_id, vf + 1);

		be_cmd_if_destroy(adapter, vf_cfg->if_handle, vf + 1);
	}
	pci_disable_sriov(adapter->pdev);
done:
	kfree(adapter->vf_cfg);
	adapter->num_vfs = 0;
}
2561
/* Teardown counterpart of setup: stop the delayed worker, clear VFs,
 * delete the extra unicast MACs, destroy the interface and all queues
 * (MCC, RX CQs, TX, EQs), then release MSI-x.
 */
static int be_clear(struct be_adapter *adapter)
{
	int i = 1;	/* uc-mac pmac ids are stored starting at index 1 */

	if (adapter->flags & BE_FLAGS_WORKER_SCHEDULED) {
		cancel_delayed_work_sync(&adapter->work);
		adapter->flags &= ~BE_FLAGS_WORKER_SCHEDULED;
	}

	if (sriov_enabled(adapter))
		be_vf_clear(adapter);

	/* delete the additional unicast MACs programmed on the interface */
	for (; adapter->uc_macs > 0; adapter->uc_macs--, i++)
		be_cmd_pmac_del(adapter, adapter->if_handle,
				adapter->pmac_id[i], 0);

	be_cmd_if_destroy(adapter, adapter->if_handle, 0);

	be_mcc_queues_destroy(adapter);
	be_rx_cqs_destroy(adapter);
	be_tx_queues_destroy(adapter);
	be_evt_queues_destroy(adapter);

	kfree(adapter->pmac_id);
	adapter->pmac_id = NULL;

	be_msix_disable(adapter);
	return 0;
}
2591
Padmanabh Ratnakarabb93952012-10-20 06:01:41 +00002592static void be_get_vf_if_cap_flags(struct be_adapter *adapter,
2593 u32 *cap_flags, u8 domain)
2594{
2595 bool profile_present = false;
2596 int status;
2597
2598 if (lancer_chip(adapter)) {
2599 status = be_cmd_get_profile_config(adapter, cap_flags, domain);
2600 if (!status)
2601 profile_present = true;
2602 }
2603
2604 if (!profile_present)
2605 *cap_flags = BE_IF_FLAGS_UNTAGGED | BE_IF_FLAGS_BROADCAST |
2606 BE_IF_FLAGS_MULTICAST;
2607}
2608
Sathya Perla39f1d942012-05-08 19:41:24 +00002609static int be_vf_setup_init(struct be_adapter *adapter)
Sathya Perla30128032011-11-10 19:17:57 +00002610{
Sathya Perla11ac75e2011-12-13 00:58:50 +00002611 struct be_vf_cfg *vf_cfg;
Sathya Perla30128032011-11-10 19:17:57 +00002612 int vf;
2613
Sathya Perla39f1d942012-05-08 19:41:24 +00002614 adapter->vf_cfg = kcalloc(adapter->num_vfs, sizeof(*vf_cfg),
2615 GFP_KERNEL);
2616 if (!adapter->vf_cfg)
2617 return -ENOMEM;
2618
Sathya Perla11ac75e2011-12-13 00:58:50 +00002619 for_all_vfs(adapter, vf_cfg, vf) {
2620 vf_cfg->if_handle = -1;
2621 vf_cfg->pmac_id = -1;
Sathya Perla30128032011-11-10 19:17:57 +00002622 }
Sathya Perla39f1d942012-05-08 19:41:24 +00002623 return 0;
Sathya Perla30128032011-11-10 19:17:57 +00002624}
2625
Sathya Perlaf9449ab2011-10-24 02:45:01 +00002626static int be_vf_setup(struct be_adapter *adapter)
2627{
Sathya Perla11ac75e2011-12-13 00:58:50 +00002628 struct be_vf_cfg *vf_cfg;
Sathya Perla39f1d942012-05-08 19:41:24 +00002629 struct device *dev = &adapter->pdev->dev;
Sathya Perlaf9449ab2011-10-24 02:45:01 +00002630 u32 cap_flags, en_flags, vf;
Ajit Khapardef1f3ee12012-03-18 06:23:41 +00002631 u16 def_vlan, lnk_speed;
Sathya Perla39f1d942012-05-08 19:41:24 +00002632 int status, enabled_vfs;
Sathya Perlaf9449ab2011-10-24 02:45:01 +00002633
Sathya Perla39f1d942012-05-08 19:41:24 +00002634 enabled_vfs = be_find_vfs(adapter, ENABLED);
2635 if (enabled_vfs) {
2636 dev_warn(dev, "%d VFs are already enabled\n", enabled_vfs);
2637 dev_warn(dev, "Ignoring num_vfs=%d setting\n", num_vfs);
2638 return 0;
2639 }
2640
2641 if (num_vfs > adapter->dev_num_vfs) {
2642 dev_warn(dev, "Device supports %d VFs and not %d\n",
2643 adapter->dev_num_vfs, num_vfs);
2644 num_vfs = adapter->dev_num_vfs;
2645 }
2646
2647 status = pci_enable_sriov(adapter->pdev, num_vfs);
2648 if (!status) {
2649 adapter->num_vfs = num_vfs;
2650 } else {
2651 /* Platform doesn't support SRIOV though device supports it */
2652 dev_warn(dev, "SRIOV enable failed\n");
2653 return 0;
2654 }
2655
2656 status = be_vf_setup_init(adapter);
2657 if (status)
2658 goto err;
Sathya Perla30128032011-11-10 19:17:57 +00002659
Sathya Perla11ac75e2011-12-13 00:58:50 +00002660 for_all_vfs(adapter, vf_cfg, vf) {
Padmanabh Ratnakarabb93952012-10-20 06:01:41 +00002661 be_get_vf_if_cap_flags(adapter, &cap_flags, vf + 1);
2662
2663 en_flags = cap_flags & (BE_IF_FLAGS_UNTAGGED |
2664 BE_IF_FLAGS_BROADCAST |
2665 BE_IF_FLAGS_MULTICAST);
2666
Padmanabh Ratnakar1578e772012-06-07 04:37:08 +00002667 status = be_cmd_if_create(adapter, cap_flags, en_flags,
2668 &vf_cfg->if_handle, vf + 1);
Sathya Perlaf9449ab2011-10-24 02:45:01 +00002669 if (status)
2670 goto err;
Sathya Perlaf9449ab2011-10-24 02:45:01 +00002671 }
2672
Sathya Perla39f1d942012-05-08 19:41:24 +00002673 if (!enabled_vfs) {
2674 status = be_vf_eth_addr_config(adapter);
2675 if (status)
2676 goto err;
2677 }
Sathya Perlaf9449ab2011-10-24 02:45:01 +00002678
Sathya Perla11ac75e2011-12-13 00:58:50 +00002679 for_all_vfs(adapter, vf_cfg, vf) {
Vasundhara Volam8a046d32012-08-28 20:37:42 +00002680 lnk_speed = 1000;
2681 status = be_cmd_set_qos(adapter, lnk_speed, vf + 1);
Sathya Perlaf9449ab2011-10-24 02:45:01 +00002682 if (status)
2683 goto err;
Sathya Perla11ac75e2011-12-13 00:58:50 +00002684 vf_cfg->tx_rate = lnk_speed * 10;
Ajit Khapardef1f3ee12012-03-18 06:23:41 +00002685
2686 status = be_cmd_get_hsw_config(adapter, &def_vlan,
2687 vf + 1, vf_cfg->if_handle);
2688 if (status)
2689 goto err;
2690 vf_cfg->def_vid = def_vlan;
Sathya Perlaf9449ab2011-10-24 02:45:01 +00002691 }
2692 return 0;
2693err:
2694 return status;
2695}
2696
Sathya Perla30128032011-11-10 19:17:57 +00002697static void be_setup_init(struct be_adapter *adapter)
2698{
2699 adapter->vlan_prio_bmap = 0xff;
Ajit Khaparde42f11cf2012-04-21 18:53:22 +00002700 adapter->phy.link_speed = -1;
Sathya Perla30128032011-11-10 19:17:57 +00002701 adapter->if_handle = -1;
2702 adapter->be3_native = false;
2703 adapter->promiscuous = false;
2704 adapter->eq_next_idx = 0;
2705}
2706
Padmanabh Ratnakar1578e772012-06-07 04:37:08 +00002707static int be_get_mac_addr(struct be_adapter *adapter, u8 *mac, u32 if_handle,
2708 bool *active_mac, u32 *pmac_id)
Padmanabh Ratnakar590c3912011-11-25 05:47:26 +00002709{
Padmanabh Ratnakar1578e772012-06-07 04:37:08 +00002710 int status = 0;
Padmanabh Ratnakare5e1ee82012-02-03 09:50:17 +00002711
Padmanabh Ratnakar1578e772012-06-07 04:37:08 +00002712 if (!is_zero_ether_addr(adapter->netdev->perm_addr)) {
2713 memcpy(mac, adapter->netdev->dev_addr, ETH_ALEN);
2714 if (!lancer_chip(adapter) && !be_physfn(adapter))
2715 *active_mac = true;
2716 else
2717 *active_mac = false;
Padmanabh Ratnakare5e1ee82012-02-03 09:50:17 +00002718
Padmanabh Ratnakar1578e772012-06-07 04:37:08 +00002719 return status;
Padmanabh Ratnakare5e1ee82012-02-03 09:50:17 +00002720 }
Padmanabh Ratnakar1578e772012-06-07 04:37:08 +00002721
2722 if (lancer_chip(adapter)) {
2723 status = be_cmd_get_mac_from_list(adapter, mac,
2724 active_mac, pmac_id, 0);
2725 if (*active_mac) {
Sathya Perla5ee49792012-09-28 04:39:41 +00002726 status = be_cmd_mac_addr_query(adapter, mac, false,
2727 if_handle, *pmac_id);
Padmanabh Ratnakar1578e772012-06-07 04:37:08 +00002728 }
2729 } else if (be_physfn(adapter)) {
2730 /* For BE3, for PF get permanent MAC */
Sathya Perla5ee49792012-09-28 04:39:41 +00002731 status = be_cmd_mac_addr_query(adapter, mac, true, 0, 0);
Padmanabh Ratnakar1578e772012-06-07 04:37:08 +00002732 *active_mac = false;
2733 } else {
2734 /* For BE3, for VF get soft MAC assigned by PF*/
Sathya Perla5ee49792012-09-28 04:39:41 +00002735 status = be_cmd_mac_addr_query(adapter, mac, false,
Padmanabh Ratnakar1578e772012-06-07 04:37:08 +00002736 if_handle, 0);
2737 *active_mac = true;
2738 }
Padmanabh Ratnakar590c3912011-11-25 05:47:26 +00002739 return status;
2740}
2741
Padmanabh Ratnakarabb93952012-10-20 06:01:41 +00002742static void be_get_resources(struct be_adapter *adapter)
2743{
2744 int status;
2745 bool profile_present = false;
2746
2747 if (lancer_chip(adapter)) {
2748 status = be_cmd_get_func_config(adapter);
2749
2750 if (!status)
2751 profile_present = true;
2752 }
2753
2754 if (profile_present) {
2755 /* Sanity fixes for Lancer */
2756 adapter->max_pmac_cnt = min_t(u16, adapter->max_pmac_cnt,
2757 BE_UC_PMAC_COUNT);
2758 adapter->max_vlans = min_t(u16, adapter->max_vlans,
2759 BE_NUM_VLANS_SUPPORTED);
2760 adapter->max_mcast_mac = min_t(u16, adapter->max_mcast_mac,
2761 BE_MAX_MC);
2762 adapter->max_tx_queues = min_t(u16, adapter->max_tx_queues,
2763 MAX_TX_QS);
2764 adapter->max_rss_queues = min_t(u16, adapter->max_rss_queues,
2765 BE3_MAX_RSS_QS);
2766 adapter->max_event_queues = min_t(u16,
2767 adapter->max_event_queues,
2768 BE3_MAX_RSS_QS);
2769
2770 if (adapter->max_rss_queues &&
2771 adapter->max_rss_queues == adapter->max_rx_queues)
2772 adapter->max_rss_queues -= 1;
2773
2774 if (adapter->max_event_queues < adapter->max_rss_queues)
2775 adapter->max_rss_queues = adapter->max_event_queues;
2776
2777 } else {
2778 if (be_physfn(adapter))
2779 adapter->max_pmac_cnt = BE_UC_PMAC_COUNT;
2780 else
2781 adapter->max_pmac_cnt = BE_VF_UC_PMAC_COUNT;
2782
2783 if (adapter->function_mode & FLEX10_MODE)
2784 adapter->max_vlans = BE_NUM_VLANS_SUPPORTED/8;
2785 else
2786 adapter->max_vlans = BE_NUM_VLANS_SUPPORTED;
2787
2788 adapter->max_mcast_mac = BE_MAX_MC;
2789 adapter->max_tx_queues = MAX_TX_QS;
2790 adapter->max_rss_queues = (adapter->be3_native) ?
2791 BE3_MAX_RSS_QS : BE2_MAX_RSS_QS;
2792 adapter->max_event_queues = BE3_MAX_RSS_QS;
2793
2794 adapter->if_cap_flags = BE_IF_FLAGS_UNTAGGED |
2795 BE_IF_FLAGS_BROADCAST |
2796 BE_IF_FLAGS_MULTICAST |
2797 BE_IF_FLAGS_PASS_L3L4_ERRORS |
2798 BE_IF_FLAGS_MCAST_PROMISCUOUS |
2799 BE_IF_FLAGS_VLAN_PROMISCUOUS |
2800 BE_IF_FLAGS_PROMISCUOUS;
2801
2802 if (adapter->function_caps & BE_FUNCTION_CAPS_RSS)
2803 adapter->if_cap_flags |= BE_IF_FLAGS_RSS;
2804 }
2805}
2806
Sathya Perla39f1d942012-05-08 19:41:24 +00002807/* Routine to query per function resource limits */
2808static int be_get_config(struct be_adapter *adapter)
2809{
Padmanabh Ratnakarabb93952012-10-20 06:01:41 +00002810 int pos, status;
Sathya Perla39f1d942012-05-08 19:41:24 +00002811 u16 dev_num_vfs;
2812
Padmanabh Ratnakarabb93952012-10-20 06:01:41 +00002813 status = be_cmd_query_fw_cfg(adapter, &adapter->port_num,
2814 &adapter->function_mode,
2815 &adapter->function_caps);
2816 if (status)
2817 goto err;
2818
2819 be_get_resources(adapter);
2820
2821 /* primary mac needs 1 pmac entry */
2822 adapter->pmac_id = kcalloc(adapter->max_pmac_cnt + 1,
2823 sizeof(u32), GFP_KERNEL);
2824 if (!adapter->pmac_id) {
2825 status = -ENOMEM;
2826 goto err;
2827 }
2828
Sathya Perla39f1d942012-05-08 19:41:24 +00002829 pos = pci_find_ext_capability(adapter->pdev, PCI_EXT_CAP_ID_SRIOV);
2830 if (pos) {
2831 pci_read_config_word(adapter->pdev, pos + PCI_SRIOV_TOTAL_VF,
2832 &dev_num_vfs);
Vasundhara Volam7c5a5242012-08-28 20:37:41 +00002833 if (!lancer_chip(adapter))
2834 dev_num_vfs = min_t(u16, dev_num_vfs, MAX_VFS);
Sathya Perla39f1d942012-05-08 19:41:24 +00002835 adapter->dev_num_vfs = dev_num_vfs;
2836 }
Padmanabh Ratnakarabb93952012-10-20 06:01:41 +00002837err:
2838 return status;
Sathya Perla39f1d942012-05-08 19:41:24 +00002839}
2840
Sathya Perla5fb379e2009-06-18 00:02:59 +00002841static int be_setup(struct be_adapter *adapter)
2842{
Sathya Perla39f1d942012-05-08 19:41:24 +00002843 struct device *dev = &adapter->pdev->dev;
Padmanabh Ratnakarabb93952012-10-20 06:01:41 +00002844 u32 en_flags;
Sathya Perlaa54769f2011-10-24 02:45:00 +00002845 u32 tx_fc, rx_fc;
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002846 int status;
Sarveshwar Bandiba343c72010-03-31 02:56:12 +00002847 u8 mac[ETH_ALEN];
Padmanabh Ratnakar1578e772012-06-07 04:37:08 +00002848 bool active_mac;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002849
Sathya Perla30128032011-11-10 19:17:57 +00002850 be_setup_init(adapter);
Sathya Perlaf9449ab2011-10-24 02:45:01 +00002851
Padmanabh Ratnakarabb93952012-10-20 06:01:41 +00002852 if (!lancer_chip(adapter))
2853 be_cmd_req_native_mode(adapter);
Sathya Perla39f1d942012-05-08 19:41:24 +00002854
Padmanabh Ratnakarabb93952012-10-20 06:01:41 +00002855 status = be_get_config(adapter);
2856 if (status)
2857 goto err;
Sathya Perla2dc1deb2011-07-19 19:52:33 +00002858
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002859 be_msix_enable(adapter);
2860
2861 status = be_evt_queues_create(adapter);
2862 if (status)
Sathya Perlaa54769f2011-10-24 02:45:00 +00002863 goto err;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002864
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002865 status = be_tx_cqs_create(adapter);
2866 if (status)
2867 goto err;
2868
2869 status = be_rx_cqs_create(adapter);
2870 if (status)
Sathya Perlaa54769f2011-10-24 02:45:00 +00002871 goto err;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002872
Sathya Perla5fb379e2009-06-18 00:02:59 +00002873 status = be_mcc_queues_create(adapter);
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002874 if (status)
Sathya Perlaa54769f2011-10-24 02:45:00 +00002875 goto err;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002876
Sathya Perlaf9449ab2011-10-24 02:45:01 +00002877 en_flags = BE_IF_FLAGS_UNTAGGED | BE_IF_FLAGS_BROADCAST |
2878 BE_IF_FLAGS_MULTICAST | BE_IF_FLAGS_PASS_L3L4_ERRORS;
Padmanabh Ratnakar5d5adb92011-11-16 02:03:32 +00002879
Padmanabh Ratnakarabb93952012-10-20 06:01:41 +00002880 if (adapter->function_caps & BE_FUNCTION_CAPS_RSS)
Sathya Perlaf9449ab2011-10-24 02:45:01 +00002881 en_flags |= BE_IF_FLAGS_RSS;
Padmanabh Ratnakar1578e772012-06-07 04:37:08 +00002882
Padmanabh Ratnakarabb93952012-10-20 06:01:41 +00002883 en_flags = en_flags & adapter->if_cap_flags;
Padmanabh Ratnakar0b13fb42012-07-18 02:51:58 +00002884
Padmanabh Ratnakarabb93952012-10-20 06:01:41 +00002885 status = be_cmd_if_create(adapter, adapter->if_cap_flags, en_flags,
Padmanabh Ratnakar1578e772012-06-07 04:37:08 +00002886 &adapter->if_handle, 0);
Sathya Perlaf9449ab2011-10-24 02:45:01 +00002887 if (status != 0)
2888 goto err;
2889
Padmanabh Ratnakar1578e772012-06-07 04:37:08 +00002890 memset(mac, 0, ETH_ALEN);
2891 active_mac = false;
2892 status = be_get_mac_addr(adapter, mac, adapter->if_handle,
2893 &active_mac, &adapter->pmac_id[0]);
2894 if (status != 0)
2895 goto err;
2896
2897 if (!active_mac) {
2898 status = be_cmd_pmac_add(adapter, mac, adapter->if_handle,
2899 &adapter->pmac_id[0], 0);
2900 if (status != 0)
2901 goto err;
2902 }
2903
2904 if (is_zero_ether_addr(adapter->netdev->dev_addr)) {
2905 memcpy(adapter->netdev->dev_addr, mac, ETH_ALEN);
2906 memcpy(adapter->netdev->perm_addr, mac, ETH_ALEN);
Sathya Perlaf9449ab2011-10-24 02:45:01 +00002907 }
Ajit Khaparde0dffc832009-11-29 17:57:46 +00002908
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002909 status = be_tx_qs_create(adapter);
2910 if (status)
2911 goto err;
2912
Sathya Perla04b71172011-09-27 13:30:27 -04002913 be_cmd_get_fw_ver(adapter, adapter->fw_ver, NULL);
Somnath Kotur5a56eb12011-09-30 07:24:28 +00002914
Sathya Perla1d1e9a42012-06-05 19:37:17 +00002915 if (adapter->vlans_added)
Sathya Perla10329df2012-06-05 19:37:18 +00002916 be_vid_config(adapter);
Sathya Perlaa54769f2011-10-24 02:45:00 +00002917
2918 be_set_rx_mode(adapter->netdev);
2919
Ajit Khapardeddc3f5c2012-04-26 15:42:31 +00002920 be_cmd_get_flow_control(adapter, &tx_fc, &rx_fc);
Padmanabh Ratnakar590c3912011-11-25 05:47:26 +00002921
Ajit Khapardeddc3f5c2012-04-26 15:42:31 +00002922 if (rx_fc != adapter->rx_fc || tx_fc != adapter->tx_fc)
2923 be_cmd_set_flow_control(adapter, adapter->tx_fc,
Sathya Perlaa54769f2011-10-24 02:45:00 +00002924 adapter->rx_fc);
Sathya Perlaa54769f2011-10-24 02:45:00 +00002925
Sathya Perla39f1d942012-05-08 19:41:24 +00002926 if (be_physfn(adapter) && num_vfs) {
2927 if (adapter->dev_num_vfs)
2928 be_vf_setup(adapter);
2929 else
2930 dev_warn(dev, "device doesn't support SRIOV\n");
Sathya Perlaf9449ab2011-10-24 02:45:01 +00002931 }
2932
Ajit Khaparde42f11cf2012-04-21 18:53:22 +00002933 be_cmd_get_phy_info(adapter);
2934 if (be_pause_supported(adapter))
2935 adapter->phy.fc_autoneg = 1;
2936
Sathya Perla191eb752012-02-23 18:50:13 +00002937 schedule_delayed_work(&adapter->work, msecs_to_jiffies(1000));
2938 adapter->flags |= BE_FLAGS_WORKER_SCHEDULED;
Sathya Perlaf9449ab2011-10-24 02:45:01 +00002939 return 0;
Sathya Perlaa54769f2011-10-24 02:45:00 +00002940err:
2941 be_clear(adapter);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002942 return status;
2943}
2944
Ivan Vecera66268732011-12-08 01:31:21 +00002945#ifdef CONFIG_NET_POLL_CONTROLLER
2946static void be_netpoll(struct net_device *netdev)
2947{
2948 struct be_adapter *adapter = netdev_priv(netdev);
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002949 struct be_eq_obj *eqo;
Ivan Vecera66268732011-12-08 01:31:21 +00002950 int i;
2951
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002952 for_all_evt_queues(adapter, eqo, i)
2953 event_handle(eqo);
2954
2955 return;
Ivan Vecera66268732011-12-08 01:31:21 +00002956}
2957#endif
2958
Ajit Khaparde84517482009-09-04 03:12:16 +00002959#define FW_FILE_HDR_SIGN "ServerEngines Corp. "
Padmanabh Ratnakarc165541e2012-04-25 01:47:15 +00002960char flash_cookie[2][16] = {"*** SE FLAS", "H DIRECTORY *** "};
2961
Sarveshwar Bandifa9a6fe2009-11-20 14:23:47 -08002962static bool be_flash_redboot(struct be_adapter *adapter,
Ajit Khaparde3f0d4562010-02-09 01:30:35 +00002963 const u8 *p, u32 img_start, int image_size,
2964 int hdr_size)
Sarveshwar Bandifa9a6fe2009-11-20 14:23:47 -08002965{
2966 u32 crc_offset;
2967 u8 flashed_crc[4];
2968 int status;
Ajit Khaparde3f0d4562010-02-09 01:30:35 +00002969
2970 crc_offset = hdr_size + img_start + image_size - 4;
2971
Sarveshwar Bandifa9a6fe2009-11-20 14:23:47 -08002972 p += crc_offset;
Ajit Khaparde3f0d4562010-02-09 01:30:35 +00002973
2974 status = be_cmd_get_flash_crc(adapter, flashed_crc,
Ajit Khapardef510fc62010-03-31 01:47:45 +00002975 (image_size - 4));
Sarveshwar Bandifa9a6fe2009-11-20 14:23:47 -08002976 if (status) {
2977 dev_err(&adapter->pdev->dev,
2978 "could not get crc from flash, not flashing redboot\n");
2979 return false;
2980 }
2981
2982 /*update redboot only if crc does not match*/
2983 if (!memcmp(flashed_crc, p, 4))
2984 return false;
2985 else
2986 return true;
Sarveshwar Bandifa9a6fe2009-11-20 14:23:47 -08002987}
2988
Sathya Perla306f1342011-08-02 19:57:45 +00002989static bool phy_flashing_required(struct be_adapter *adapter)
2990{
Ajit Khaparde42f11cf2012-04-21 18:53:22 +00002991 return (adapter->phy.phy_type == TN_8022 &&
2992 adapter->phy.interface_type == PHY_TYPE_BASET_10GB);
Sathya Perla306f1342011-08-02 19:57:45 +00002993}
2994
Padmanabh Ratnakarc165541e2012-04-25 01:47:15 +00002995static bool is_comp_in_ufi(struct be_adapter *adapter,
2996 struct flash_section_info *fsec, int type)
2997{
2998 int i = 0, img_type = 0;
2999 struct flash_section_info_g2 *fsec_g2 = NULL;
3000
3001 if (adapter->generation != BE_GEN3)
3002 fsec_g2 = (struct flash_section_info_g2 *)fsec;
3003
3004 for (i = 0; i < MAX_FLASH_COMP; i++) {
3005 if (fsec_g2)
3006 img_type = le32_to_cpu(fsec_g2->fsec_entry[i].type);
3007 else
3008 img_type = le32_to_cpu(fsec->fsec_entry[i].type);
3009
3010 if (img_type == type)
3011 return true;
3012 }
3013 return false;
3014
3015}
3016
3017struct flash_section_info *get_fsec_info(struct be_adapter *adapter,
3018 int header_size,
3019 const struct firmware *fw)
3020{
3021 struct flash_section_info *fsec = NULL;
3022 const u8 *p = fw->data;
3023
3024 p += header_size;
3025 while (p < (fw->data + fw->size)) {
3026 fsec = (struct flash_section_info *)p;
3027 if (!memcmp(flash_cookie, fsec->cookie, sizeof(flash_cookie)))
3028 return fsec;
3029 p += 32;
3030 }
3031 return NULL;
3032}
3033
Ajit Khaparde3f0d4562010-02-09 01:30:35 +00003034static int be_flash_data(struct be_adapter *adapter,
Padmanabh Ratnakarc165541e2012-04-25 01:47:15 +00003035 const struct firmware *fw,
3036 struct be_dma_mem *flash_cmd,
3037 int num_of_images)
Ajit Khaparde3f0d4562010-02-09 01:30:35 +00003038
Ajit Khaparde84517482009-09-04 03:12:16 +00003039{
Ajit Khaparde3f0d4562010-02-09 01:30:35 +00003040 int status = 0, i, filehdr_size = 0;
Padmanabh Ratnakarc165541e2012-04-25 01:47:15 +00003041 int img_hdrs_size = (num_of_images * sizeof(struct image_hdr));
Ajit Khaparde3f0d4562010-02-09 01:30:35 +00003042 u32 total_bytes = 0, flash_op;
Ajit Khaparde84517482009-09-04 03:12:16 +00003043 int num_bytes;
3044 const u8 *p = fw->data;
3045 struct be_cmd_write_flashrom *req = flash_cmd->va;
Joe Perches215faf92010-12-21 02:16:10 -08003046 const struct flash_comp *pflashcomp;
Padmanabh Ratnakarc165541e2012-04-25 01:47:15 +00003047 int num_comp, hdr_size;
3048 struct flash_section_info *fsec = NULL;
Ajit Khaparde84517482009-09-04 03:12:16 +00003049
Padmanabh Ratnakarc165541e2012-04-25 01:47:15 +00003050 struct flash_comp gen3_flash_types[] = {
3051 { FLASH_iSCSI_PRIMARY_IMAGE_START_g3, OPTYPE_ISCSI_ACTIVE,
3052 FLASH_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_iSCSI},
3053 { FLASH_REDBOOT_START_g3, OPTYPE_REDBOOT,
3054 FLASH_REDBOOT_IMAGE_MAX_SIZE_g3, IMAGE_BOOT_CODE},
3055 { FLASH_iSCSI_BIOS_START_g3, OPTYPE_BIOS,
3056 FLASH_BIOS_IMAGE_MAX_SIZE_g3, IMAGE_OPTION_ROM_ISCSI},
3057 { FLASH_PXE_BIOS_START_g3, OPTYPE_PXE_BIOS,
3058 FLASH_BIOS_IMAGE_MAX_SIZE_g3, IMAGE_OPTION_ROM_PXE},
3059 { FLASH_FCoE_BIOS_START_g3, OPTYPE_FCOE_BIOS,
3060 FLASH_BIOS_IMAGE_MAX_SIZE_g3, IMAGE_OPTION_ROM_FCoE},
3061 { FLASH_iSCSI_BACKUP_IMAGE_START_g3, OPTYPE_ISCSI_BACKUP,
3062 FLASH_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_BACKUP_iSCSI},
3063 { FLASH_FCoE_PRIMARY_IMAGE_START_g3, OPTYPE_FCOE_FW_ACTIVE,
3064 FLASH_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_FCoE},
3065 { FLASH_FCoE_BACKUP_IMAGE_START_g3, OPTYPE_FCOE_FW_BACKUP,
3066 FLASH_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_BACKUP_FCoE},
3067 { FLASH_NCSI_START_g3, OPTYPE_NCSI_FW,
3068 FLASH_NCSI_IMAGE_MAX_SIZE_g3, IMAGE_NCSI},
3069 { FLASH_PHY_FW_START_g3, OPTYPE_PHY_FW,
3070 FLASH_PHY_FW_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_PHY}
Ajit Khaparde3f0d4562010-02-09 01:30:35 +00003071 };
Padmanabh Ratnakarc165541e2012-04-25 01:47:15 +00003072
3073 struct flash_comp gen2_flash_types[] = {
3074 { FLASH_iSCSI_PRIMARY_IMAGE_START_g2, OPTYPE_ISCSI_ACTIVE,
3075 FLASH_IMAGE_MAX_SIZE_g2, IMAGE_FIRMWARE_iSCSI},
3076 { FLASH_REDBOOT_START_g2, OPTYPE_REDBOOT,
3077 FLASH_REDBOOT_IMAGE_MAX_SIZE_g2, IMAGE_BOOT_CODE},
3078 { FLASH_iSCSI_BIOS_START_g2, OPTYPE_BIOS,
3079 FLASH_BIOS_IMAGE_MAX_SIZE_g2, IMAGE_OPTION_ROM_ISCSI},
3080 { FLASH_PXE_BIOS_START_g2, OPTYPE_PXE_BIOS,
3081 FLASH_BIOS_IMAGE_MAX_SIZE_g2, IMAGE_OPTION_ROM_PXE},
3082 { FLASH_FCoE_BIOS_START_g2, OPTYPE_FCOE_BIOS,
3083 FLASH_BIOS_IMAGE_MAX_SIZE_g2, IMAGE_OPTION_ROM_FCoE},
3084 { FLASH_iSCSI_BACKUP_IMAGE_START_g2, OPTYPE_ISCSI_BACKUP,
3085 FLASH_IMAGE_MAX_SIZE_g2, IMAGE_FIRMWARE_BACKUP_iSCSI},
3086 { FLASH_FCoE_PRIMARY_IMAGE_START_g2, OPTYPE_FCOE_FW_ACTIVE,
3087 FLASH_IMAGE_MAX_SIZE_g2, IMAGE_FIRMWARE_FCoE},
3088 { FLASH_FCoE_BACKUP_IMAGE_START_g2, OPTYPE_FCOE_FW_BACKUP,
3089 FLASH_IMAGE_MAX_SIZE_g2, IMAGE_FIRMWARE_BACKUP_FCoE}
Ajit Khaparde3f0d4562010-02-09 01:30:35 +00003090 };
3091
3092 if (adapter->generation == BE_GEN3) {
3093 pflashcomp = gen3_flash_types;
3094 filehdr_size = sizeof(struct flash_file_hdr_g3);
Joe Perches215faf92010-12-21 02:16:10 -08003095 num_comp = ARRAY_SIZE(gen3_flash_types);
Ajit Khaparde3f0d4562010-02-09 01:30:35 +00003096 } else {
3097 pflashcomp = gen2_flash_types;
3098 filehdr_size = sizeof(struct flash_file_hdr_g2);
Joe Perches215faf92010-12-21 02:16:10 -08003099 num_comp = ARRAY_SIZE(gen2_flash_types);
Ajit Khaparde84517482009-09-04 03:12:16 +00003100 }
Padmanabh Ratnakarc165541e2012-04-25 01:47:15 +00003101 /* Get flash section info*/
3102 fsec = get_fsec_info(adapter, filehdr_size + img_hdrs_size, fw);
3103 if (!fsec) {
3104 dev_err(&adapter->pdev->dev,
3105 "Invalid Cookie. UFI corrupted ?\n");
3106 return -1;
3107 }
Sarveshwar Bandi9fe96932010-03-02 22:37:28 +00003108 for (i = 0; i < num_comp; i++) {
Padmanabh Ratnakarc165541e2012-04-25 01:47:15 +00003109 if (!is_comp_in_ufi(adapter, fsec, pflashcomp[i].img_type))
Sarveshwar Bandi9fe96932010-03-02 22:37:28 +00003110 continue;
Padmanabh Ratnakarc165541e2012-04-25 01:47:15 +00003111
3112 if ((pflashcomp[i].optype == OPTYPE_NCSI_FW) &&
3113 memcmp(adapter->fw_ver, "3.102.148.0", 11) < 0)
3114 continue;
3115
3116 if (pflashcomp[i].optype == OPTYPE_PHY_FW) {
Sathya Perla306f1342011-08-02 19:57:45 +00003117 if (!phy_flashing_required(adapter))
3118 continue;
3119 }
Padmanabh Ratnakarc165541e2012-04-25 01:47:15 +00003120
3121 hdr_size = filehdr_size +
3122 (num_of_images * sizeof(struct image_hdr));
3123
3124 if ((pflashcomp[i].optype == OPTYPE_REDBOOT) &&
3125 (!be_flash_redboot(adapter, fw->data, pflashcomp[i].offset,
3126 pflashcomp[i].size, hdr_size)))
Ajit Khaparde3f0d4562010-02-09 01:30:35 +00003127 continue;
Padmanabh Ratnakarc165541e2012-04-25 01:47:15 +00003128
3129 /* Flash the component */
Ajit Khaparde3f0d4562010-02-09 01:30:35 +00003130 p = fw->data;
Padmanabh Ratnakarc165541e2012-04-25 01:47:15 +00003131 p += filehdr_size + pflashcomp[i].offset + img_hdrs_size;
Sathya Perla306f1342011-08-02 19:57:45 +00003132 if (p + pflashcomp[i].size > fw->data + fw->size)
3133 return -1;
3134 total_bytes = pflashcomp[i].size;
Ajit Khaparde3f0d4562010-02-09 01:30:35 +00003135 while (total_bytes) {
3136 if (total_bytes > 32*1024)
3137 num_bytes = 32*1024;
3138 else
3139 num_bytes = total_bytes;
3140 total_bytes -= num_bytes;
Sathya Perla306f1342011-08-02 19:57:45 +00003141 if (!total_bytes) {
Padmanabh Ratnakarc165541e2012-04-25 01:47:15 +00003142 if (pflashcomp[i].optype == OPTYPE_PHY_FW)
Sathya Perla306f1342011-08-02 19:57:45 +00003143 flash_op = FLASHROM_OPER_PHY_FLASH;
3144 else
3145 flash_op = FLASHROM_OPER_FLASH;
3146 } else {
Padmanabh Ratnakarc165541e2012-04-25 01:47:15 +00003147 if (pflashcomp[i].optype == OPTYPE_PHY_FW)
Sathya Perla306f1342011-08-02 19:57:45 +00003148 flash_op = FLASHROM_OPER_PHY_SAVE;
3149 else
3150 flash_op = FLASHROM_OPER_SAVE;
3151 }
Ajit Khaparde3f0d4562010-02-09 01:30:35 +00003152 memcpy(req->params.data_buf, p, num_bytes);
3153 p += num_bytes;
3154 status = be_cmd_write_flashrom(adapter, flash_cmd,
3155 pflashcomp[i].optype, flash_op, num_bytes);
3156 if (status) {
Sathya Perla306f1342011-08-02 19:57:45 +00003157 if ((status == ILLEGAL_IOCTL_REQ) &&
3158 (pflashcomp[i].optype ==
Padmanabh Ratnakarc165541e2012-04-25 01:47:15 +00003159 OPTYPE_PHY_FW))
Sathya Perla306f1342011-08-02 19:57:45 +00003160 break;
Ajit Khaparde3f0d4562010-02-09 01:30:35 +00003161 dev_err(&adapter->pdev->dev,
3162 "cmd to write to flash rom failed.\n");
3163 return -1;
3164 }
Ajit Khaparde84517482009-09-04 03:12:16 +00003165 }
Ajit Khaparde84517482009-09-04 03:12:16 +00003166 }
Ajit Khaparde84517482009-09-04 03:12:16 +00003167 return 0;
3168}
3169
Ajit Khaparde3f0d4562010-02-09 01:30:35 +00003170static int get_ufigen_type(struct flash_file_hdr_g2 *fhdr)
3171{
3172 if (fhdr == NULL)
3173 return 0;
3174 if (fhdr->build[0] == '3')
3175 return BE_GEN3;
3176 else if (fhdr->build[0] == '2')
3177 return BE_GEN2;
3178 else
3179 return 0;
3180}
3181
Padmanabh Ratnakarf67ef7b2012-07-12 03:57:09 +00003182static int lancer_wait_idle(struct be_adapter *adapter)
3183{
3184#define SLIPORT_IDLE_TIMEOUT 30
3185 u32 reg_val;
3186 int status = 0, i;
3187
3188 for (i = 0; i < SLIPORT_IDLE_TIMEOUT; i++) {
3189 reg_val = ioread32(adapter->db + PHYSDEV_CONTROL_OFFSET);
3190 if ((reg_val & PHYSDEV_CONTROL_INP_MASK) == 0)
3191 break;
3192
3193 ssleep(1);
3194 }
3195
3196 if (i == SLIPORT_IDLE_TIMEOUT)
3197 status = -1;
3198
3199 return status;
3200}
3201
3202static int lancer_fw_reset(struct be_adapter *adapter)
3203{
3204 int status = 0;
3205
3206 status = lancer_wait_idle(adapter);
3207 if (status)
3208 return status;
3209
3210 iowrite32(PHYSDEV_CONTROL_FW_RESET_MASK, adapter->db +
3211 PHYSDEV_CONTROL_OFFSET);
3212
3213 return status;
3214}
3215
Shripad Nunjundarao485bf562011-05-16 07:36:59 +00003216static int lancer_fw_download(struct be_adapter *adapter,
3217 const struct firmware *fw)
Ajit Khaparde84517482009-09-04 03:12:16 +00003218{
Shripad Nunjundarao485bf562011-05-16 07:36:59 +00003219#define LANCER_FW_DOWNLOAD_CHUNK (32 * 1024)
3220#define LANCER_FW_DOWNLOAD_LOCATION "/prg"
3221 struct be_dma_mem flash_cmd;
Shripad Nunjundarao485bf562011-05-16 07:36:59 +00003222 const u8 *data_ptr = NULL;
3223 u8 *dest_image_ptr = NULL;
3224 size_t image_size = 0;
3225 u32 chunk_size = 0;
3226 u32 data_written = 0;
3227 u32 offset = 0;
3228 int status = 0;
3229 u8 add_status = 0;
Padmanabh Ratnakarf67ef7b2012-07-12 03:57:09 +00003230 u8 change_status;
Shripad Nunjundarao485bf562011-05-16 07:36:59 +00003231
3232 if (!IS_ALIGNED(fw->size, sizeof(u32))) {
3233 dev_err(&adapter->pdev->dev,
3234 "FW Image not properly aligned. "
3235 "Length must be 4 byte aligned.\n");
3236 status = -EINVAL;
3237 goto lancer_fw_exit;
3238 }
3239
3240 flash_cmd.size = sizeof(struct lancer_cmd_req_write_object)
3241 + LANCER_FW_DOWNLOAD_CHUNK;
3242 flash_cmd.va = dma_alloc_coherent(&adapter->pdev->dev, flash_cmd.size,
3243 &flash_cmd.dma, GFP_KERNEL);
3244 if (!flash_cmd.va) {
3245 status = -ENOMEM;
3246 dev_err(&adapter->pdev->dev,
3247 "Memory allocation failure while flashing\n");
3248 goto lancer_fw_exit;
3249 }
3250
Shripad Nunjundarao485bf562011-05-16 07:36:59 +00003251 dest_image_ptr = flash_cmd.va +
3252 sizeof(struct lancer_cmd_req_write_object);
3253 image_size = fw->size;
3254 data_ptr = fw->data;
3255
3256 while (image_size) {
3257 chunk_size = min_t(u32, image_size, LANCER_FW_DOWNLOAD_CHUNK);
3258
3259 /* Copy the image chunk content. */
3260 memcpy(dest_image_ptr, data_ptr, chunk_size);
3261
3262 status = lancer_cmd_write_object(adapter, &flash_cmd,
Padmanabh Ratnakarf67ef7b2012-07-12 03:57:09 +00003263 chunk_size, offset,
3264 LANCER_FW_DOWNLOAD_LOCATION,
3265 &data_written, &change_status,
3266 &add_status);
Shripad Nunjundarao485bf562011-05-16 07:36:59 +00003267 if (status)
3268 break;
3269
3270 offset += data_written;
3271 data_ptr += data_written;
3272 image_size -= data_written;
3273 }
3274
3275 if (!status) {
3276 /* Commit the FW written */
3277 status = lancer_cmd_write_object(adapter, &flash_cmd,
Padmanabh Ratnakarf67ef7b2012-07-12 03:57:09 +00003278 0, offset,
3279 LANCER_FW_DOWNLOAD_LOCATION,
3280 &data_written, &change_status,
3281 &add_status);
Shripad Nunjundarao485bf562011-05-16 07:36:59 +00003282 }
3283
3284 dma_free_coherent(&adapter->pdev->dev, flash_cmd.size, flash_cmd.va,
3285 flash_cmd.dma);
3286 if (status) {
3287 dev_err(&adapter->pdev->dev,
3288 "Firmware load error. "
3289 "Status code: 0x%x Additional Status: 0x%x\n",
3290 status, add_status);
3291 goto lancer_fw_exit;
3292 }
3293
Padmanabh Ratnakarf67ef7b2012-07-12 03:57:09 +00003294 if (change_status == LANCER_FW_RESET_NEEDED) {
3295 status = lancer_fw_reset(adapter);
3296 if (status) {
3297 dev_err(&adapter->pdev->dev,
3298 "Adapter busy for FW reset.\n"
3299 "New FW will not be active.\n");
3300 goto lancer_fw_exit;
3301 }
3302 } else if (change_status != LANCER_NO_RESET_NEEDED) {
3303 dev_err(&adapter->pdev->dev,
3304 "System reboot required for new FW"
3305 " to be active\n");
3306 }
3307
Shripad Nunjundarao485bf562011-05-16 07:36:59 +00003308 dev_info(&adapter->pdev->dev, "Firmware flashed successfully\n");
3309lancer_fw_exit:
3310 return status;
3311}
3312
/* Download a UFI firmware image to a BE2/BE3 adapter.  Verifies that the
 * image generation matches the controller generation, allocates the DMA
 * buffer shared by all flash writes, and hands each image with id 1 (gen3)
 * or the single gen2 image to be_flash_data().  Returns 0 on success.
 */
static int be_fw_download(struct be_adapter *adapter, const struct firmware* fw)
{
	struct flash_file_hdr_g2 *fhdr;
	struct flash_file_hdr_g3 *fhdr3;
	struct image_hdr *img_hdr_ptr = NULL;
	struct be_dma_mem flash_cmd;
	const u8 *p;
	int status = 0, i = 0, num_imgs = 0;

	p = fw->data;
	/* The g2 header prefix is common to both generations; enough to
	 * read the build string for the generation check below.
	 */
	fhdr = (struct flash_file_hdr_g2 *) p;

	/* Command header plus a 32KB data chunk, DMA-able for the HW */
	flash_cmd.size = sizeof(struct be_cmd_write_flashrom) + 32*1024;
	flash_cmd.va = dma_alloc_coherent(&adapter->pdev->dev, flash_cmd.size,
					  &flash_cmd.dma, GFP_KERNEL);
	if (!flash_cmd.va) {
		status = -ENOMEM;
		dev_err(&adapter->pdev->dev,
			"Memory allocation failure while flashing\n");
		goto be_fw_exit;
	}

	if ((adapter->generation == BE_GEN3) &&
			(get_ufigen_type(fhdr) == BE_GEN3)) {
		fhdr3 = (struct flash_file_hdr_g3 *) fw->data;
		num_imgs = le32_to_cpu(fhdr3->num_imgs);
		for (i = 0; i < num_imgs; i++) {
			img_hdr_ptr = (struct image_hdr *) (fw->data +
					(sizeof(struct flash_file_hdr_g3) +
					 i * sizeof(struct image_hdr)));
			if (le32_to_cpu(img_hdr_ptr->imageid) == 1)
				status = be_flash_data(adapter, fw, &flash_cmd,
							num_imgs);
		}
	} else if ((adapter->generation == BE_GEN2) &&
			(get_ufigen_type(fhdr) == BE_GEN2)) {
		status = be_flash_data(adapter, fw, &flash_cmd, 0);
	} else {
		dev_err(&adapter->pdev->dev,
			"UFI and Interface are not compatible for flashing\n");
		status = -1;
	}

	dma_free_coherent(&adapter->pdev->dev, flash_cmd.size, flash_cmd.va,
			  flash_cmd.dma);
	if (status) {
		dev_err(&adapter->pdev->dev, "Firmware load error\n");
		goto be_fw_exit;
	}

	dev_info(&adapter->pdev->dev, "Firmware flashed successfully\n");

be_fw_exit:
	return status;
}
3368
3369int be_load_fw(struct be_adapter *adapter, u8 *fw_file)
3370{
3371 const struct firmware *fw;
3372 int status;
3373
3374 if (!netif_running(adapter->netdev)) {
3375 dev_err(&adapter->pdev->dev,
3376 "Firmware load not allowed (interface is down)\n");
3377 return -1;
3378 }
3379
3380 status = request_firmware(&fw, fw_file, &adapter->pdev->dev);
3381 if (status)
3382 goto fw_exit;
3383
3384 dev_info(&adapter->pdev->dev, "Flashing firmware file %s\n", fw_file);
3385
3386 if (lancer_chip(adapter))
3387 status = lancer_fw_download(adapter, fw);
3388 else
3389 status = be_fw_download(adapter, fw);
3390
Ajit Khaparde84517482009-09-04 03:12:16 +00003391fw_exit:
3392 release_firmware(fw);
3393 return status;
3394}
3395
/* Callbacks hooking this driver into the core network stack; installed on
 * the netdev in be_netdev_init().
 */
static const struct net_device_ops be_netdev_ops = {
	.ndo_open = be_open,
	.ndo_stop = be_close,
	.ndo_start_xmit = be_xmit,
	.ndo_set_rx_mode = be_set_rx_mode,
	.ndo_set_mac_address = be_mac_addr_set,
	.ndo_change_mtu = be_change_mtu,
	.ndo_get_stats64 = be_get_stats64,
	.ndo_validate_addr = eth_validate_addr,
	.ndo_vlan_rx_add_vid = be_vlan_add_vid,
	.ndo_vlan_rx_kill_vid = be_vlan_rem_vid,
	/* SR-IOV: per-VF configuration entry points (PF only) */
	.ndo_set_vf_mac = be_set_vf_mac,
	.ndo_set_vf_vlan = be_set_vf_vlan,
	.ndo_set_vf_tx_rate = be_set_vf_tx_rate,
	.ndo_get_vf_config = be_get_vf_config,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller = be_netpoll,
#endif
};
3415
/* One-time netdev feature/callback setup; called from be_probe() after
 * be_setup() has created the EQs that the NAPI instances attach to.
 */
static void be_netdev_init(struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_eq_obj *eqo;
	int i;

	/* Offloads the HW supports; these are user-toggleable via ethtool */
	netdev->hw_features |= NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6 |
		NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_RXCSUM |
		NETIF_F_HW_VLAN_TX;
	if (be_multi_rxq(adapter))
		netdev->hw_features |= NETIF_F_RXHASH;

	/* Enabled features: everything in hw_features plus VLAN rx/filter,
	 * which (being outside hw_features) cannot be toggled off by the user
	 */
	netdev->features |= netdev->hw_features |
		NETIF_F_HW_VLAN_RX | NETIF_F_HW_VLAN_FILTER;

	netdev->vlan_features |= NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6 |
		NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;

	/* HW can filter unicast addresses, avoiding promiscuous mode */
	netdev->priv_flags |= IFF_UNICAST_FLT;

	netdev->flags |= IFF_MULTICAST;

	netif_set_gso_max_size(netdev, 65535 - ETH_HLEN);

	netdev->netdev_ops = &be_netdev_ops;

	SET_ETHTOOL_OPS(netdev, &be_ethtool_ops);

	/* One NAPI context per event queue */
	for_all_evt_queues(adapter, eqo, i)
		netif_napi_add(netdev, &eqo->napi, be_poll, BE_NAPI_WEIGHT);
}
3447
3448static void be_unmap_pci_bars(struct be_adapter *adapter)
3449{
Sathya Perla8788fdc2009-07-27 22:52:03 +00003450 if (adapter->csr)
3451 iounmap(adapter->csr);
3452 if (adapter->db)
3453 iounmap(adapter->db);
Parav Pandit045508a2012-03-26 14:27:13 +00003454 if (adapter->roce_db.base)
3455 pci_iounmap(adapter->pdev, adapter->roce_db.base);
3456}
3457
3458static int lancer_roce_map_pci_bars(struct be_adapter *adapter)
3459{
3460 struct pci_dev *pdev = adapter->pdev;
3461 u8 __iomem *addr;
3462
3463 addr = pci_iomap(pdev, 2, 0);
3464 if (addr == NULL)
3465 return -ENOMEM;
3466
3467 adapter->roce_db.base = addr;
3468 adapter->roce_db.io_addr = pci_resource_start(pdev, 2);
3469 adapter->roce_db.size = 8192;
3470 adapter->roce_db.total_size = pci_resource_len(pdev, 2);
3471 return 0;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003472}
3473
/* Map the MMIO BARs needed by this function. Which BARs exist and what
 * they contain depends on chip generation and on PF vs VF:
 *   - Lancer: BAR 0 is the doorbell area; SLI-3 adds a RoCE DB BAR.
 *   - BE2/BE3 PF: BAR 2 is the CSR area; doorbells live in BAR 4 (PF)
 *     or BAR 0 (VF on GEN3).
 * On failure all partial mappings are undone. Returns 0 or -ENOMEM.
 */
static int be_map_pci_bars(struct be_adapter *adapter)
{
	u8 __iomem *addr;
	int db_reg;

	if (lancer_chip(adapter)) {
		if (be_type_2_3(adapter)) {
			addr = ioremap_nocache(
					pci_resource_start(adapter->pdev, 0),
					pci_resource_len(adapter->pdev, 0));
			if (addr == NULL)
				return -ENOMEM;
			adapter->db = addr;
		}
		if (adapter->if_type == SLI_INTF_TYPE_3) {
			if (lancer_roce_map_pci_bars(adapter))
				goto pci_map_err;
		}
		return 0;
	}

	/* Only the PF has access to the CSR BAR */
	if (be_physfn(adapter)) {
		addr = ioremap_nocache(pci_resource_start(adapter->pdev, 2),
				pci_resource_len(adapter->pdev, 2));
		if (addr == NULL)
			return -ENOMEM;
		adapter->csr = addr;
	}

	/* Select the doorbell BAR: GEN2 and GEN3-PF use BAR 4, GEN3-VF BAR 0 */
	if (adapter->generation == BE_GEN2) {
		db_reg = 4;
	} else {
		if (be_physfn(adapter))
			db_reg = 4;
		else
			db_reg = 0;
	}
	addr = ioremap_nocache(pci_resource_start(adapter->pdev, db_reg),
			pci_resource_len(adapter->pdev, db_reg));
	if (addr == NULL)
		goto pci_map_err;
	adapter->db = addr;
	/* Skyhawk: RoCE doorbells share the NIC doorbell BAR; record its
	 * bus address/extent for the RoCE driver (no extra mapping here)
	 */
	if (adapter->sli_family == SKYHAWK_SLI_FAMILY) {
		adapter->roce_db.size = 4096;
		adapter->roce_db.io_addr =
				pci_resource_start(adapter->pdev, db_reg);
		adapter->roce_db.total_size =
				pci_resource_len(adapter->pdev, db_reg);
	}
	return 0;
pci_map_err:
	be_unmap_pci_bars(adapter);
	return -ENOMEM;
}
3528
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003529static void be_ctrl_cleanup(struct be_adapter *adapter)
3530{
Sathya Perla8788fdc2009-07-27 22:52:03 +00003531 struct be_dma_mem *mem = &adapter->mbox_mem_alloced;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003532
3533 be_unmap_pci_bars(adapter);
3534
3535 if (mem->va)
Ivan Vecera2b7bceb2011-02-02 08:05:12 +00003536 dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
3537 mem->dma);
Sathya Perlae7b909a2009-11-22 22:01:10 +00003538
Sathya Perla5b8821b2011-08-02 19:57:44 +00003539 mem = &adapter->rx_filter;
Sathya Perlae7b909a2009-11-22 22:01:10 +00003540 if (mem->va)
Ivan Vecera2b7bceb2011-02-02 08:05:12 +00003541 dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
3542 mem->dma);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003543}
3544
/* Initialize the control path: map BARs, allocate the FW mailbox (with a
 * 16-byte aligned view, as the HW requires) and the rx-filter command
 * buffer, and set up the mailbox/MCC locks.
 * Unwinds partial setup via the goto chain on failure; returns 0 or a
 * negative errno.
 */
static int be_ctrl_init(struct be_adapter *adapter)
{
	struct be_dma_mem *mbox_mem_alloc = &adapter->mbox_mem_alloced;
	struct be_dma_mem *mbox_mem_align = &adapter->mbox_mem;
	struct be_dma_mem *rx_filter = &adapter->rx_filter;
	int status;

	status = be_map_pci_bars(adapter);
	if (status)
		goto done;

	/* Over-allocate by 16 so an aligned view always fits inside */
	mbox_mem_alloc->size = sizeof(struct be_mcc_mailbox) + 16;
	mbox_mem_alloc->va = dma_alloc_coherent(&adapter->pdev->dev,
						mbox_mem_alloc->size,
						&mbox_mem_alloc->dma,
						GFP_KERNEL);
	if (!mbox_mem_alloc->va) {
		status = -ENOMEM;
		goto unmap_pci_bars;
	}
	/* mbox_mem is the 16-byte-aligned alias actually handed to the FW */
	mbox_mem_align->size = sizeof(struct be_mcc_mailbox);
	mbox_mem_align->va = PTR_ALIGN(mbox_mem_alloc->va, 16);
	mbox_mem_align->dma = PTR_ALIGN(mbox_mem_alloc->dma, 16);
	memset(mbox_mem_align->va, 0, sizeof(struct be_mcc_mailbox));

	rx_filter->size = sizeof(struct be_cmd_req_rx_filter);
	rx_filter->va = dma_alloc_coherent(&adapter->pdev->dev, rx_filter->size,
					&rx_filter->dma, GFP_KERNEL);
	if (rx_filter->va == NULL) {
		status = -ENOMEM;
		goto free_mbox;
	}
	memset(rx_filter->va, 0, rx_filter->size);
	mutex_init(&adapter->mbox_lock);
	spin_lock_init(&adapter->mcc_lock);
	spin_lock_init(&adapter->mcc_cq_lock);

	init_completion(&adapter->flash_compl);
	/* Saved config state is restored in the EEH/suspend resume paths */
	pci_save_state(adapter->pdev);
	return 0;

free_mbox:
	dma_free_coherent(&adapter->pdev->dev, mbox_mem_alloc->size,
			  mbox_mem_alloc->va, mbox_mem_alloc->dma);

unmap_pci_bars:
	be_unmap_pci_bars(adapter);

done:
	return status;
}
3596
3597static void be_stats_cleanup(struct be_adapter *adapter)
3598{
Sathya Perla3abcded2010-10-03 22:12:27 -07003599 struct be_dma_mem *cmd = &adapter->stats_cmd;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003600
3601 if (cmd->va)
Ivan Vecera2b7bceb2011-02-02 08:05:12 +00003602 dma_free_coherent(&adapter->pdev->dev, cmd->size,
3603 cmd->va, cmd->dma);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003604}
3605
3606static int be_stats_init(struct be_adapter *adapter)
3607{
Sathya Perla3abcded2010-10-03 22:12:27 -07003608 struct be_dma_mem *cmd = &adapter->stats_cmd;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003609
Selvin Xavier005d5692011-05-16 07:36:35 +00003610 if (adapter->generation == BE_GEN2) {
Ajit Khaparde89a88ab2011-05-16 07:36:18 +00003611 cmd->size = sizeof(struct be_cmd_req_get_stats_v0);
Selvin Xavier005d5692011-05-16 07:36:35 +00003612 } else {
3613 if (lancer_chip(adapter))
3614 cmd->size = sizeof(struct lancer_cmd_req_pport_stats);
3615 else
3616 cmd->size = sizeof(struct be_cmd_req_get_stats_v1);
3617 }
Ivan Vecera2b7bceb2011-02-02 08:05:12 +00003618 cmd->va = dma_alloc_coherent(&adapter->pdev->dev, cmd->size, &cmd->dma,
3619 GFP_KERNEL);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003620 if (cmd->va == NULL)
3621 return -1;
David S. Millerd291b9a2010-01-28 21:36:21 -08003622 memset(cmd->va, 0, cmd->size);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003623 return 0;
3624}
3625
/* PCI remove callback: tear down in strict reverse order of be_probe().
 * RoCE and the recovery worker go first so nothing touches the device
 * while the netdev and control structures are being dismantled.
 */
static void __devexit be_remove(struct pci_dev *pdev)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);

	if (!adapter)
		return;

	be_roce_dev_remove(adapter);

	cancel_delayed_work_sync(&adapter->func_recovery_work);

	unregister_netdev(adapter->netdev);

	be_clear(adapter);

	/* tell fw we're done with firing cmds */
	be_cmd_fw_clean(adapter);

	be_stats_cleanup(adapter);

	be_ctrl_cleanup(adapter);

	pci_disable_pcie_error_reporting(pdev);

	pci_set_drvdata(pdev, NULL);
	pci_release_regions(pdev);
	pci_disable_device(pdev);

	/* Frees the adapter too (it is the netdev's private area) */
	free_netdev(adapter->netdev);
}
3656
Ajit Khaparde4762f6c2012-03-18 06:23:11 +00003657bool be_is_wol_supported(struct be_adapter *adapter)
3658{
3659 return ((adapter->wol_cap & BE_WOL_CAP) &&
3660 !be_is_wol_excluded(adapter)) ? true : false;
3661}
3662
/* Query the FW's UART trace level via the extended-FAT capabilities
 * command. Returns the level, or 0 when the buffer cannot be allocated or
 * the command fails (0 is also a legitimate level, so callers cannot
 * distinguish failure from "off").
 */
u32 be_get_fw_log_level(struct be_adapter *adapter)
{
	struct be_dma_mem extfat_cmd;
	struct be_fat_conf_params *cfgs;
	int status;
	u32 level = 0;
	int j;

	memset(&extfat_cmd, 0, sizeof(struct be_dma_mem));
	extfat_cmd.size = sizeof(struct be_cmd_resp_get_ext_fat_caps);
	/* NOTE(review): pci_alloc_consistent() is the legacy wrapper (implies
	 * GFP_ATOMIC); the rest of this file uses dma_alloc_coherent() */
	extfat_cmd.va = pci_alloc_consistent(adapter->pdev, extfat_cmd.size,
						&extfat_cmd.dma);

	if (!extfat_cmd.va) {
		dev_err(&adapter->pdev->dev, "%s: Memory allocation failure\n",
			__func__);
		goto err;
	}

	status = be_cmd_get_ext_fat_capabilites(adapter, &extfat_cmd);
	if (!status) {
		/* Config params follow the generic response header */
		cfgs = (struct be_fat_conf_params *)(extfat_cmd.va +
						sizeof(struct be_cmd_resp_hdr))	;
		/* Scan module 0's trace modes for the UART entry */
		for (j = 0; j < le32_to_cpu(cfgs->module[0].num_modes); j++) {
			if (cfgs->module[0].trace_lvl[j].mode == MODE_UART)
				level = cfgs->module[0].trace_lvl[j].dbg_lvl;
		}
	}
	pci_free_consistent(adapter->pdev, extfat_cmd.size, extfat_cmd.va,
			    extfat_cmd.dma);
err:
	return level;
}
Padmanabh Ratnakarabb93952012-10-20 06:01:41 +00003696
/* Fetch one-time configuration from the FW at probe: controller
 * attributes, WoL capability, die-temperature polling interval and the FW
 * log level (used to seed msg_enable). Returns 0 or a negative status.
 */
static int be_get_initial_config(struct be_adapter *adapter)
{
	int status;
	u32 level;

	status = be_cmd_get_cntl_attributes(adapter);
	if (status)
		return status;

	status = be_cmd_get_acpi_wol_cap(adapter);
	if (status) {
		/* in case of a failure to get wol capabillities
		 * check the exclusion list to determine WOL capability */
		if (!be_is_wol_excluded(adapter))
			adapter->wol_cap |= BE_WOL_CAP;
	}

	if (be_is_wol_supported(adapter))
		adapter->wol = true;

	/* Must be a power of 2 or else MODULO will BUG_ON */
	adapter->be_get_temp_freq = 64;

	level = be_get_fw_log_level(adapter);
	/* HW messages are suppressed when the FW's own logging is verbose */
	adapter->msg_enable = level <= FW_LOG_LEVEL_DEFAULT ? NETIF_MSG_HW : 0;

	return 0;
}
3725
Sathya Perla39f1d942012-05-08 19:41:24 +00003726static int be_dev_type_check(struct be_adapter *adapter)
Sathya Perlafe6d2a32010-11-21 23:25:50 +00003727{
3728 struct pci_dev *pdev = adapter->pdev;
3729 u32 sli_intf = 0, if_type;
3730
3731 switch (pdev->device) {
3732 case BE_DEVICE_ID1:
3733 case OC_DEVICE_ID1:
3734 adapter->generation = BE_GEN2;
3735 break;
3736 case BE_DEVICE_ID2:
3737 case OC_DEVICE_ID2:
3738 adapter->generation = BE_GEN3;
3739 break;
3740 case OC_DEVICE_ID3:
Mammatha Edhala12f4d0a2011-05-18 03:26:22 +00003741 case OC_DEVICE_ID4:
Sathya Perlafe6d2a32010-11-21 23:25:50 +00003742 pci_read_config_dword(pdev, SLI_INTF_REG_OFFSET, &sli_intf);
Parav Pandit045508a2012-03-26 14:27:13 +00003743 adapter->if_type = (sli_intf & SLI_INTF_IF_TYPE_MASK) >>
3744 SLI_INTF_IF_TYPE_SHIFT;
Sathya Perlafe6d2a32010-11-21 23:25:50 +00003745 if_type = (sli_intf & SLI_INTF_IF_TYPE_MASK) >>
3746 SLI_INTF_IF_TYPE_SHIFT;
Sathya Perlafe6d2a32010-11-21 23:25:50 +00003747 if (((sli_intf & SLI_INTF_VALID_MASK) != SLI_INTF_VALID) ||
Parav Pandit045508a2012-03-26 14:27:13 +00003748 !be_type_2_3(adapter)) {
3749 dev_err(&pdev->dev, "SLI_INTF reg val is not valid\n");
3750 return -EINVAL;
3751 }
3752 adapter->sli_family = ((sli_intf & SLI_INTF_FAMILY_MASK) >>
3753 SLI_INTF_FAMILY_SHIFT);
3754 adapter->generation = BE_GEN3;
3755 break;
3756 case OC_DEVICE_ID5:
3757 pci_read_config_dword(pdev, SLI_INTF_REG_OFFSET, &sli_intf);
3758 if ((sli_intf & SLI_INTF_VALID_MASK) != SLI_INTF_VALID) {
Sathya Perlafe6d2a32010-11-21 23:25:50 +00003759 dev_err(&pdev->dev, "SLI_INTF reg val is not valid\n");
3760 return -EINVAL;
3761 }
Sathya Perlafe6d2a32010-11-21 23:25:50 +00003762 adapter->sli_family = ((sli_intf & SLI_INTF_FAMILY_MASK) >>
3763 SLI_INTF_FAMILY_SHIFT);
3764 adapter->generation = BE_GEN3;
3765 break;
3766 default:
3767 adapter->generation = 0;
3768 }
Sathya Perla39f1d942012-05-08 19:41:24 +00003769
3770 pci_read_config_dword(adapter->pdev, SLI_INTF_REG_OFFSET, &sli_intf);
3771 adapter->virtfn = (sli_intf & SLI_INTF_FT_MASK) ? 1 : 0;
Sathya Perlafe6d2a32010-11-21 23:25:50 +00003772 return 0;
3773}
3774
/* Recover a Lancer function after a SLIPORT error: wait for the FW to
 * report ready, tear the function down, clear the sticky error flags, and
 * rebuild (re-opening the interface if it was up).
 * Returns 0 on success or the first failing step's status.
 */
static int lancer_recover_func(struct be_adapter *adapter)
{
	int status;

	status = lancer_test_and_set_rdy_state(adapter);
	if (status)
		goto err;

	if (netif_running(adapter->netdev))
		be_close(adapter->netdev);

	be_clear(adapter);

	/* Clear the error state before be_setup() fires FW commands again */
	adapter->hw_error = false;
	adapter->fw_timeout = false;

	status = be_setup(adapter);
	if (status)
		goto err;

	if (netif_running(adapter->netdev)) {
		status = be_open(adapter->netdev);
		if (status)
			goto err;
	}

	dev_err(&adapter->pdev->dev,
		"Adapter SLIPORT recovery succeeded\n");
	return 0;
err:
	dev_err(&adapter->pdev->dev,
		"Adapter SLIPORT recovery failed\n");

	return status;
}
3810
/* Periodic (1 s) worker that polls for adapter errors and, on Lancer,
 * attempts SLIPORT recovery. EEH errors are left to the PCI error
 * handlers. Always re-arms itself.
 */
static void be_func_recovery_task(struct work_struct *work)
{
	struct be_adapter *adapter =
		container_of(work, struct be_adapter, func_recovery_work.work);
	int status;

	be_detect_error(adapter);

	if (adapter->hw_error && lancer_chip(adapter)) {

		/* EEH recovery owns the device; don't compete with it */
		if (adapter->eeh_error)
			goto out;

		/* Detach under rtnl so the stack stops using the device
		 * while it is being rebuilt */
		rtnl_lock();
		netif_device_detach(adapter->netdev);
		rtnl_unlock();

		status = lancer_recover_func(adapter);

		if (!status)
			netif_device_attach(adapter->netdev);
	}

out:
	schedule_delayed_work(&adapter->func_recovery_work,
			      msecs_to_jiffies(1000));
}
3838
/* Periodic (1 s) housekeeping worker: reaps MCC completions while the
 * interface is down; otherwise refreshes stats, samples die temperature,
 * replenishes starved RX rings and adapts EQ delays. Always re-arms.
 */
static void be_worker(struct work_struct *work)
{
	struct be_adapter *adapter =
		container_of(work, struct be_adapter, work.work);
	struct be_rx_obj *rxo;
	struct be_eq_obj *eqo;
	int i;

	/* when interrupts are not yet enabled, just reap any pending
	 * mcc completions */
	if (!netif_running(adapter->netdev)) {
		/* be_process_mcc() expects BH context */
		local_bh_disable();
		be_process_mcc(adapter);
		local_bh_enable();
		goto reschedule;
	}

	/* Don't pipeline stats requests; wait for the previous completion */
	if (!adapter->stats_cmd_sent) {
		if (lancer_chip(adapter))
			lancer_cmd_get_pport_stats(adapter,
						&adapter->stats_cmd);
		else
			be_cmd_get_stats(adapter, &adapter->stats_cmd);
	}

	/* Temperature is sampled every be_get_temp_freq ticks */
	if (MODULO(adapter->work_counter, adapter->be_get_temp_freq) == 0)
		be_cmd_get_die_temperature(adapter);

	/* Refill RX rings that ran out of buffers in atomic context */
	for_all_rx_queues(adapter, rxo, i) {
		if (rxo->rx_post_starved) {
			rxo->rx_post_starved = false;
			be_post_rx_frags(rxo, GFP_KERNEL);
		}
	}

	for_all_evt_queues(adapter, eqo, i)
		be_eqd_update(adapter, eqo);

reschedule:
	adapter->work_counter++;
	schedule_delayed_work(&adapter->work, msecs_to_jiffies(1000));
}
3881
Sathya Perla39f1d942012-05-08 19:41:24 +00003882static bool be_reset_required(struct be_adapter *adapter)
3883{
Sathya Perlad79c0a22012-06-05 19:37:22 +00003884 return be_find_vfs(adapter, ENABLED) > 0 ? false : true;
Sathya Perla39f1d942012-05-08 19:41:24 +00003885}
3886
Sathya Perlad3791422012-09-28 04:39:44 +00003887static char *mc_name(struct be_adapter *adapter)
3888{
3889 if (adapter->function_mode & FLEX10_MODE)
3890 return "FLEX10";
3891 else if (adapter->function_mode & VNIC_MODE)
3892 return "vNIC";
3893 else if (adapter->function_mode & UMC_ENABLED)
3894 return "UMC";
3895 else
3896 return "";
3897}
3898
/* "PF" or "VF" tag for the probe banner. */
static inline char *func_name(struct be_adapter *adapter)
{
	if (be_physfn(adapter))
		return "PF";
	return "VF";
}
3903
/* PCI probe: bring up one adapter end-to-end — enable the PCI function,
 * allocate the netdev, map BARs, sync with and initialize the FW, create
 * queues/interfaces via be_setup(), and finally register the netdev and
 * start the periodic recovery worker. Each failure point unwinds exactly
 * what was set up before it via the goto ladder at the bottom.
 */
static int __devinit be_probe(struct pci_dev *pdev,
			const struct pci_device_id *pdev_id)
{
	int status = 0;
	struct be_adapter *adapter;
	struct net_device *netdev;
	char port_name;

	status = pci_enable_device(pdev);
	if (status)
		goto do_none;

	status = pci_request_regions(pdev, DRV_NAME);
	if (status)
		goto disable_dev;
	pci_set_master(pdev);

	/* adapter lives in the netdev's private area */
	netdev = alloc_etherdev_mqs(sizeof(*adapter), MAX_TX_QS, MAX_RX_QS);
	if (netdev == NULL) {
		status = -ENOMEM;
		goto rel_reg;
	}
	adapter = netdev_priv(netdev);
	adapter->pdev = pdev;
	pci_set_drvdata(pdev, adapter);

	status = be_dev_type_check(adapter);
	if (status)
		goto free_netdev;

	adapter->netdev = netdev;
	SET_NETDEV_DEV(netdev, &pdev->dev);

	/* Prefer 64-bit DMA; fall back to 32-bit */
	status = dma_set_mask(&pdev->dev, DMA_BIT_MASK(64));
	if (!status) {
		netdev->features |= NETIF_F_HIGHDMA;
	} else {
		status = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
		if (status) {
			dev_err(&pdev->dev, "Could not set PCI DMA Mask\n");
			goto free_netdev;
		}
	}

	/* AER is best-effort; failure is reported but not fatal */
	status = pci_enable_pcie_error_reporting(pdev);
	if (status)
		dev_err(&pdev->dev, "Could not use PCIe error reporting\n");

	status = be_ctrl_init(adapter);
	if (status)
		goto free_netdev;

	/* sync up with fw's ready state */
	if (be_physfn(adapter)) {
		status = be_fw_wait_ready(adapter);
		if (status)
			goto ctrl_clean;
	}

	/* tell fw we're ready to fire cmds */
	status = be_cmd_fw_init(adapter);
	if (status)
		goto ctrl_clean;

	/* Skip the reset when VFs from a previous load are still enabled */
	if (be_reset_required(adapter)) {
		status = be_cmd_reset_function(adapter);
		if (status)
			goto ctrl_clean;
	}

	/* The INTR bit may be set in the card when probed by a kdump kernel
	 * after a crash.
	 */
	if (!lancer_chip(adapter))
		be_intr_set(adapter, false);

	status = be_stats_init(adapter);
	if (status)
		goto ctrl_clean;

	status = be_get_initial_config(adapter);
	if (status)
		goto stats_clean;

	INIT_DELAYED_WORK(&adapter->work, be_worker);
	INIT_DELAYED_WORK(&adapter->func_recovery_work, be_func_recovery_task);
	adapter->rx_fc = adapter->tx_fc = true;

	status = be_setup(adapter);
	if (status)
		goto stats_clean;

	be_netdev_init(netdev);
	status = register_netdev(netdev);
	if (status != 0)
		goto unsetup;

	be_roce_dev_add(adapter);

	schedule_delayed_work(&adapter->func_recovery_work,
			      msecs_to_jiffies(1000));

	be_cmd_query_port_name(adapter, &port_name);

	dev_info(&pdev->dev, "%s: %s %s port %c\n", nic_name(pdev),
		 func_name(adapter), mc_name(adapter), port_name);

	return 0;

unsetup:
	be_clear(adapter);
stats_clean:
	be_stats_cleanup(adapter);
ctrl_clean:
	be_ctrl_cleanup(adapter);
free_netdev:
	free_netdev(netdev);
	pci_set_drvdata(pdev, NULL);
rel_reg:
	pci_release_regions(pdev);
disable_dev:
	pci_disable_device(pdev);
do_none:
	dev_err(&pdev->dev, "%s initialization failed\n", nic_name(pdev));
	return status;
}
4030
/* PM suspend: arm WoL if configured, stop the recovery worker, quiesce
 * and tear down the data path, then power the PCI function down.
 */
static int be_suspend(struct pci_dev *pdev, pm_message_t state)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);
	struct net_device *netdev = adapter->netdev;

	if (adapter->wol)
		be_setup_wol(adapter, true);

	cancel_delayed_work_sync(&adapter->func_recovery_work);

	netif_device_detach(netdev);
	if (netif_running(netdev)) {
		rtnl_lock();
		be_close(netdev);
		rtnl_unlock();
	}
	be_clear(adapter);

	pci_save_state(pdev);
	pci_disable_device(pdev);
	pci_set_power_state(pdev, pci_choose_state(pdev, state));
	return 0;
}
4054
/* PM resume: power the function back up, re-initialize the FW, rebuild
 * the data path (reversing be_suspend), restart the recovery worker and
 * disarm WoL. Returns 0 or a negative status from an early step.
 */
static int be_resume(struct pci_dev *pdev)
{
	int status = 0;
	struct be_adapter *adapter = pci_get_drvdata(pdev);
	struct net_device *netdev = adapter->netdev;

	netif_device_detach(netdev);

	status = pci_enable_device(pdev);
	if (status)
		return status;

	pci_set_power_state(pdev, 0);
	pci_restore_state(pdev);

	/* tell fw we're ready to fire cmds */
	status = be_cmd_fw_init(adapter);
	if (status)
		return status;

	/* NOTE(review): be_setup()'s return value is ignored here, unlike in
	 * be_probe(); a setup failure leaves the device detached */
	be_setup(adapter);
	if (netif_running(netdev)) {
		rtnl_lock();
		be_open(netdev);
		rtnl_unlock();
	}

	schedule_delayed_work(&adapter->func_recovery_work,
			      msecs_to_jiffies(1000));
	netif_device_attach(netdev);

	if (adapter->wol)
		be_setup_wol(adapter, false);

	return 0;
}
4091
/*
 * An FLR will stop BE from DMAing any data.
 */
/* PCI shutdown: stop workers, arm WoL if configured, and reset the
 * function so the HW stops all DMA before the system goes down.
 */
static void be_shutdown(struct pci_dev *pdev)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);

	/* Probe may have failed before drvdata was set */
	if (!adapter)
		return;

	cancel_delayed_work_sync(&adapter->work);
	cancel_delayed_work_sync(&adapter->func_recovery_work);

	netif_device_detach(adapter->netdev);

	if (adapter->wol)
		be_setup_wol(adapter, true);

	be_cmd_reset_function(adapter);

	pci_disable_device(pdev);
}
4114
/* PCI error handler, step 1: an EEH/AER error was detected. Flag the
 * error (which also stops the recovery worker from interfering), detach
 * and quiesce the device, and tell the core whether a slot reset can be
 * attempted.
 */
static pci_ers_result_t be_eeh_err_detected(struct pci_dev *pdev,
				pci_channel_state_t state)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);
	struct net_device *netdev = adapter->netdev;

	dev_err(&adapter->pdev->dev, "EEH error detected\n");

	adapter->eeh_error = true;

	cancel_delayed_work_sync(&adapter->func_recovery_work);

	rtnl_lock();
	netif_device_detach(netdev);
	rtnl_unlock();

	if (netif_running(netdev)) {
		rtnl_lock();
		be_close(netdev);
		rtnl_unlock();
	}
	be_clear(adapter);

	/* Permanent failure: no reset will help, disconnect the driver */
	if (state == pci_channel_io_perm_failure)
		return PCI_ERS_RESULT_DISCONNECT;

	pci_disable_device(pdev);

	/* The error could cause the FW to trigger a flash debug dump.
	 * Resetting the card while flash dump is in progress
	 * can cause it not to recover; wait for it to finish
	 */
	ssleep(30);
	return PCI_ERS_RESULT_NEED_RESET;
}
4150
4151static pci_ers_result_t be_eeh_reset(struct pci_dev *pdev)
4152{
4153 struct be_adapter *adapter = pci_get_drvdata(pdev);
4154 int status;
4155
4156 dev_info(&adapter->pdev->dev, "EEH reset\n");
Padmanabh Ratnakarf67ef7b2012-07-12 03:57:09 +00004157 be_clear_all_error(adapter);
Sathya Perlacf588472010-02-14 21:22:01 +00004158
4159 status = pci_enable_device(pdev);
4160 if (status)
4161 return PCI_ERS_RESULT_DISCONNECT;
4162
4163 pci_set_master(pdev);
4164 pci_set_power_state(pdev, 0);
4165 pci_restore_state(pdev);
4166
4167 /* Check if card is ok and fw is ready */
Padmanabh Ratnakarbf99e502012-07-12 03:56:58 +00004168 status = be_fw_wait_ready(adapter);
Sathya Perlacf588472010-02-14 21:22:01 +00004169 if (status)
4170 return PCI_ERS_RESULT_DISCONNECT;
4171
Sathya Perlad6b6d982012-09-05 01:56:48 +00004172 pci_cleanup_aer_uncorrect_error_status(pdev);
Sathya Perlacf588472010-02-14 21:22:01 +00004173 return PCI_ERS_RESULT_RECOVERED;
4174}
4175
/* EEH callback: slot reset completed successfully; re-initialize the
 * adapter, reopen the interface if it was running, and restart the
 * error-recovery worker.  On any failure the device is left detached
 * and only an error is logged.
 */
static void be_eeh_resume(struct pci_dev *pdev)
{
	int status = 0;
	struct be_adapter *adapter = pci_get_drvdata(pdev);
	struct net_device *netdev = adapter->netdev;

	dev_info(&adapter->pdev->dev, "EEH resume\n");

	pci_save_state(pdev);

	/* tell fw we're ready to fire cmds */
	status = be_cmd_fw_init(adapter);
	if (status)
		goto err;

	/* NOTE(review): presumably clears any function state left over
	 * from before the EEH event -- confirm against FW documentation
	 */
	status = be_cmd_reset_function(adapter);
	if (status)
		goto err;

	status = be_setup(adapter);
	if (status)
		goto err;

	if (netif_running(netdev)) {
		status = be_open(netdev);
		if (status)
			goto err;
	}

	/* re-arm the recovery worker, first run after 1 second */
	schedule_delayed_work(&adapter->func_recovery_work,
			      msecs_to_jiffies(1000));
	netif_device_attach(netdev);
	return;
err:
	dev_err(&adapter->pdev->dev, "EEH resume failed\n");
}
4212
/* PCI error-recovery (EEH/AER) entry points, wired into be_driver below */
static const struct pci_error_handlers be_eeh_handlers = {
	.error_detected = be_eeh_err_detected,	/* quiesce on channel error */
	.slot_reset = be_eeh_reset,		/* re-enable after slot reset */
	.resume = be_eeh_resume,		/* re-init and reattach */
};
4218
/* PCI driver registration: probe/remove, power management, shutdown
 * and error-recovery callbacks for all devices matched by be_dev_ids.
 */
static struct pci_driver be_driver = {
	.name = DRV_NAME,
	.id_table = be_dev_ids,
	.probe = be_probe,
	.remove = be_remove,
	.suspend = be_suspend,
	.resume = be_resume,
	.shutdown = be_shutdown,
	.err_handler = &be_eeh_handlers
};
4229
4230static int __init be_init_module(void)
4231{
Joe Perches8e95a202009-12-03 07:58:21 +00004232 if (rx_frag_size != 8192 && rx_frag_size != 4096 &&
4233 rx_frag_size != 2048) {
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004234 printk(KERN_WARNING DRV_NAME
4235 " : Module param rx_frag_size must be 2048/4096/8192."
4236 " Using 2048\n");
4237 rx_frag_size = 2048;
4238 }
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004239
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004240 return pci_register_driver(&be_driver);
4241}
4242module_init(be_init_module);
4243
/* Module exit point: unregister the PCI driver (the PCI core will then
 * invoke be_remove() for every bound device).
 */
static void __exit be_exit_module(void)
{
	pci_unregister_driver(&be_driver);
}