blob: 0b49201b8b4595fbd64b24676dde5828e44d2dfc [file] [log] [blame]
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001/*
Ajit Khaparded2145cd2011-03-16 08:20:46 +00002 * Copyright (C) 2005 - 2011 Emulex
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003 * All rights reserved.
4 *
5 * This program is free software; you can redistribute it and/or
6 * modify it under the terms of the GNU General Public License version 2
7 * as published by the Free Software Foundation. The full GNU General
8 * Public License is included in this distribution in the file called COPYING.
9 *
10 * Contact Information:
Ajit Khaparded2145cd2011-03-16 08:20:46 +000011 * linux-drivers@emulex.com
Sathya Perla6b7c5b92009-03-11 23:32:03 -070012 *
Ajit Khaparded2145cd2011-03-16 08:20:46 +000013 * Emulex
14 * 3333 Susan Street
15 * Costa Mesa, CA 92626
Sathya Perla6b7c5b92009-03-11 23:32:03 -070016 */
17
Paul Gortmaker70c71602011-05-22 16:47:17 -040018#include <linux/prefetch.h>
Paul Gortmaker9d9779e2011-07-03 15:21:01 -040019#include <linux/module.h>
Sathya Perla6b7c5b92009-03-11 23:32:03 -070020#include "be.h"
Sathya Perla8788fdc2009-07-27 22:52:03 +000021#include "be_cmds.h"
Stephen Hemminger65f71b82009-03-27 00:25:24 -070022#include <asm/div64.h>
Sathya Perlad6b6d982012-09-05 01:56:48 +000023#include <linux/aer.h>
Sathya Perla6b7c5b92009-03-11 23:32:03 -070024
25MODULE_VERSION(DRV_VER);
26MODULE_DEVICE_TABLE(pci, be_dev_ids);
27MODULE_DESCRIPTION(DRV_DESC " " DRV_VER);
28MODULE_AUTHOR("ServerEngines Corporation");
29MODULE_LICENSE("GPL");
30
Sarveshwar Bandiba343c72010-03-31 02:56:12 +000031static unsigned int num_vfs;
Sarveshwar Bandiba343c72010-03-31 02:56:12 +000032module_param(num_vfs, uint, S_IRUGO);
Sarveshwar Bandiba343c72010-03-31 02:56:12 +000033MODULE_PARM_DESC(num_vfs, "Number of PCI VFs to initialize");
Sathya Perla6b7c5b92009-03-11 23:32:03 -070034
Sathya Perla11ac75e2011-12-13 00:58:50 +000035static ushort rx_frag_size = 2048;
36module_param(rx_frag_size, ushort, S_IRUGO);
37MODULE_PARM_DESC(rx_frag_size, "Size of a fragment that holds rcvd data.");
38
/* PCI IDs this driver binds to; the table is terminated by a zero entry */
static DEFINE_PCI_DEVICE_TABLE(be_dev_ids) = {
	{ PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID1) },
	{ PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID2) },
	{ PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID1) },
	{ PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID2) },
	{ PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID3)},
	{ PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID4)},
	{ PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID5)},
	{ 0 }
};
MODULE_DEVICE_TABLE(pci, be_dev_ids);
/* UE Status Low CSR: human-readable name for each bit position
 * (array index == bit number), used when reporting unrecoverable errors.
 */
static const char * const ue_status_low_desc[] = {
	"CEV",
	"CTX",
	"DBUF",
	"ERX",
	"Host",
	"MPU",
	"NDMA",
	"PTC ",
	"RDMA ",
	"RXF ",
	"RXIPS ",
	"RXULP0 ",
	"RXULP1 ",
	"RXULP2 ",
	"TIM ",
	"TPOST ",
	"TPRE ",
	"TXIPS ",
	"TXULP0 ",
	"TXULP1 ",
	"UC ",
	"WDMA ",
	"TXULP2 ",
	"HOST1 ",
	"P0_OB_LINK ",
	"P1_OB_LINK ",
	"HOST_GPIO ",
	"MBOX ",
	"AXGMAC0",
	"AXGMAC1",
	"JTAG",
	"MPU_INTPEND"
};
/* UE Status High CSR: human-readable name for each bit position
 * (array index == bit number); trailing bits have no documented meaning.
 */
static const char * const ue_status_hi_desc[] = {
	"LPCMEMHOST",
	"MGMT_MAC",
	"PCS0ONLINE",
	"MPU_IRAM",
	"PCS1ONLINE",
	"PCTL0",
	"PCTL1",
	"PMEM",
	"RR",
	"TXPB",
	"RXPP",
	"XAUI",
	"TXP",
	"ARM",
	"IPC",
	"HOST2",
	"HOST3",
	"HOST4",
	"HOST5",
	"HOST6",
	"HOST7",
	"HOST8",
	"HOST9",
	"NETC",
	"Unknown",
	"Unknown",
	"Unknown",
	"Unknown",
	"Unknown",
	"Unknown",
	"Unknown",
	"Unknown"
};
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700120
Sathya Perla752961a2011-10-24 02:45:03 +0000121/* Is BE in a multi-channel mode */
122static inline bool be_is_mc(struct be_adapter *adapter) {
123 return (adapter->function_mode & FLEX10_MODE ||
124 adapter->function_mode & VNIC_MODE ||
125 adapter->function_mode & UMC_ENABLED);
126}
127
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700128static void be_queue_free(struct be_adapter *adapter, struct be_queue_info *q)
129{
130 struct be_dma_mem *mem = &q->dma_mem;
Sathya Perla1cfafab2012-02-23 18:50:15 +0000131 if (mem->va) {
Ivan Vecera2b7bceb2011-02-02 08:05:12 +0000132 dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
133 mem->dma);
Sathya Perla1cfafab2012-02-23 18:50:15 +0000134 mem->va = NULL;
135 }
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700136}
137
138static int be_queue_alloc(struct be_adapter *adapter, struct be_queue_info *q,
139 u16 len, u16 entry_size)
140{
141 struct be_dma_mem *mem = &q->dma_mem;
142
143 memset(q, 0, sizeof(*q));
144 q->len = len;
145 q->entry_size = entry_size;
146 mem->size = len * entry_size;
Ivan Vecera2b7bceb2011-02-02 08:05:12 +0000147 mem->va = dma_alloc_coherent(&adapter->pdev->dev, mem->size, &mem->dma,
148 GFP_KERNEL);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700149 if (!mem->va)
Sathya Perla10ef9ab2012-02-09 18:05:27 +0000150 return -ENOMEM;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700151 memset(mem->va, 0, mem->size);
152 return 0;
153}
154
/* Enable or disable host interrupts by toggling the HOSTINTR bit of the
 * MEMBAR interrupt-control register in PCI config space.
 * No-op when the register already reflects the requested state, or after
 * an EEH/PCI error has been flagged on the adapter.
 */
static void be_intr_set(struct be_adapter *adapter, bool enable)
{
	u32 reg, enabled;

	/* don't touch config space once a PCI/EEH error has been seen */
	if (adapter->eeh_error)
		return;

	pci_read_config_dword(adapter->pdev, PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET,
				&reg);
	enabled = reg & MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;

	if (!enabled && enable)
		reg |= MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
	else if (enabled && !enable)
		reg &= ~MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
	else
		return;		/* already in the requested state; skip the write */

	pci_write_config_dword(adapter->pdev,
			PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET, reg);
}
176
Sathya Perla8788fdc2009-07-27 22:52:03 +0000177static void be_rxq_notify(struct be_adapter *adapter, u16 qid, u16 posted)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700178{
179 u32 val = 0;
180 val |= qid & DB_RQ_RING_ID_MASK;
181 val |= posted << DB_RQ_NUM_POSTED_SHIFT;
Sathya Perlaf3eb62d2010-06-29 00:11:17 +0000182
183 wmb();
Sathya Perla8788fdc2009-07-27 22:52:03 +0000184 iowrite32(val, adapter->db + DB_RQ_OFFSET);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700185}
186
Sathya Perla8788fdc2009-07-27 22:52:03 +0000187static void be_txq_notify(struct be_adapter *adapter, u16 qid, u16 posted)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700188{
189 u32 val = 0;
190 val |= qid & DB_TXULP_RING_ID_MASK;
191 val |= (posted & DB_TXULP_NUM_POSTED_MASK) << DB_TXULP_NUM_POSTED_SHIFT;
Sathya Perlaf3eb62d2010-06-29 00:11:17 +0000192
193 wmb();
Sathya Perla8788fdc2009-07-27 22:52:03 +0000194 iowrite32(val, adapter->db + DB_TXULP1_OFFSET);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700195}
196
/* Ring the event-queue doorbell for @qid: acknowledge @num_popped event
 * entries, optionally re-arm the EQ (@arm) and/or clear the interrupt
 * (@clear_int).
 */
static void be_eq_notify(struct be_adapter *adapter, u16 qid,
		bool arm, bool clear_int, u16 num_popped)
{
	u32 val = 0;
	val |= qid & DB_EQ_RING_ID_MASK;
	/* upper ring-id bits occupy a separate field of the doorbell word */
	val |= ((qid & DB_EQ_RING_ID_EXT_MASK) <<
			DB_EQ_RING_ID_EXT_MASK_SHIFT);

	/* skip the MMIO write once a PCI/EEH error has been flagged */
	if (adapter->eeh_error)
		return;

	if (arm)
		val |= 1 << DB_EQ_REARM_SHIFT;
	if (clear_int)
		val |= 1 << DB_EQ_CLR_SHIFT;
	val |= 1 << DB_EQ_EVNT_SHIFT;
	val |= num_popped << DB_EQ_NUM_POPPED_SHIFT;
	iowrite32(val, adapter->db + DB_EQ_OFFSET);
}
216
Sathya Perla8788fdc2009-07-27 22:52:03 +0000217void be_cq_notify(struct be_adapter *adapter, u16 qid, bool arm, u16 num_popped)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700218{
219 u32 val = 0;
220 val |= qid & DB_CQ_RING_ID_MASK;
Sathya Perlafe6d2a32010-11-21 23:25:50 +0000221 val |= ((qid & DB_CQ_RING_ID_EXT_MASK) <<
222 DB_CQ_RING_ID_EXT_MASK_SHIFT);
Sathya Perlacf588472010-02-14 21:22:01 +0000223
Padmanabh Ratnakarf67ef7b2012-07-12 03:57:09 +0000224 if (adapter->eeh_error)
Sathya Perlacf588472010-02-14 21:22:01 +0000225 return;
226
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700227 if (arm)
228 val |= 1 << DB_CQ_REARM_SHIFT;
229 val |= num_popped << DB_CQ_NUM_POPPED_SHIFT;
Sathya Perla8788fdc2009-07-27 22:52:03 +0000230 iowrite32(val, adapter->db + DB_CQ_OFFSET);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700231}
232
/* ndo_set_mac_address handler.
 * The new MAC is programmed (pmac_add) before the old one is removed
 * (pmac_del) so the interface never transiently loses its MAC filter.
 * Returns 0 on success or a negative errno.
 */
static int be_mac_addr_set(struct net_device *netdev, void *p)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct sockaddr *addr = p;
	int status = 0;
	u8 current_mac[ETH_ALEN];
	u32 pmac_id = adapter->pmac_id[0];	/* old pmac, deleted after the add */

	if (!is_valid_ether_addr(addr->sa_data))
		return -EADDRNOTAVAIL;

	/* fetch the MAC currently programmed on the interface */
	status = be_cmd_mac_addr_query(adapter, current_mac, false,
				adapter->if_handle, 0);
	if (status)
		goto err;

	/* reprogram only if the requested MAC actually differs */
	if (memcmp(addr->sa_data, current_mac, ETH_ALEN)) {
		status = be_cmd_pmac_add(adapter, (u8 *)addr->sa_data,
				adapter->if_handle, &adapter->pmac_id[0], 0);
		if (status)
			goto err;

		be_cmd_pmac_del(adapter, adapter->if_handle, pmac_id, 0);
	}
	memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
	return 0;
err:
	dev_err(&adapter->pdev->dev, "MAC %pM set Failed\n", addr->sa_data);
	return status;
}
263
/* Copy the BE2 (v0 layout) f/w stats into the chip-agnostic driver
 * stats (adapter->drv_stats). Port counters are taken from this
 * function's port entry in the rxf stats.
 */
static void populate_be2_stats(struct be_adapter *adapter)
{
	struct be_hw_stats_v0 *hw_stats = hw_stats_from_cmd(adapter);
	struct be_pmem_stats *pmem_sts = &hw_stats->pmem;
	struct be_rxf_stats_v0 *rxf_stats = &hw_stats->rxf;
	struct be_port_rxf_stats_v0 *port_stats =
		&rxf_stats->port[adapter->port_num];
	struct be_drv_stats *drvs = &adapter->drv_stats;

	/* stats arrive from f/w in LE; convert in place before reading */
	be_dws_le_to_cpu(hw_stats, sizeof(*hw_stats));
	drvs->rx_pause_frames = port_stats->rx_pause_frames;
	drvs->rx_crc_errors = port_stats->rx_crc_errors;
	drvs->rx_control_frames = port_stats->rx_control_frames;
	drvs->rx_in_range_errors = port_stats->rx_in_range_errors;
	drvs->rx_frame_too_long = port_stats->rx_frame_too_long;
	drvs->rx_dropped_runt = port_stats->rx_dropped_runt;
	drvs->rx_ip_checksum_errs = port_stats->rx_ip_checksum_errs;
	drvs->rx_tcp_checksum_errs = port_stats->rx_tcp_checksum_errs;
	drvs->rx_udp_checksum_errs = port_stats->rx_udp_checksum_errs;
	drvs->rxpp_fifo_overflow_drop = port_stats->rx_fifo_overflow;
	drvs->rx_dropped_tcp_length = port_stats->rx_dropped_tcp_length;
	drvs->rx_dropped_too_small = port_stats->rx_dropped_too_small;
	drvs->rx_dropped_too_short = port_stats->rx_dropped_too_short;
	drvs->rx_out_range_errors = port_stats->rx_out_range_errors;
	drvs->rx_input_fifo_overflow_drop = port_stats->rx_input_fifo_overflow;
	drvs->rx_dropped_header_too_small =
		port_stats->rx_dropped_header_too_small;
	/* v0 layout splits address/vlan mismatch drops; sum them */
	drvs->rx_address_mismatch_drops =
					port_stats->rx_address_mismatch_drops +
					port_stats->rx_vlan_mismatch_drops;
	drvs->rx_alignment_symbol_errors =
		port_stats->rx_alignment_symbol_errors;

	drvs->tx_pauseframes = port_stats->tx_pauseframes;
	drvs->tx_controlframes = port_stats->tx_controlframes;

	/* jabber counters are kept per physical port in the rxf stats */
	if (adapter->port_num)
		drvs->jabber_events = rxf_stats->port1_jabber_events;
	else
		drvs->jabber_events = rxf_stats->port0_jabber_events;
	drvs->rx_drops_no_pbuf = rxf_stats->rx_drops_no_pbuf;
	drvs->rx_drops_no_erx_descr = rxf_stats->rx_drops_no_erx_descr;
	drvs->forwarded_packets = rxf_stats->forwarded_packets;
	drvs->rx_drops_mtu = rxf_stats->rx_drops_mtu;
	drvs->rx_drops_no_tpre_descr = rxf_stats->rx_drops_no_tpre_descr;
	drvs->rx_drops_too_many_frags = rxf_stats->rx_drops_too_many_frags;
	adapter->drv_stats.eth_red_drops = pmem_sts->eth_red_drops;
}
312
/* Copy the BE3 (v1 layout) f/w stats into the chip-agnostic driver
 * stats (adapter->drv_stats). Port counters are taken from this
 * function's port entry in the rxf stats.
 */
static void populate_be3_stats(struct be_adapter *adapter)
{
	struct be_hw_stats_v1 *hw_stats = hw_stats_from_cmd(adapter);
	struct be_pmem_stats *pmem_sts = &hw_stats->pmem;
	struct be_rxf_stats_v1 *rxf_stats = &hw_stats->rxf;
	struct be_port_rxf_stats_v1 *port_stats =
		&rxf_stats->port[adapter->port_num];
	struct be_drv_stats *drvs = &adapter->drv_stats;

	/* stats arrive from f/w in LE; convert in place before reading */
	be_dws_le_to_cpu(hw_stats, sizeof(*hw_stats));
	drvs->pmem_fifo_overflow_drop = port_stats->pmem_fifo_overflow_drop;
	drvs->rx_priority_pause_frames = port_stats->rx_priority_pause_frames;
	drvs->rx_pause_frames = port_stats->rx_pause_frames;
	drvs->rx_crc_errors = port_stats->rx_crc_errors;
	drvs->rx_control_frames = port_stats->rx_control_frames;
	drvs->rx_in_range_errors = port_stats->rx_in_range_errors;
	drvs->rx_frame_too_long = port_stats->rx_frame_too_long;
	drvs->rx_dropped_runt = port_stats->rx_dropped_runt;
	drvs->rx_ip_checksum_errs = port_stats->rx_ip_checksum_errs;
	drvs->rx_tcp_checksum_errs = port_stats->rx_tcp_checksum_errs;
	drvs->rx_udp_checksum_errs = port_stats->rx_udp_checksum_errs;
	drvs->rx_dropped_tcp_length = port_stats->rx_dropped_tcp_length;
	drvs->rx_dropped_too_small = port_stats->rx_dropped_too_small;
	drvs->rx_dropped_too_short = port_stats->rx_dropped_too_short;
	drvs->rx_out_range_errors = port_stats->rx_out_range_errors;
	drvs->rx_dropped_header_too_small =
		port_stats->rx_dropped_header_too_small;
	drvs->rx_input_fifo_overflow_drop =
		port_stats->rx_input_fifo_overflow_drop;
	/* unlike v0, v1 reports a single combined mismatch-drop counter */
	drvs->rx_address_mismatch_drops = port_stats->rx_address_mismatch_drops;
	drvs->rx_alignment_symbol_errors =
		port_stats->rx_alignment_symbol_errors;
	drvs->rxpp_fifo_overflow_drop = port_stats->rxpp_fifo_overflow_drop;
	drvs->tx_pauseframes = port_stats->tx_pauseframes;
	drvs->tx_controlframes = port_stats->tx_controlframes;
	drvs->jabber_events = port_stats->jabber_events;
	drvs->rx_drops_no_pbuf = rxf_stats->rx_drops_no_pbuf;
	drvs->rx_drops_no_erx_descr = rxf_stats->rx_drops_no_erx_descr;
	drvs->forwarded_packets = rxf_stats->forwarded_packets;
	drvs->rx_drops_mtu = rxf_stats->rx_drops_mtu;
	drvs->rx_drops_no_tpre_descr = rxf_stats->rx_drops_no_tpre_descr;
	drvs->rx_drops_too_many_frags = rxf_stats->rx_drops_too_many_frags;
	adapter->drv_stats.eth_red_drops = pmem_sts->eth_red_drops;
}
357
/* Copy the Lancer per-port (pport) f/w stats into the chip-agnostic
 * driver stats (adapter->drv_stats); reads the *_lo counter fields of
 * the pport stats response.
 */
static void populate_lancer_stats(struct be_adapter *adapter)
{
	struct be_drv_stats *drvs = &adapter->drv_stats;
	struct lancer_pport_stats *pport_stats =
					pport_stats_from_cmd(adapter);

	/* stats arrive from f/w in LE; convert in place before reading */
	be_dws_le_to_cpu(pport_stats, sizeof(*pport_stats));
	drvs->rx_pause_frames = pport_stats->rx_pause_frames_lo;
	drvs->rx_crc_errors = pport_stats->rx_crc_errors_lo;
	drvs->rx_control_frames = pport_stats->rx_control_frames_lo;
	drvs->rx_in_range_errors = pport_stats->rx_in_range_errors;
	drvs->rx_frame_too_long = pport_stats->rx_frames_too_long_lo;
	drvs->rx_dropped_runt = pport_stats->rx_dropped_runt;
	drvs->rx_ip_checksum_errs = pport_stats->rx_ip_checksum_errors;
	drvs->rx_tcp_checksum_errs = pport_stats->rx_tcp_checksum_errors;
	drvs->rx_udp_checksum_errs = pport_stats->rx_udp_checksum_errors;
	drvs->rx_dropped_tcp_length =
				pport_stats->rx_dropped_invalid_tcp_length;
	drvs->rx_dropped_too_small = pport_stats->rx_dropped_too_small;
	drvs->rx_dropped_too_short = pport_stats->rx_dropped_too_short;
	drvs->rx_out_range_errors = pport_stats->rx_out_of_range_errors;
	drvs->rx_dropped_header_too_small =
				pport_stats->rx_dropped_header_too_small;
	/* NOTE(review): both input-fifo and rxpp-fifo drv counters are fed
	 * from the same rx_fifo_overflow f/w counter here
	 */
	drvs->rx_input_fifo_overflow_drop = pport_stats->rx_fifo_overflow;
	drvs->rx_address_mismatch_drops =
					pport_stats->rx_address_mismatch_drops +
					pport_stats->rx_vlan_mismatch_drops;
	drvs->rx_alignment_symbol_errors = pport_stats->rx_symbol_errors_lo;
	drvs->rxpp_fifo_overflow_drop = pport_stats->rx_fifo_overflow;
	drvs->tx_pauseframes = pport_stats->tx_pause_frames_lo;
	drvs->tx_controlframes = pport_stats->tx_control_frames_lo;
	drvs->jabber_events = pport_stats->rx_jabbers;
	drvs->forwarded_packets = pport_stats->num_forwards_lo;
	drvs->rx_drops_mtu = pport_stats->rx_drops_mtu_lo;
	drvs->rx_drops_too_many_frags =
				pport_stats->rx_drops_too_many_frags_lo;
}
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000396
Sathya Perla09c1c682011-08-22 19:41:53 +0000397static void accumulate_16bit_val(u32 *acc, u16 val)
398{
399#define lo(x) (x & 0xFFFF)
400#define hi(x) (x & 0xFFFF0000)
401 bool wrapped = val < lo(*acc);
402 u32 newacc = hi(*acc) + val;
403
404 if (wrapped)
405 newacc += 65536;
406 ACCESS_ONCE(*acc) = newacc;
407}
408
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000409void be_parse_stats(struct be_adapter *adapter)
410{
Sathya Perlaac124ff2011-07-25 19:10:14 +0000411 struct be_erx_stats_v1 *erx = be_erx_stats_from_cmd(adapter);
412 struct be_rx_obj *rxo;
413 int i;
414
Selvin Xavier005d5692011-05-16 07:36:35 +0000415 if (adapter->generation == BE_GEN3) {
416 if (lancer_chip(adapter))
417 populate_lancer_stats(adapter);
418 else
419 populate_be3_stats(adapter);
420 } else {
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000421 populate_be2_stats(adapter);
Selvin Xavier005d5692011-05-16 07:36:35 +0000422 }
Sathya Perlaac124ff2011-07-25 19:10:14 +0000423
Padmanabh Ratnakard51ebd32012-04-25 01:46:52 +0000424 if (lancer_chip(adapter))
425 goto done;
426
Sathya Perlaac124ff2011-07-25 19:10:14 +0000427 /* as erx_v1 is longer than v0, ok to use v1 defn for v0 access */
Sathya Perla09c1c682011-08-22 19:41:53 +0000428 for_all_rx_queues(adapter, rxo, i) {
429 /* below erx HW counter can actually wrap around after
430 * 65535. Driver accumulates a 32-bit value
431 */
432 accumulate_16bit_val(&rx_stats(rxo)->rx_drops_no_frags,
433 (u16)erx->rx_drops_no_fragments[rxo->q.id]);
434 }
Padmanabh Ratnakard51ebd32012-04-25 01:46:52 +0000435done:
436 return;
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000437}
438
/* ndo_get_stats64 handler: aggregate per-queue SW counters (read under
 * u64_stats sync for consistent 64-bit reads on 32-bit hosts) and the
 * f/w-derived error counters into @stats. Returns @stats.
 */
static struct rtnl_link_stats64 *be_get_stats64(struct net_device *netdev,
					struct rtnl_link_stats64 *stats)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_drv_stats *drvs = &adapter->drv_stats;
	struct be_rx_obj *rxo;
	struct be_tx_obj *txo;
	u64 pkts, bytes;
	unsigned int start;
	int i;

	for_all_rx_queues(adapter, rxo, i) {
		const struct be_rx_stats *rx_stats = rx_stats(rxo);
		/* retry if a writer updated the counters mid-read */
		do {
			start = u64_stats_fetch_begin_bh(&rx_stats->sync);
			pkts = rx_stats(rxo)->rx_pkts;
			bytes = rx_stats(rxo)->rx_bytes;
		} while (u64_stats_fetch_retry_bh(&rx_stats->sync, start));
		stats->rx_packets += pkts;
		stats->rx_bytes += bytes;
		stats->multicast += rx_stats(rxo)->rx_mcast_pkts;
		stats->rx_dropped += rx_stats(rxo)->rx_drops_no_skbs +
					rx_stats(rxo)->rx_drops_no_frags;
	}

	for_all_tx_queues(adapter, txo, i) {
		const struct be_tx_stats *tx_stats = tx_stats(txo);
		/* retry if a writer updated the counters mid-read */
		do {
			start = u64_stats_fetch_begin_bh(&tx_stats->sync);
			pkts = tx_stats(txo)->tx_pkts;
			bytes = tx_stats(txo)->tx_bytes;
		} while (u64_stats_fetch_retry_bh(&tx_stats->sync, start));
		stats->tx_packets += pkts;
		stats->tx_bytes += bytes;
	}

	/* bad pkts received */
	stats->rx_errors = drvs->rx_crc_errors +
		drvs->rx_alignment_symbol_errors +
		drvs->rx_in_range_errors +
		drvs->rx_out_range_errors +
		drvs->rx_frame_too_long +
		drvs->rx_dropped_too_small +
		drvs->rx_dropped_too_short +
		drvs->rx_dropped_header_too_small +
		drvs->rx_dropped_tcp_length +
		drvs->rx_dropped_runt;

	/* detailed rx errors */
	stats->rx_length_errors = drvs->rx_in_range_errors +
		drvs->rx_out_range_errors +
		drvs->rx_frame_too_long;

	stats->rx_crc_errors = drvs->rx_crc_errors;

	/* frame alignment errors */
	stats->rx_frame_errors = drvs->rx_alignment_symbol_errors;

	/* receiver fifo overrun */
	/* drops_no_pbuf is no per i/f, it's per BE card */
	stats->rx_fifo_errors = drvs->rxpp_fifo_overflow_drop +
				drvs->rx_input_fifo_overflow_drop +
				drvs->rx_drops_no_pbuf;
	return stats;
}
504
Ajit Khapardeb236916a2011-12-30 12:15:40 +0000505void be_link_status_update(struct be_adapter *adapter, u8 link_status)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700506{
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700507 struct net_device *netdev = adapter->netdev;
508
Ajit Khapardeb236916a2011-12-30 12:15:40 +0000509 if (!(adapter->flags & BE_FLAGS_LINK_STATUS_INIT)) {
Sathya Perlaea172a02011-08-02 19:57:42 +0000510 netif_carrier_off(netdev);
Ajit Khapardeb236916a2011-12-30 12:15:40 +0000511 adapter->flags |= BE_FLAGS_LINK_STATUS_INIT;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700512 }
Ajit Khapardeb236916a2011-12-30 12:15:40 +0000513
514 if ((link_status & LINK_STATUS_MASK) == LINK_UP)
515 netif_carrier_on(netdev);
516 else
517 netif_carrier_off(netdev);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700518}
519
Sathya Perla3c8def92011-06-12 20:01:58 +0000520static void be_tx_stats_update(struct be_tx_obj *txo,
Ajit Khaparde91992e42010-02-19 13:57:12 +0000521 u32 wrb_cnt, u32 copied, u32 gso_segs, bool stopped)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700522{
Sathya Perla3c8def92011-06-12 20:01:58 +0000523 struct be_tx_stats *stats = tx_stats(txo);
524
Sathya Perlaab1594e2011-07-25 19:10:15 +0000525 u64_stats_update_begin(&stats->sync);
Sathya Perlaac124ff2011-07-25 19:10:14 +0000526 stats->tx_reqs++;
527 stats->tx_wrbs += wrb_cnt;
528 stats->tx_bytes += copied;
529 stats->tx_pkts += (gso_segs ? gso_segs : 1);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700530 if (stopped)
Sathya Perlaac124ff2011-07-25 19:10:14 +0000531 stats->tx_stops++;
Sathya Perlaab1594e2011-07-25 19:10:15 +0000532 u64_stats_update_end(&stats->sync);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700533}
534
535/* Determine number of WRB entries needed to xmit data in an skb */
Sathya Perlafe6d2a32010-11-21 23:25:50 +0000536static u32 wrb_cnt_for_skb(struct be_adapter *adapter, struct sk_buff *skb,
537 bool *dummy)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700538{
David S. Millerebc8d2a2009-06-09 01:01:31 -0700539 int cnt = (skb->len > skb->data_len);
540
541 cnt += skb_shinfo(skb)->nr_frags;
542
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700543 /* to account for hdr wrb */
544 cnt++;
Sathya Perlafe6d2a32010-11-21 23:25:50 +0000545 if (lancer_chip(adapter) || !(cnt & 1)) {
546 *dummy = false;
547 } else {
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700548 /* add a dummy to make it an even num */
549 cnt++;
550 *dummy = true;
Sathya Perlafe6d2a32010-11-21 23:25:50 +0000551 }
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700552 BUG_ON(cnt > BE_MAX_TX_FRAG_COUNT);
553 return cnt;
554}
555
556static inline void wrb_fill(struct be_eth_wrb *wrb, u64 addr, int len)
557{
558 wrb->frag_pa_hi = upper_32_bits(addr);
559 wrb->frag_pa_lo = addr & 0xFFFFFFFF;
560 wrb->frag_len = len & ETH_WRB_FRAG_LEN_MASK;
Somnath Kotur89b1f492012-06-24 19:40:55 +0000561 wrb->rsvd0 = 0;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700562}
563
Ajit Khaparde1ded1322011-12-09 13:53:17 +0000564static inline u16 be_get_tx_vlan_tag(struct be_adapter *adapter,
565 struct sk_buff *skb)
566{
567 u8 vlan_prio;
568 u16 vlan_tag;
569
570 vlan_tag = vlan_tx_tag_get(skb);
571 vlan_prio = (vlan_tag & VLAN_PRIO_MASK) >> VLAN_PRIO_SHIFT;
572 /* If vlan priority provided by OS is NOT in available bmap */
573 if (!(adapter->vlan_prio_bmap & (1 << vlan_prio)))
574 vlan_tag = (vlan_tag & ~VLAN_PRIO_MASK) |
575 adapter->recommended_prio;
576
577 return vlan_tag;
578}
579
Somnath Kotur93040ae2012-06-26 22:32:10 +0000580static int be_vlan_tag_chk(struct be_adapter *adapter, struct sk_buff *skb)
581{
582 return vlan_tx_tag_present(skb) || adapter->pvid;
583}
584
/* Fill the tx hdr wrb that precedes a packet's data wrbs: checksum
 * offload, LSO, vlan insertion, wrb count (@wrb_cnt) and total frame
 * length (@len).
 */
static void wrb_fill_hdr(struct be_adapter *adapter, struct be_eth_hdr_wrb *hdr,
		struct sk_buff *skb, u32 wrb_cnt, u32 len)
{
	u16 vlan_tag;

	memset(hdr, 0, sizeof(*hdr));

	AMAP_SET_BITS(struct amap_eth_hdr_wrb, crc, hdr, 1);

	if (skb_is_gso(skb)) {
		AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso, hdr, 1);
		AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso_mss,
			hdr, skb_shinfo(skb)->gso_size);
		if (skb_is_gso_v6(skb) && !lancer_chip(adapter))
			AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso6, hdr, 1);
		/* Lancer A0 family: csum bits are also set for GSO frames */
		if (lancer_chip(adapter) && adapter->sli_family ==
			LANCER_A0_SLI_FAMILY) {
			AMAP_SET_BITS(struct amap_eth_hdr_wrb, ipcs, hdr, 1);
			if (is_tcp_pkt(skb))
				AMAP_SET_BITS(struct amap_eth_hdr_wrb,
						tcpcs, hdr, 1);
			else if (is_udp_pkt(skb))
				AMAP_SET_BITS(struct amap_eth_hdr_wrb,
						udpcs, hdr, 1);
		}
	} else if (skb->ip_summed == CHECKSUM_PARTIAL) {
		/* stack requested HW checksum: set the L4-appropriate bit */
		if (is_tcp_pkt(skb))
			AMAP_SET_BITS(struct amap_eth_hdr_wrb, tcpcs, hdr, 1);
		else if (is_udp_pkt(skb))
			AMAP_SET_BITS(struct amap_eth_hdr_wrb, udpcs, hdr, 1);
	}

	if (vlan_tx_tag_present(skb)) {
		AMAP_SET_BITS(struct amap_eth_hdr_wrb, vlan, hdr, 1);
		vlan_tag = be_get_tx_vlan_tag(adapter, skb);
		AMAP_SET_BITS(struct amap_eth_hdr_wrb, vlan_tag, hdr, vlan_tag);
	}

	AMAP_SET_BITS(struct amap_eth_hdr_wrb, event, hdr, 1);
	AMAP_SET_BITS(struct amap_eth_hdr_wrb, complete, hdr, 1);
	AMAP_SET_BITS(struct amap_eth_hdr_wrb, num_wrb, hdr, wrb_cnt);
	AMAP_SET_BITS(struct amap_eth_hdr_wrb, len, hdr, len);
}
628
Ivan Vecera2b7bceb2011-02-02 08:05:12 +0000629static void unmap_tx_frag(struct device *dev, struct be_eth_wrb *wrb,
Sathya Perla7101e112010-03-22 20:41:12 +0000630 bool unmap_single)
631{
632 dma_addr_t dma;
633
634 be_dws_le_to_cpu(wrb, sizeof(*wrb));
635
636 dma = (u64)wrb->frag_pa_hi << 32 | (u64)wrb->frag_pa_lo;
FUJITA Tomonorib681ee72010-04-04 21:40:18 +0000637 if (wrb->frag_len) {
Sathya Perla7101e112010-03-22 20:41:12 +0000638 if (unmap_single)
Ivan Vecera2b7bceb2011-02-02 08:05:12 +0000639 dma_unmap_single(dev, dma, wrb->frag_len,
640 DMA_TO_DEVICE);
Sathya Perla7101e112010-03-22 20:41:12 +0000641 else
Ivan Vecera2b7bceb2011-02-02 08:05:12 +0000642 dma_unmap_page(dev, dma, wrb->frag_len, DMA_TO_DEVICE);
Sathya Perla7101e112010-03-22 20:41:12 +0000643 }
644}
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700645
/* Queue WRBs for @skb on @txq: one header WRB, one WRB per DMA-mapped
 * buffer (linear head followed by page frags) and, if @dummy_wrb, an
 * extra zero-length WRB.
 * Returns the number of payload bytes queued, or 0 if a DMA mapping
 * failed; on failure all mappings made so far are undone and the queue
 * head is rolled back, leaving @txq unchanged.
 */
static int make_tx_wrbs(struct be_adapter *adapter, struct be_queue_info *txq,
		struct sk_buff *skb, u32 wrb_cnt, bool dummy_wrb)
{
	dma_addr_t busaddr;
	int i, copied = 0;
	struct device *dev = &adapter->pdev->dev;
	struct sk_buff *first_skb = skb;
	struct be_eth_wrb *wrb;
	struct be_eth_hdr_wrb *hdr;
	bool map_single = false;
	u16 map_head;

	/* Reserve the first slot for the header WRB; it is filled last,
	 * once the total copied length is known.
	 */
	hdr = queue_head_node(txq);
	queue_head_inc(txq);
	/* Remember where the data WRBs start, for rollback on DMA error */
	map_head = txq->head;

	/* Map the linear (non-paged) part of the skb, if any */
	if (skb->len > skb->data_len) {
		int len = skb_headlen(skb);
		busaddr = dma_map_single(dev, skb->data, len, DMA_TO_DEVICE);
		if (dma_mapping_error(dev, busaddr))
			goto dma_err;
		map_single = true;
		wrb = queue_head_node(txq);
		wrb_fill(wrb, busaddr, len);
		be_dws_cpu_to_le(wrb, sizeof(*wrb));
		queue_head_inc(txq);
		copied += len;
	}

	/* Map each page fragment into its own WRB */
	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		const struct skb_frag_struct *frag =
			&skb_shinfo(skb)->frags[i];
		busaddr = skb_frag_dma_map(dev, frag, 0,
					skb_frag_size(frag), DMA_TO_DEVICE);
		if (dma_mapping_error(dev, busaddr))
			goto dma_err;
		wrb = queue_head_node(txq);
		wrb_fill(wrb, busaddr, skb_frag_size(frag));
		be_dws_cpu_to_le(wrb, sizeof(*wrb));
		queue_head_inc(txq);
		copied += skb_frag_size(frag);
	}

	/* Zero-length WRB appended when the caller requested one
	 * (wrb_cnt_for_skb decides this elsewhere in the file).
	 */
	if (dummy_wrb) {
		wrb = queue_head_node(txq);
		wrb_fill(wrb, 0, 0);
		be_dws_cpu_to_le(wrb, sizeof(*wrb));
		queue_head_inc(txq);
	}

	/* Now fill the reserved header WRB with the final totals */
	wrb_fill_hdr(adapter, hdr, first_skb, wrb_cnt, copied);
	be_dws_cpu_to_le(hdr, sizeof(*hdr));

	return copied;
dma_err:
	/* Roll back: restore the head and unmap everything mapped so far.
	 * Only the first WRB may be a dma_map_single mapping, hence
	 * map_single is cleared after the first iteration.
	 */
	txq->head = map_head;
	while (copied) {
		wrb = queue_head_node(txq);
		unmap_tx_frag(dev, wrb, map_single);
		map_single = false;
		copied -= wrb->frag_len;
		queue_head_inc(txq);
	}
	return 0;
}
711
Somnath Kotur93040ae2012-06-26 22:32:10 +0000712static struct sk_buff *be_insert_vlan_in_pkt(struct be_adapter *adapter,
713 struct sk_buff *skb)
714{
715 u16 vlan_tag = 0;
716
717 skb = skb_share_check(skb, GFP_ATOMIC);
718 if (unlikely(!skb))
719 return skb;
720
721 if (vlan_tx_tag_present(skb)) {
722 vlan_tag = be_get_tx_vlan_tag(adapter, skb);
723 __vlan_put_tag(skb, vlan_tag);
724 skb->vlan_tci = 0;
725 }
726
727 return skb;
728}
729
/* ndo_start_xmit handler: map the skb into TX WRBs, ring the doorbell
 * and account stats.  Always returns NETDEV_TX_OK; on any failure the
 * skb is dropped (freed) rather than requeued.
 */
static netdev_tx_t be_xmit(struct sk_buff *skb,
			struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_tx_obj *txo = &adapter->tx_obj[skb_get_queue_mapping(skb)];
	struct be_queue_info *txq = &txo->q;
	struct iphdr *ip = NULL;
	u32 wrb_cnt = 0, copied = 0;
	u32 start = txq->head, eth_hdr_len;
	bool dummy_wrb, stopped = false;

	eth_hdr_len = ntohs(skb->protocol) == ETH_P_8021Q ?
		VLAN_ETH_HLEN : ETH_HLEN;

	/* HW has a bug which considers padding bytes as legal
	 * and modifies the IPv4 hdr's 'tot_len' field.
	 * Trim the frame to the length the IP header claims.
	 */
	if (skb->len <= 60 && be_vlan_tag_chk(adapter, skb) &&
			is_ipv4_pkt(skb)) {
		ip = (struct iphdr *)ip_hdr(skb);
		pskb_trim(skb, eth_hdr_len + ntohs(ip->tot_len));
	}

	/* HW has a bug wherein it will calculate CSUM for VLAN
	 * pkts even though it is disabled.
	 * Manually insert VLAN in pkt.
	 */
	if (skb->ip_summed != CHECKSUM_PARTIAL &&
			be_vlan_tag_chk(adapter, skb)) {
		skb = be_insert_vlan_in_pkt(adapter, skb);
		if (unlikely(!skb))
			goto tx_drop;
	}

	wrb_cnt = wrb_cnt_for_skb(adapter, skb, &dummy_wrb);

	/* copied == 0 means a DMA mapping failure inside make_tx_wrbs */
	copied = make_tx_wrbs(adapter, txq, skb, wrb_cnt, dummy_wrb);
	if (copied) {
		int gso_segs = skb_shinfo(skb)->gso_segs;

		/* record the sent skb in the sent_skb table */
		BUG_ON(txo->sent_skb_list[start]);
		txo->sent_skb_list[start] = skb;

		/* Ensure txq has space for the next skb; else stop the queue
		 * *BEFORE* ringing the tx doorbell, so that we serialize the
		 * tx compls of the current transmit which'll wake up the queue
		 */
		atomic_add(wrb_cnt, &txq->used);
		if ((BE_MAX_TX_FRAG_COUNT + atomic_read(&txq->used)) >=
								txq->len) {
			netif_stop_subqueue(netdev, skb_get_queue_mapping(skb));
			stopped = true;
		}

		be_txq_notify(adapter, txq->id, wrb_cnt);

		be_tx_stats_update(txo, wrb_cnt, copied, gso_segs, stopped);
	} else {
		/* Mapping failed: undo the head reservation and drop */
		txq->head = start;
		dev_kfree_skb_any(skb);
	}
tx_drop:
	return NETDEV_TX_OK;
}
795
796static int be_change_mtu(struct net_device *netdev, int new_mtu)
797{
798 struct be_adapter *adapter = netdev_priv(netdev);
799 if (new_mtu < BE_MIN_MTU ||
Ajit Khaparde34a89b82010-02-09 01:32:43 +0000800 new_mtu > (BE_MAX_JUMBO_FRAME_SIZE -
801 (ETH_HLEN + ETH_FCS_LEN))) {
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700802 dev_info(&adapter->pdev->dev,
803 "MTU must be between %d and %d bytes\n",
Ajit Khaparde34a89b82010-02-09 01:32:43 +0000804 BE_MIN_MTU,
805 (BE_MAX_JUMBO_FRAME_SIZE - (ETH_HLEN + ETH_FCS_LEN)));
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700806 return -EINVAL;
807 }
808 dev_info(&adapter->pdev->dev, "MTU changed from %d to %d bytes\n",
809 netdev->mtu, new_mtu);
810 netdev->mtu = new_mtu;
811 return 0;
812}
813
814/*
Ajit Khaparde82903e42010-02-09 01:34:57 +0000815 * A max of 64 (BE_NUM_VLANS_SUPPORTED) vlans can be configured in BE.
816 * If the user configures more, place BE in vlan promiscuous mode.
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700817 */
/* Push the currently-added VLAN ids to the HW filter table.
 * Falls back to VLAN promiscuous mode when more vids are configured
 * than the HW supports, or when the FW rejects the table.
 * Returns the status of the last be_cmd_vlan_config call.
 */
static int be_vid_config(struct be_adapter *adapter)
{
	u16 vids[BE_NUM_VLANS_SUPPORTED];
	u16 num = 0, i;
	int status = 0;

	/* No need to further configure vids if in promiscuous mode */
	if (adapter->promiscuous)
		return 0;

	/* Too many vids for the HW filter: go straight to vlan promisc */
	if (adapter->vlans_added > adapter->max_vlans)
		goto set_vlan_promisc;

	/* Construct VLAN Table to give to HW */
	for (i = 0; i < VLAN_N_VID; i++)
		if (adapter->vlan_tag[i])
			vids[num++] = cpu_to_le16(i);

	status = be_cmd_vlan_config(adapter, adapter->if_handle,
				    vids, num, 1, 0);

	/* Set to VLAN promisc mode as setting VLAN filter failed */
	if (status) {
		dev_info(&adapter->pdev->dev, "Exhausted VLAN HW filters.\n");
		dev_info(&adapter->pdev->dev, "Disabling HW VLAN filtering.\n");
		goto set_vlan_promisc;
	}

	return status;

set_vlan_promisc:
	/* NULL table + promisc flag puts the interface in vlan promisc */
	status = be_cmd_vlan_config(adapter, adapter->if_handle,
				    NULL, 0, 1, 1);
	return status;
}
853
/* ndo_vlan_rx_add_vid handler: mark @vid in the local table and push
 * the updated VLAN filter to the FW.  Only allowed on the PF.
 */
static int be_vlan_add_vid(struct net_device *netdev, u16 vid)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	int status = 0;

	/* VFs may not program HW VLAN filters */
	if (!be_physfn(adapter)) {
		status = -EINVAL;
		goto ret;
	}

	adapter->vlan_tag[vid] = 1;
	/* NOTE(review): the '+ 1' makes this check asymmetric with the
	 * plain '<= max_vlans' in be_vlan_rem_vid — confirm intentional.
	 */
	if (adapter->vlans_added <= (adapter->max_vlans + 1))
		status = be_vid_config(adapter);

	/* Commit the count on success; roll back the table entry on failure */
	if (!status)
		adapter->vlans_added++;
	else
		adapter->vlan_tag[vid] = 0;
ret:
	return status;
}
875
Jiri Pirko8e586132011-12-08 19:52:37 -0500876static int be_vlan_rem_vid(struct net_device *netdev, u16 vid)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700877{
878 struct be_adapter *adapter = netdev_priv(netdev);
Ajit Khaparde80817cb2011-12-30 12:15:12 +0000879 int status = 0;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700880
Ajit Khaparde80817cb2011-12-30 12:15:12 +0000881 if (!be_physfn(adapter)) {
882 status = -EINVAL;
883 goto ret;
884 }
Sarveshwar Bandiba343c72010-03-31 02:56:12 +0000885
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700886 adapter->vlan_tag[vid] = 0;
Ajit Khaparde82903e42010-02-09 01:34:57 +0000887 if (adapter->vlans_added <= adapter->max_vlans)
Sathya Perla10329df2012-06-05 19:37:18 +0000888 status = be_vid_config(adapter);
Jiri Pirko8e586132011-12-08 19:52:37 -0500889
Ajit Khaparde80817cb2011-12-30 12:15:12 +0000890 if (!status)
891 adapter->vlans_added--;
892 else
893 adapter->vlan_tag[vid] = 1;
894ret:
895 return status;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700896}
897
/* ndo_set_rx_mode handler: synchronize promiscuous/multicast/unicast
 * filtering state with the HW, falling back to (mcast) promiscuous
 * modes when the HW filter capacity is exceeded.
 */
static void be_set_rx_mode(struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	int status;

	if (netdev->flags & IFF_PROMISC) {
		be_cmd_rx_filter(adapter, IFF_PROMISC, ON);
		adapter->promiscuous = true;
		goto done;
	}

	/* BE was previously in promiscuous mode; disable it */
	if (adapter->promiscuous) {
		adapter->promiscuous = false;
		be_cmd_rx_filter(adapter, IFF_PROMISC, OFF);

		/* Promisc mode skipped vid programming; re-sync now */
		if (adapter->vlans_added)
			be_vid_config(adapter);
	}

	/* Enable multicast promisc if num configured exceeds what we support */
	if (netdev->flags & IFF_ALLMULTI ||
	    netdev_mc_count(netdev) > adapter->max_mcast_mac) {
		be_cmd_rx_filter(adapter, IFF_ALLMULTI, ON);
		goto done;
	}

	/* Re-program the unicast MAC list only when it changed */
	if (netdev_uc_count(netdev) != adapter->uc_macs) {
		struct netdev_hw_addr *ha;
		int i = 1; /* First slot is claimed by the Primary MAC */

		/* Delete all previously programmed secondary UC MACs */
		for (; adapter->uc_macs > 0; adapter->uc_macs--, i++) {
			be_cmd_pmac_del(adapter, adapter->if_handle,
					adapter->pmac_id[i], 0);
		}

		/* More UC addresses than pmac slots: go fully promiscuous */
		if (netdev_uc_count(netdev) > adapter->max_pmac_cnt) {
			be_cmd_rx_filter(adapter, IFF_PROMISC, ON);
			adapter->promiscuous = true;
			goto done;
		}

		netdev_for_each_uc_addr(ha, adapter->netdev) {
			adapter->uc_macs++; /* First slot is for Primary MAC */
			be_cmd_pmac_add(adapter, (u8 *)ha->addr,
					adapter->if_handle,
					&adapter->pmac_id[adapter->uc_macs], 0);
		}
	}

	status = be_cmd_rx_filter(adapter, IFF_MULTICAST, ON);

	/* Set to MCAST promisc mode if setting MULTICAST address fails */
	if (status) {
		dev_info(&adapter->pdev->dev, "Exhausted multicast HW filters.\n");
		dev_info(&adapter->pdev->dev, "Disabling HW multicast filtering.\n");
		be_cmd_rx_filter(adapter, IFF_ALLMULTI, ON);
	}
done:
	return;
}
959
Sarveshwar Bandiba343c72010-03-31 02:56:12 +0000960static int be_set_vf_mac(struct net_device *netdev, int vf, u8 *mac)
961{
962 struct be_adapter *adapter = netdev_priv(netdev);
Sathya Perla11ac75e2011-12-13 00:58:50 +0000963 struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf];
Sarveshwar Bandiba343c72010-03-31 02:56:12 +0000964 int status;
965
Sathya Perla11ac75e2011-12-13 00:58:50 +0000966 if (!sriov_enabled(adapter))
Sarveshwar Bandiba343c72010-03-31 02:56:12 +0000967 return -EPERM;
968
Sathya Perla11ac75e2011-12-13 00:58:50 +0000969 if (!is_valid_ether_addr(mac) || vf >= adapter->num_vfs)
Sarveshwar Bandiba343c72010-03-31 02:56:12 +0000970 return -EINVAL;
971
Padmanabh Ratnakar590c3912011-11-25 05:47:26 +0000972 if (lancer_chip(adapter)) {
973 status = be_cmd_set_mac_list(adapter, mac, 1, vf + 1);
974 } else {
Sathya Perla11ac75e2011-12-13 00:58:50 +0000975 status = be_cmd_pmac_del(adapter, vf_cfg->if_handle,
976 vf_cfg->pmac_id, vf + 1);
Sarveshwar Bandiba343c72010-03-31 02:56:12 +0000977
Sathya Perla11ac75e2011-12-13 00:58:50 +0000978 status = be_cmd_pmac_add(adapter, mac, vf_cfg->if_handle,
979 &vf_cfg->pmac_id, vf + 1);
Padmanabh Ratnakar590c3912011-11-25 05:47:26 +0000980 }
981
Ajit Khaparde64600ea2010-07-23 01:50:34 +0000982 if (status)
Sarveshwar Bandiba343c72010-03-31 02:56:12 +0000983 dev_err(&adapter->pdev->dev, "MAC %pM set on VF %d Failed\n",
984 mac, vf);
Ajit Khaparde64600ea2010-07-23 01:50:34 +0000985 else
Sathya Perla11ac75e2011-12-13 00:58:50 +0000986 memcpy(vf_cfg->mac_addr, mac, ETH_ALEN);
Ajit Khaparde64600ea2010-07-23 01:50:34 +0000987
Sarveshwar Bandiba343c72010-03-31 02:56:12 +0000988 return status;
989}
990
Ajit Khaparde64600ea2010-07-23 01:50:34 +0000991static int be_get_vf_config(struct net_device *netdev, int vf,
992 struct ifla_vf_info *vi)
993{
994 struct be_adapter *adapter = netdev_priv(netdev);
Sathya Perla11ac75e2011-12-13 00:58:50 +0000995 struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf];
Ajit Khaparde64600ea2010-07-23 01:50:34 +0000996
Sathya Perla11ac75e2011-12-13 00:58:50 +0000997 if (!sriov_enabled(adapter))
Ajit Khaparde64600ea2010-07-23 01:50:34 +0000998 return -EPERM;
999
Sathya Perla11ac75e2011-12-13 00:58:50 +00001000 if (vf >= adapter->num_vfs)
Ajit Khaparde64600ea2010-07-23 01:50:34 +00001001 return -EINVAL;
1002
1003 vi->vf = vf;
Sathya Perla11ac75e2011-12-13 00:58:50 +00001004 vi->tx_rate = vf_cfg->tx_rate;
1005 vi->vlan = vf_cfg->vlan_tag;
Ajit Khaparde64600ea2010-07-23 01:50:34 +00001006 vi->qos = 0;
Sathya Perla11ac75e2011-12-13 00:58:50 +00001007 memcpy(&vi->mac, vf_cfg->mac_addr, ETH_ALEN);
Ajit Khaparde64600ea2010-07-23 01:50:34 +00001008
1009 return 0;
1010}
1011
/* ndo_set_vf_vlan handler: configure transparent VLAN tagging for
 * VF @vf.  @vlan == 0 resets tagging back to the VF's default vid.
 * Note: the @qos argument is accepted but not used by this function.
 */
static int be_set_vf_vlan(struct net_device *netdev,
			int vf, u16 vlan, u8 qos)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	int status = 0;

	if (!sriov_enabled(adapter))
		return -EPERM;

	if (vf >= adapter->num_vfs || vlan > 4095)
		return -EINVAL;

	if (vlan) {
		if (adapter->vf_cfg[vf].vlan_tag != vlan) {
			/* If this is new value, program it. Else skip. */
			adapter->vf_cfg[vf].vlan_tag = vlan;

			status = be_cmd_set_hsw_config(adapter, vlan,
				vf + 1, adapter->vf_cfg[vf].if_handle);
		}
	} else {
		/* Reset Transparent Vlan Tagging. */
		adapter->vf_cfg[vf].vlan_tag = 0;
		vlan = adapter->vf_cfg[vf].def_vid;
		status = be_cmd_set_hsw_config(adapter, vlan, vf + 1,
			adapter->vf_cfg[vf].if_handle);
	}


	if (status)
		dev_info(&adapter->pdev->dev,
				"VLAN %d config on VF %d failed\n", vlan, vf);
	return status;
}
1046
Ajit Khapardee1d18732010-07-23 01:52:13 +00001047static int be_set_vf_tx_rate(struct net_device *netdev,
1048 int vf, int rate)
1049{
1050 struct be_adapter *adapter = netdev_priv(netdev);
1051 int status = 0;
1052
Sathya Perla11ac75e2011-12-13 00:58:50 +00001053 if (!sriov_enabled(adapter))
Ajit Khapardee1d18732010-07-23 01:52:13 +00001054 return -EPERM;
1055
Ajit Khaparde94f434c2011-12-30 12:15:30 +00001056 if (vf >= adapter->num_vfs)
Ajit Khapardee1d18732010-07-23 01:52:13 +00001057 return -EINVAL;
1058
Ajit Khaparde94f434c2011-12-30 12:15:30 +00001059 if (rate < 100 || rate > 10000) {
1060 dev_err(&adapter->pdev->dev,
1061 "tx rate must be between 100 and 10000 Mbps\n");
1062 return -EINVAL;
1063 }
Ajit Khapardee1d18732010-07-23 01:52:13 +00001064
Padmanabh Ratnakard5c18472012-10-20 06:01:53 +00001065 if (lancer_chip(adapter))
1066 status = be_cmd_set_profile_config(adapter, rate / 10, vf + 1);
1067 else
1068 status = be_cmd_set_qos(adapter, rate / 10, vf + 1);
Ajit Khapardee1d18732010-07-23 01:52:13 +00001069
1070 if (status)
Ajit Khaparde94f434c2011-12-30 12:15:30 +00001071 dev_err(&adapter->pdev->dev,
Ajit Khapardee1d18732010-07-23 01:52:13 +00001072 "tx rate %d on VF %d failed\n", rate, vf);
Ajit Khaparde94f434c2011-12-30 12:15:30 +00001073 else
1074 adapter->vf_cfg[vf].tx_rate = rate;
Ajit Khapardee1d18732010-07-23 01:52:13 +00001075 return status;
1076}
1077
Sathya Perla39f1d942012-05-08 19:41:24 +00001078static int be_find_vfs(struct be_adapter *adapter, int vf_state)
1079{
1080 struct pci_dev *dev, *pdev = adapter->pdev;
Ivan Vecera2f6a0262012-10-01 01:56:55 +00001081 int vfs = 0, assigned_vfs = 0, pos;
Sathya Perla39f1d942012-05-08 19:41:24 +00001082 u16 offset, stride;
1083
1084 pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_SRIOV);
Sathya Perlad79c0a22012-06-05 19:37:22 +00001085 if (!pos)
1086 return 0;
Sathya Perla39f1d942012-05-08 19:41:24 +00001087 pci_read_config_word(pdev, pos + PCI_SRIOV_VF_OFFSET, &offset);
1088 pci_read_config_word(pdev, pos + PCI_SRIOV_VF_STRIDE, &stride);
1089
1090 dev = pci_get_device(pdev->vendor, PCI_ANY_ID, NULL);
1091 while (dev) {
Ivan Vecera2f6a0262012-10-01 01:56:55 +00001092 if (dev->is_virtfn && pci_physfn(dev) == pdev) {
Sathya Perla39f1d942012-05-08 19:41:24 +00001093 vfs++;
1094 if (dev->dev_flags & PCI_DEV_FLAGS_ASSIGNED)
1095 assigned_vfs++;
1096 }
1097 dev = pci_get_device(pdev->vendor, PCI_ANY_ID, dev);
1098 }
1099 return (vf_state == ASSIGNED) ? assigned_vfs : vfs;
1100}
1101
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001102static void be_eqd_update(struct be_adapter *adapter, struct be_eq_obj *eqo)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001103{
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001104 struct be_rx_stats *stats = rx_stats(&adapter->rx_obj[eqo->idx]);
Sathya Perla4097f662009-03-24 16:40:13 -07001105 ulong now = jiffies;
Sathya Perlaac124ff2011-07-25 19:10:14 +00001106 ulong delta = now - stats->rx_jiffies;
Sathya Perlaab1594e2011-07-25 19:10:15 +00001107 u64 pkts;
1108 unsigned int start, eqd;
Sathya Perlaac124ff2011-07-25 19:10:14 +00001109
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001110 if (!eqo->enable_aic) {
1111 eqd = eqo->eqd;
1112 goto modify_eqd;
1113 }
1114
1115 if (eqo->idx >= adapter->num_rx_qs)
Sathya Perlaac124ff2011-07-25 19:10:14 +00001116 return;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001117
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001118 stats = rx_stats(&adapter->rx_obj[eqo->idx]);
1119
Sathya Perla4097f662009-03-24 16:40:13 -07001120 /* Wrapped around */
Sathya Perla3abcded2010-10-03 22:12:27 -07001121 if (time_before(now, stats->rx_jiffies)) {
1122 stats->rx_jiffies = now;
Sathya Perla4097f662009-03-24 16:40:13 -07001123 return;
1124 }
1125
Sathya Perlaac124ff2011-07-25 19:10:14 +00001126 /* Update once a second */
1127 if (delta < HZ)
Sathya Perla4097f662009-03-24 16:40:13 -07001128 return;
1129
Sathya Perlaab1594e2011-07-25 19:10:15 +00001130 do {
1131 start = u64_stats_fetch_begin_bh(&stats->sync);
1132 pkts = stats->rx_pkts;
1133 } while (u64_stats_fetch_retry_bh(&stats->sync, start));
1134
Eric Dumazet68c3e5a2011-08-09 06:23:07 +00001135 stats->rx_pps = (unsigned long)(pkts - stats->rx_pkts_prev) / (delta / HZ);
Sathya Perlaab1594e2011-07-25 19:10:15 +00001136 stats->rx_pkts_prev = pkts;
Sathya Perla3abcded2010-10-03 22:12:27 -07001137 stats->rx_jiffies = now;
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001138 eqd = (stats->rx_pps / 110000) << 3;
1139 eqd = min(eqd, eqo->max_eqd);
1140 eqd = max(eqd, eqo->min_eqd);
Sathya Perlaac124ff2011-07-25 19:10:14 +00001141 if (eqd < 10)
1142 eqd = 0;
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001143
1144modify_eqd:
1145 if (eqd != eqo->cur_eqd) {
1146 be_cmd_modify_eqd(adapter, eqo->q.id, eqd);
1147 eqo->cur_eqd = eqd;
Sathya Perlaac124ff2011-07-25 19:10:14 +00001148 }
Sathya Perla4097f662009-03-24 16:40:13 -07001149}
1150
Sathya Perla3abcded2010-10-03 22:12:27 -07001151static void be_rx_stats_update(struct be_rx_obj *rxo,
Sathya Perla2e588f82011-03-11 02:49:26 +00001152 struct be_rx_compl_info *rxcp)
Sathya Perla4097f662009-03-24 16:40:13 -07001153{
Sathya Perlaac124ff2011-07-25 19:10:14 +00001154 struct be_rx_stats *stats = rx_stats(rxo);
Sathya Perla4097f662009-03-24 16:40:13 -07001155
Sathya Perlaab1594e2011-07-25 19:10:15 +00001156 u64_stats_update_begin(&stats->sync);
Sathya Perla3abcded2010-10-03 22:12:27 -07001157 stats->rx_compl++;
Sathya Perla2e588f82011-03-11 02:49:26 +00001158 stats->rx_bytes += rxcp->pkt_size;
Sathya Perla3abcded2010-10-03 22:12:27 -07001159 stats->rx_pkts++;
Sathya Perla2e588f82011-03-11 02:49:26 +00001160 if (rxcp->pkt_type == BE_MULTICAST_PACKET)
Sathya Perla3abcded2010-10-03 22:12:27 -07001161 stats->rx_mcast_pkts++;
Sathya Perla2e588f82011-03-11 02:49:26 +00001162 if (rxcp->err)
Sathya Perlaac124ff2011-07-25 19:10:14 +00001163 stats->rx_compl_err++;
Sathya Perlaab1594e2011-07-25 19:10:15 +00001164 u64_stats_update_end(&stats->sync);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001165}
1166
Sathya Perla2e588f82011-03-11 02:49:26 +00001167static inline bool csum_passed(struct be_rx_compl_info *rxcp)
Ajit Khaparde728a9972009-04-13 15:41:22 -07001168{
Padmanabh Ratnakar19fad862011-03-07 03:08:16 +00001169 /* L4 checksum is not reliable for non TCP/UDP packets.
1170 * Also ignore ipcksm for ipv6 pkts */
Sathya Perla2e588f82011-03-11 02:49:26 +00001171 return (rxcp->tcpf || rxcp->udpf) && rxcp->l4_csum &&
1172 (rxcp->ip_csum || rxcp->ipv6);
Ajit Khaparde728a9972009-04-13 15:41:22 -07001173}
1174
/* Take ownership of the RX page-info entry at @frag_idx.
 * DMA-unmaps the backing page when this entry is the page's last user
 * (pages are shared between two entries elsewhere in this file), and
 * decrements the queue's used count.  BUGs if the slot has no page.
 */
static struct be_rx_page_info *get_rx_page_info(struct be_rx_obj *rxo,
						u16 frag_idx)
{
	struct be_adapter *adapter = rxo->adapter;
	struct be_rx_page_info *rx_page_info;
	struct be_queue_info *rxq = &rxo->q;

	rx_page_info = &rxo->page_info_tbl[frag_idx];
	BUG_ON(!rx_page_info->page);

	/* Unmap only once per page, when its last referencing slot is
	 * consumed; the whole big page was mapped in one go.
	 */
	if (rx_page_info->last_page_user) {
		dma_unmap_page(&adapter->pdev->dev,
			       dma_unmap_addr(rx_page_info, bus),
			       adapter->big_page_size, DMA_FROM_DEVICE);
		rx_page_info->last_page_user = false;
	}

	atomic_dec(&rxq->used);
	return rx_page_info;
}
1195
1196/* Throwaway the data in the Rx completion */
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001197static void be_rx_compl_discard(struct be_rx_obj *rxo,
1198 struct be_rx_compl_info *rxcp)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001199{
Sathya Perla3abcded2010-10-03 22:12:27 -07001200 struct be_queue_info *rxq = &rxo->q;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001201 struct be_rx_page_info *page_info;
Sathya Perla2e588f82011-03-11 02:49:26 +00001202 u16 i, num_rcvd = rxcp->num_rcvd;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001203
Padmanabh Ratnakare80d9da2011-03-07 03:07:58 +00001204 for (i = 0; i < num_rcvd; i++) {
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001205 page_info = get_rx_page_info(rxo, rxcp->rxq_idx);
Padmanabh Ratnakare80d9da2011-03-07 03:07:58 +00001206 put_page(page_info->page);
1207 memset(page_info, 0, sizeof(*page_info));
Sathya Perla2e588f82011-03-11 02:49:26 +00001208 index_inc(&rxcp->rxq_idx, rxq->len);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001209 }
1210}
1211
/*
 * skb_fill_rx_data forms a complete skb for an ether frame
 * indicated by rxcp.
 * The first fragment's header bytes are copied into the skb's linear
 * area; remaining data is attached as page fragments, coalescing
 * consecutive RX fragments that share a physical page into one slot.
 */
static void skb_fill_rx_data(struct be_rx_obj *rxo, struct sk_buff *skb,
			     struct be_rx_compl_info *rxcp)
{
	struct be_queue_info *rxq = &rxo->q;
	struct be_rx_page_info *page_info;
	u16 i, j;
	u16 hdr_len, curr_frag_len, remaining;
	u8 *start;

	page_info = get_rx_page_info(rxo, rxcp->rxq_idx);
	start = page_address(page_info->page) + page_info->page_offset;
	prefetch(start);

	/* Copy data in the first descriptor of this completion */
	curr_frag_len = min(rxcp->pkt_size, rx_frag_size);

	skb->len = curr_frag_len;
	if (curr_frag_len <= BE_HDR_LEN) { /* tiny packet */
		memcpy(skb->data, start, curr_frag_len);
		/* Complete packet has now been moved to data */
		put_page(page_info->page);
		skb->data_len = 0;
		skb->tail += curr_frag_len;
	} else {
		/* Copy only the Ethernet header to linear data; the rest
		 * of the first fragment becomes page frag 0.
		 */
		hdr_len = ETH_HLEN;
		memcpy(skb->data, start, hdr_len);
		skb_shinfo(skb)->nr_frags = 1;
		skb_frag_set_page(skb, 0, page_info->page);
		skb_shinfo(skb)->frags[0].page_offset =
					page_info->page_offset + hdr_len;
		skb_frag_size_set(&skb_shinfo(skb)->frags[0], curr_frag_len - hdr_len);
		skb->data_len = curr_frag_len - hdr_len;
		skb->truesize += rx_frag_size;
		skb->tail += hdr_len;
	}
	page_info->page = NULL;

	/* Single-fragment packet: done */
	if (rxcp->pkt_size <= rx_frag_size) {
		BUG_ON(rxcp->num_rcvd != 1);
		return;
	}

	/* More frags present for this completion */
	index_inc(&rxcp->rxq_idx, rxq->len);
	remaining = rxcp->pkt_size - curr_frag_len;
	for (i = 1, j = 0; i < rxcp->num_rcvd; i++) {
		page_info = get_rx_page_info(rxo, rxcp->rxq_idx);
		curr_frag_len = min(remaining, rx_frag_size);

		/* Coalesce all frags from the same physical page in one slot */
		if (page_info->page_offset == 0) {
			/* Fresh page */
			j++;
			skb_frag_set_page(skb, j, page_info->page);
			skb_shinfo(skb)->frags[j].page_offset =
							page_info->page_offset;
			skb_frag_size_set(&skb_shinfo(skb)->frags[j], 0);
			skb_shinfo(skb)->nr_frags++;
		} else {
			/* Same page as frag j: drop the extra page reference */
			put_page(page_info->page);
		}

		skb_frag_size_add(&skb_shinfo(skb)->frags[j], curr_frag_len);
		skb->len += curr_frag_len;
		skb->data_len += curr_frag_len;
		skb->truesize += rx_frag_size;
		remaining -= curr_frag_len;
		index_inc(&rxcp->rxq_idx, rxq->len);
		page_info->page = NULL;
	}
	BUG_ON(j > MAX_SKB_FRAGS);
}
1288
/* Process the RX completion indicated by rxcp when GRO is disabled */
static void be_rx_compl_process(struct be_rx_obj *rxo,
			struct be_rx_compl_info *rxcp)
{
	struct be_adapter *adapter = rxo->adapter;
	struct net_device *netdev = adapter->netdev;
	struct sk_buff *skb;

	/* Allocate a fresh skb; the received frags are attached to it by
	 * skb_fill_rx_data() below. On allocation failure the completion's
	 * buffers must still be reclaimed, hence the discard path. */
	skb = netdev_alloc_skb_ip_align(netdev, BE_RX_SKB_ALLOC_SIZE);
	if (unlikely(!skb)) {
		rx_stats(rxo)->rx_drops_no_skbs++;
		be_rx_compl_discard(rxo, rxcp);
		return;
	}

	/* Copy/attach the received data described by rxcp into skb */
	skb_fill_rx_data(rxo, skb, rxcp);

	/* Trust the HW checksum only if the netdev has RXCSUM enabled and
	 * the completion reports the checksum as good */
	if (likely((netdev->features & NETIF_F_RXCSUM) && csum_passed(rxcp)))
		skb->ip_summed = CHECKSUM_UNNECESSARY;
	else
		skb_checksum_none_assert(skb);

	skb->protocol = eth_type_trans(skb, netdev);
	/* Record which RX ring (rxo index) this packet arrived on */
	skb_record_rx_queue(skb, rxo - &adapter->rx_obj[0]);
	if (netdev->features & NETIF_F_RXHASH)
		skb->rxhash = rxcp->rss_hash;


	/* Hand the HW-stripped VLAN tag to the stack, if one was present */
	if (rxcp->vlanf)
		__vlan_hwaccel_put_tag(skb, rxcp->vlan_tag);

	netif_receive_skb(skb);
}
1322
/* Process the RX completion indicated by rxcp when GRO is enabled */
void be_rx_compl_process_gro(struct be_rx_obj *rxo, struct napi_struct *napi,
				struct be_rx_compl_info *rxcp)
{
	struct be_adapter *adapter = rxo->adapter;
	struct be_rx_page_info *page_info;
	struct sk_buff *skb = NULL;
	struct be_queue_info *rxq = &rxo->q;
	u16 remaining, curr_frag_len;
	u16 i, j;

	/* Borrow NAPI's preallocated frag-skb; no data is copied here,
	 * the received pages are attached as skb frags directly. */
	skb = napi_get_frags(napi);
	if (!skb) {
		be_rx_compl_discard(rxo, rxcp);
		return;
	}

	remaining = rxcp->pkt_size;
	/* NOTE: j is u16, so j = -1 wraps to 0xffff; the first "j++" below
	 * brings it to 0. Intentional, but subtle. */
	for (i = 0, j = -1; i < rxcp->num_rcvd; i++) {
		page_info = get_rx_page_info(rxo, rxcp->rxq_idx);

		curr_frag_len = min(remaining, rx_frag_size);

		/* Coalesce all frags from the same physical page in one slot */
		if (i == 0 || page_info->page_offset == 0) {
			/* First frag or Fresh page */
			j++;
			skb_frag_set_page(skb, j, page_info->page);
			skb_shinfo(skb)->frags[j].page_offset =
							page_info->page_offset;
			skb_frag_size_set(&skb_shinfo(skb)->frags[j], 0);
		} else {
			/* Same page as previous frag: the page ref taken at
			 * posting time is dropped, slot j keeps one ref */
			put_page(page_info->page);
		}
		skb_frag_size_add(&skb_shinfo(skb)->frags[j], curr_frag_len);
		skb->truesize += rx_frag_size;
		remaining -= curr_frag_len;
		index_inc(&rxcp->rxq_idx, rxq->len);
		/* page_info consumed; clear it so the slot can be re-posted */
		memset(page_info, 0, sizeof(*page_info));
	}
	BUG_ON(j > MAX_SKB_FRAGS);

	skb_shinfo(skb)->nr_frags = j + 1;
	skb->len = rxcp->pkt_size;
	skb->data_len = rxcp->pkt_size;
	/* GRO path is only taken for checksum-verified TCP packets —
	 * presumably guaranteed by the caller; verify against dispatch site */
	skb->ip_summed = CHECKSUM_UNNECESSARY;
	skb_record_rx_queue(skb, rxo - &adapter->rx_obj[0]);
	if (adapter->netdev->features & NETIF_F_RXHASH)
		skb->rxhash = rxcp->rss_hash;

	if (rxcp->vlanf)
		__vlan_hwaccel_put_tag(skb, rxcp->vlan_tag);

	napi_gro_frags(napi);
}
1378
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001379static void be_parse_rx_compl_v1(struct be_eth_rx_compl *compl,
1380 struct be_rx_compl_info *rxcp)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001381{
Sathya Perla2e588f82011-03-11 02:49:26 +00001382 rxcp->pkt_size =
1383 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, pktsize, compl);
1384 rxcp->vlanf = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, vtp, compl);
1385 rxcp->err = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, err, compl);
1386 rxcp->tcpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, tcpf, compl);
Padmanabh Ratnakar9ecb42f2011-03-15 14:57:09 -07001387 rxcp->udpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, udpf, compl);
Sathya Perla2e588f82011-03-11 02:49:26 +00001388 rxcp->ip_csum =
1389 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, ipcksm, compl);
1390 rxcp->l4_csum =
1391 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, l4_cksm, compl);
1392 rxcp->ipv6 =
1393 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, ip_version, compl);
1394 rxcp->rxq_idx =
1395 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, fragndx, compl);
1396 rxcp->num_rcvd =
1397 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, numfrags, compl);
1398 rxcp->pkt_type =
1399 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, cast_enc, compl);
Ajit Khaparde4b972912011-04-06 18:07:43 +00001400 rxcp->rss_hash =
Sarveshwar Bandic2979772012-07-25 21:29:50 +00001401 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, rsshash, compl);
Sathya Perla15d72182011-03-21 20:49:26 +00001402 if (rxcp->vlanf) {
1403 rxcp->vtm = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, vtm,
David S. Miller3c709f82011-05-11 14:26:15 -04001404 compl);
1405 rxcp->vlan_tag = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, vlan_tag,
1406 compl);
Sathya Perla15d72182011-03-21 20:49:26 +00001407 }
Sathya Perla12004ae2011-08-02 19:57:46 +00001408 rxcp->port = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, port, compl);
Sathya Perla2e588f82011-03-11 02:49:26 +00001409}
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001410
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001411static void be_parse_rx_compl_v0(struct be_eth_rx_compl *compl,
1412 struct be_rx_compl_info *rxcp)
Sathya Perla2e588f82011-03-11 02:49:26 +00001413{
1414 rxcp->pkt_size =
1415 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, pktsize, compl);
1416 rxcp->vlanf = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, vtp, compl);
1417 rxcp->err = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, err, compl);
1418 rxcp->tcpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, tcpf, compl);
Padmanabh Ratnakar9ecb42f2011-03-15 14:57:09 -07001419 rxcp->udpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, udpf, compl);
Sathya Perla2e588f82011-03-11 02:49:26 +00001420 rxcp->ip_csum =
1421 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, ipcksm, compl);
1422 rxcp->l4_csum =
1423 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, l4_cksm, compl);
1424 rxcp->ipv6 =
1425 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, ip_version, compl);
1426 rxcp->rxq_idx =
1427 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, fragndx, compl);
1428 rxcp->num_rcvd =
1429 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, numfrags, compl);
1430 rxcp->pkt_type =
1431 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, cast_enc, compl);
Ajit Khaparde4b972912011-04-06 18:07:43 +00001432 rxcp->rss_hash =
Sarveshwar Bandic2979772012-07-25 21:29:50 +00001433 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, rsshash, compl);
Sathya Perla15d72182011-03-21 20:49:26 +00001434 if (rxcp->vlanf) {
1435 rxcp->vtm = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, vtm,
David S. Miller3c709f82011-05-11 14:26:15 -04001436 compl);
1437 rxcp->vlan_tag = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, vlan_tag,
1438 compl);
Sathya Perla15d72182011-03-21 20:49:26 +00001439 }
Sathya Perla12004ae2011-08-02 19:57:46 +00001440 rxcp->port = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, port, compl);
Sathya Perla2e588f82011-03-11 02:49:26 +00001441}
1442
/* Fetch the next valid RX completion from rxo's CQ, parse it into
 * rxo->rxcp and advance the CQ tail. Returns NULL when the CQ is empty.
 * The returned pointer is to per-rxo storage and is overwritten by the
 * next call.
 */
static struct be_rx_compl_info *be_rx_compl_get(struct be_rx_obj *rxo)
{
	struct be_eth_rx_compl *compl = queue_tail_node(&rxo->cq);
	struct be_rx_compl_info *rxcp = &rxo->rxcp;
	struct be_adapter *adapter = rxo->adapter;

	/* For checking the valid bit it is Ok to use either definition as the
	 * valid bit is at the same position in both v0 and v1 Rx compl */
	if (compl->dw[offsetof(struct amap_eth_rx_compl_v1, valid) / 32] == 0)
		return NULL;

	/* Read barrier: the valid-bit check above must complete before the
	 * rest of the (DMA-written) completion is read below */
	rmb();
	be_dws_le_to_cpu(compl, sizeof(*compl));

	/* Descriptor layout depends on whether BE3-native mode is active */
	if (adapter->be3_native)
		be_parse_rx_compl_v1(compl, rxcp);
	else
		be_parse_rx_compl_v0(compl, rxcp);

	if (rxcp->vlanf) {
		/* vlanf could be wrongly set in some cards.
		 * ignore if vtm is not set */
		if ((adapter->function_mode & FLEX10_MODE) && !rxcp->vtm)
			rxcp->vlanf = 0;

		/* BE chips report the tag byte-swapped; Lancer does not */
		if (!lancer_chip(adapter))
			rxcp->vlan_tag = swab16(rxcp->vlan_tag);

		/* Hide the PVID tag from the stack unless the vid was
		 * explicitly configured by the user */
		if (adapter->pvid == (rxcp->vlan_tag & VLAN_VID_MASK) &&
		    !adapter->vlan_tag[rxcp->vlan_tag])
			rxcp->vlanf = 0;
	}

	/* As the compl has been parsed, reset it; we wont touch it again */
	compl->dw[offsetof(struct amap_eth_rx_compl_v1, valid) / 32] = 0;

	queue_tail_inc(&rxo->cq);
	return rxcp;
}
1482
Eric Dumazet1829b082011-03-01 05:48:12 +00001483static inline struct page *be_alloc_pages(u32 size, gfp_t gfp)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001484{
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001485 u32 order = get_order(size);
Eric Dumazet1829b082011-03-01 05:48:12 +00001486
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001487 if (order > 0)
Eric Dumazet1829b082011-03-01 05:48:12 +00001488 gfp |= __GFP_COMP;
1489 return alloc_pages(gfp, order);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001490}
1491
1492/*
1493 * Allocate a page, split it to fragments of size rx_frag_size and post as
1494 * receive buffers to BE
1495 */
Eric Dumazet1829b082011-03-01 05:48:12 +00001496static void be_post_rx_frags(struct be_rx_obj *rxo, gfp_t gfp)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001497{
Sathya Perla3abcded2010-10-03 22:12:27 -07001498 struct be_adapter *adapter = rxo->adapter;
Sathya Perla26d92f92010-01-21 22:52:08 -08001499 struct be_rx_page_info *page_info = NULL, *prev_page_info = NULL;
Sathya Perla3abcded2010-10-03 22:12:27 -07001500 struct be_queue_info *rxq = &rxo->q;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001501 struct page *pagep = NULL;
1502 struct be_eth_rx_d *rxd;
1503 u64 page_dmaaddr = 0, frag_dmaaddr;
1504 u32 posted, page_offset = 0;
1505
Sathya Perla3abcded2010-10-03 22:12:27 -07001506 page_info = &rxo->page_info_tbl[rxq->head];
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001507 for (posted = 0; posted < MAX_RX_POST && !page_info->page; posted++) {
1508 if (!pagep) {
Eric Dumazet1829b082011-03-01 05:48:12 +00001509 pagep = be_alloc_pages(adapter->big_page_size, gfp);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001510 if (unlikely(!pagep)) {
Sathya Perlaac124ff2011-07-25 19:10:14 +00001511 rx_stats(rxo)->rx_post_fail++;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001512 break;
1513 }
Ivan Vecera2b7bceb2011-02-02 08:05:12 +00001514 page_dmaaddr = dma_map_page(&adapter->pdev->dev, pagep,
1515 0, adapter->big_page_size,
1516 DMA_FROM_DEVICE);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001517 page_info->page_offset = 0;
1518 } else {
1519 get_page(pagep);
1520 page_info->page_offset = page_offset + rx_frag_size;
1521 }
1522 page_offset = page_info->page_offset;
1523 page_info->page = pagep;
FUJITA Tomonorifac6da52010-04-01 16:53:22 +00001524 dma_unmap_addr_set(page_info, bus, page_dmaaddr);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001525 frag_dmaaddr = page_dmaaddr + page_info->page_offset;
1526
1527 rxd = queue_head_node(rxq);
1528 rxd->fragpa_lo = cpu_to_le32(frag_dmaaddr & 0xFFFFFFFF);
1529 rxd->fragpa_hi = cpu_to_le32(upper_32_bits(frag_dmaaddr));
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001530
1531 /* Any space left in the current big page for another frag? */
1532 if ((page_offset + rx_frag_size + rx_frag_size) >
1533 adapter->big_page_size) {
1534 pagep = NULL;
1535 page_info->last_page_user = true;
1536 }
Sathya Perla26d92f92010-01-21 22:52:08 -08001537
1538 prev_page_info = page_info;
1539 queue_head_inc(rxq);
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001540 page_info = &rxo->page_info_tbl[rxq->head];
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001541 }
1542 if (pagep)
Sathya Perla26d92f92010-01-21 22:52:08 -08001543 prev_page_info->last_page_user = true;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001544
1545 if (posted) {
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001546 atomic_add(posted, &rxq->used);
Sathya Perla8788fdc2009-07-27 22:52:03 +00001547 be_rxq_notify(adapter, rxq->id, posted);
Sathya Perlaea1dae12009-03-19 23:56:20 -07001548 } else if (atomic_read(&rxq->used) == 0) {
1549 /* Let be_worker replenish when memory is available */
Sathya Perla3abcded2010-10-03 22:12:27 -07001550 rxo->rx_post_starved = true;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001551 }
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001552}
1553
/* Fetch the next valid TX completion from tx_cq, invalidate it in place
 * and advance the CQ tail. Returns NULL when no completion is pending.
 */
static struct be_eth_tx_compl *be_tx_compl_get(struct be_queue_info *tx_cq)
{
	struct be_eth_tx_compl *txcp = queue_tail_node(tx_cq);

	if (txcp->dw[offsetof(struct amap_eth_tx_compl, valid) / 32] == 0)
		return NULL;

	/* Read barrier: the valid-bit check must complete before the rest
	 * of the DMA-written completion is read/converted */
	rmb();
	be_dws_le_to_cpu(txcp, sizeof(*txcp));

	/* Clear the valid bit so this entry is not seen again */
	txcp->dw[offsetof(struct amap_eth_tx_compl, valid) / 32] = 0;

	queue_tail_inc(tx_cq);
	return txcp;
}
1569
/* Reclaim one completed TX packet: unmap all of its wrbs from txq->tail
 * through last_index, free the skb, and return the number of wrbs
 * consumed (including the header wrb). Advances txq->tail past the
 * packet; the caller adjusts txq->used.
 */
static u16 be_tx_compl_process(struct be_adapter *adapter,
		struct be_tx_obj *txo, u16 last_index)
{
	struct be_queue_info *txq = &txo->q;
	struct be_eth_wrb *wrb;
	struct sk_buff **sent_skbs = txo->sent_skb_list;
	struct sk_buff *sent_skb;
	u16 cur_index, num_wrbs = 1; /* account for hdr wrb */
	bool unmap_skb_hdr = true;

	/* The skb for the packet was recorded at the head-wrb slot */
	sent_skb = sent_skbs[txq->tail];
	BUG_ON(!sent_skb);
	sent_skbs[txq->tail] = NULL;

	/* skip header wrb */
	queue_tail_inc(txq);

	do {
		cur_index = txq->tail;
		wrb = queue_tail_node(txq);
		/* Only the first data wrb maps the linear (header) part,
		 * and only if the skb has linear data */
		unmap_tx_frag(&adapter->pdev->dev, wrb,
			      (unmap_skb_hdr && skb_headlen(sent_skb)));
		unmap_skb_hdr = false;

		num_wrbs++;
		queue_tail_inc(txq);
	} while (cur_index != last_index);

	kfree_skb(sent_skb);
	return num_wrbs;
}
1601
/* Return the number of events in the event queue */
static inline int events_get(struct be_eq_obj *eqo)
{
	struct be_eq_entry *eqe;
	int num = 0;

	/* Drain valid (non-zero) EQ entries, zeroing each so it is not
	 * counted again, until an unwritten entry is found */
	do {
		eqe = queue_tail_node(&eqo->q);
		if (eqe->evt == 0)
			break;

		/* Read barrier between the validity check and the clear of
		 * the DMA-written entry */
		rmb();
		eqe->evt = 0;
		num++;
		queue_tail_inc(&eqo->q);
	} while (true);

	return num;
}
1621
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001622static int event_handle(struct be_eq_obj *eqo)
Sathya Perla859b1e42009-08-10 03:43:51 +00001623{
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001624 bool rearm = false;
1625 int num = events_get(eqo);
Sathya Perla859b1e42009-08-10 03:43:51 +00001626
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001627 /* Deal with any spurious interrupts that come without events */
1628 if (!num)
1629 rearm = true;
Sathya Perla859b1e42009-08-10 03:43:51 +00001630
Padmanabh Ratnakaraf311fe2012-04-25 01:46:39 +00001631 if (num || msix_enabled(eqo->adapter))
1632 be_eq_notify(eqo->adapter, eqo->q.id, rearm, true, num);
1633
Sathya Perla859b1e42009-08-10 03:43:51 +00001634 if (num)
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001635 napi_schedule(&eqo->napi);
1636
1637 return num;
Sathya Perla859b1e42009-08-10 03:43:51 +00001638}
1639
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001640/* Leaves the EQ is disarmed state */
1641static void be_eq_clean(struct be_eq_obj *eqo)
1642{
1643 int num = events_get(eqo);
1644
1645 be_eq_notify(eqo->adapter, eqo->q.id, false, true, num);
1646}
1647
/* Drain and discard everything on an RX ring during teardown: first all
 * pending completions, then all posted-but-unused buffers. Leaves the
 * ring empty with head/tail reset.
 */
static void be_rx_cq_clean(struct be_rx_obj *rxo)
{
	struct be_rx_page_info *page_info;
	struct be_queue_info *rxq = &rxo->q;
	struct be_queue_info *rx_cq = &rxo->cq;
	struct be_rx_compl_info *rxcp;
	u16 tail;

	/* First cleanup pending rx completions */
	while ((rxcp = be_rx_compl_get(rxo)) != NULL) {
		be_rx_compl_discard(rxo, rxcp);
		be_cq_notify(rxo->adapter, rx_cq->id, false, 1);
	}

	/* Then free posted rx buffer that were not used */
	/* The oldest posted buffer sits `used` slots behind head */
	tail = (rxq->head + rxq->len - atomic_read(&rxq->used)) % rxq->len;
	for (; atomic_read(&rxq->used) > 0; index_inc(&tail, rxq->len)) {
		/* get_rx_page_info() decrements rxq->used */
		page_info = get_rx_page_info(rxo, tail);
		put_page(page_info->page);
		memset(page_info, 0, sizeof(*page_info));
	}
	BUG_ON(atomic_read(&rxq->used));
	rxq->tail = rxq->head = 0;
}
1672
/* Drain all TX queues during teardown. Phase 1: poll up to ~200ms for
 * outstanding completions and reclaim them. Phase 2: forcibly free any
 * posted packets whose completions never arrived.
 */
static void be_tx_compl_clean(struct be_adapter *adapter)
{
	struct be_tx_obj *txo;
	struct be_queue_info *txq;
	struct be_eth_tx_compl *txcp;
	u16 end_idx, cmpl = 0, timeo = 0, num_wrbs = 0;
	struct sk_buff *sent_skb;
	bool dummy_wrb;
	int i, pending_txqs;

	/* Wait for a max of 200ms for all the tx-completions to arrive. */
	do {
		pending_txqs = adapter->num_tx_qs;

		for_all_tx_queues(adapter, txo, i) {
			txq = &txo->q;
			/* Reclaim every completion currently on this CQ */
			while ((txcp = be_tx_compl_get(&txo->cq))) {
				end_idx =
					AMAP_GET_BITS(struct amap_eth_tx_compl,
						      wrb_index, txcp);
				num_wrbs += be_tx_compl_process(adapter, txo,
								end_idx);
				cmpl++;
			}
			if (cmpl) {
				/* Ack the processed completions to HW and
				 * release their wrb slots */
				be_cq_notify(adapter, txo->cq.id, false, cmpl);
				atomic_sub(num_wrbs, &txq->used);
				cmpl = 0;
				num_wrbs = 0;
			}
			if (atomic_read(&txq->used) == 0)
				pending_txqs--;
		}

		/* Stop when all queues drained or the 200ms budget expires */
		if (pending_txqs == 0 || ++timeo > 200)
			break;

		mdelay(1);
	} while (true);

	for_all_tx_queues(adapter, txo, i) {
		txq = &txo->q;
		if (atomic_read(&txq->used))
			dev_err(&adapter->pdev->dev, "%d pending tx-compls\n",
				atomic_read(&txq->used));

		/* free posted tx for which compls will never arrive */
		while (atomic_read(&txq->used)) {
			sent_skb = txo->sent_skb_list[txq->tail];
			end_idx = txq->tail;
			/* Recompute each packet's wrb span from its skb,
			 * since no completion supplied the end index */
			num_wrbs = wrb_cnt_for_skb(adapter, sent_skb,
						   &dummy_wrb);
			index_adv(&end_idx, num_wrbs - 1, txq->len);
			num_wrbs = be_tx_compl_process(adapter, txo, end_idx);
			atomic_sub(num_wrbs, &txq->used);
		}
	}
}
1731
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001732static void be_evt_queues_destroy(struct be_adapter *adapter)
1733{
1734 struct be_eq_obj *eqo;
1735 int i;
1736
1737 for_all_evt_queues(adapter, eqo, i) {
Padmanabh Ratnakar19d59aa2012-07-12 03:57:21 +00001738 if (eqo->q.created) {
1739 be_eq_clean(eqo);
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001740 be_cmd_q_destroy(adapter, &eqo->q, QTYPE_EQ);
Padmanabh Ratnakar19d59aa2012-07-12 03:57:21 +00001741 }
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001742 be_queue_free(adapter, &eqo->q);
1743 }
1744}
1745
/* Allocate and create one event queue per IRQ. Returns 0 on success or
 * the first error code; on failure, already-created EQs are left for the
 * caller to release via be_evt_queues_destroy() — presumably the caller
 * does so; verify against the probe/error path.
 */
static int be_evt_queues_create(struct be_adapter *adapter)
{
	struct be_queue_info *eq;
	struct be_eq_obj *eqo;
	int i, rc;

	adapter->num_evt_qs = num_irqs(adapter);

	for_all_evt_queues(adapter, eqo, i) {
		/* Per-EQ software state: back-pointer, NAPI tx budget,
		 * index, and adaptive interrupt coalescing defaults */
		eqo->adapter = adapter;
		eqo->tx_budget = BE_TX_BUDGET;
		eqo->idx = i;
		eqo->max_eqd = BE_MAX_EQD;
		eqo->enable_aic = true;

		eq = &eqo->q;
		rc = be_queue_alloc(adapter, eq, EVNT_Q_LEN,
					sizeof(struct be_eq_entry));
		if (rc)
			return rc;

		rc = be_cmd_eq_create(adapter, eq, eqo->cur_eqd);
		if (rc)
			return rc;
	}
	return 0;
}
1773
Sathya Perla5fb379e2009-06-18 00:02:59 +00001774static void be_mcc_queues_destroy(struct be_adapter *adapter)
1775{
1776 struct be_queue_info *q;
Sathya Perla5fb379e2009-06-18 00:02:59 +00001777
Sathya Perla8788fdc2009-07-27 22:52:03 +00001778 q = &adapter->mcc_obj.q;
Sathya Perla5fb379e2009-06-18 00:02:59 +00001779 if (q->created)
Sathya Perla8788fdc2009-07-27 22:52:03 +00001780 be_cmd_q_destroy(adapter, q, QTYPE_MCCQ);
Sathya Perla5fb379e2009-06-18 00:02:59 +00001781 be_queue_free(adapter, q);
1782
Sathya Perla8788fdc2009-07-27 22:52:03 +00001783 q = &adapter->mcc_obj.cq;
Sathya Perla5fb379e2009-06-18 00:02:59 +00001784 if (q->created)
Sathya Perla8788fdc2009-07-27 22:52:03 +00001785 be_cmd_q_destroy(adapter, q, QTYPE_CQ);
Sathya Perla5fb379e2009-06-18 00:02:59 +00001786 be_queue_free(adapter, q);
1787}
1788
/* Must be called only after TX qs are created as MCC shares TX EQ */
/* Create the MCC completion queue and MCC queue, unwinding via the goto
 * chain on any failure. Returns 0 on success, -1 on any failure.
 */
static int be_mcc_queues_create(struct be_adapter *adapter)
{
	struct be_queue_info *q, *cq;

	cq = &adapter->mcc_obj.cq;
	if (be_queue_alloc(adapter, cq, MCC_CQ_LEN,
			sizeof(struct be_mcc_compl)))
		goto err;

	/* Use the default EQ for MCC completions */
	if (be_cmd_cq_create(adapter, cq, &mcc_eqo(adapter)->q, true, 0))
		goto mcc_cq_free;

	q = &adapter->mcc_obj.q;
	if (be_queue_alloc(adapter, q, MCC_Q_LEN, sizeof(struct be_mcc_wrb)))
		goto mcc_cq_destroy;

	if (be_cmd_mccq_create(adapter, q, cq))
		goto mcc_q_free;

	return 0;

	/* Unwind in reverse order of acquisition */
mcc_q_free:
	be_queue_free(adapter, q);
mcc_cq_destroy:
	be_cmd_q_destroy(adapter, cq, QTYPE_CQ);
mcc_cq_free:
	be_queue_free(adapter, cq);
err:
	return -1;
}
1821
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001822static void be_tx_queues_destroy(struct be_adapter *adapter)
1823{
1824 struct be_queue_info *q;
Sathya Perla3c8def92011-06-12 20:01:58 +00001825 struct be_tx_obj *txo;
1826 u8 i;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001827
Sathya Perla3c8def92011-06-12 20:01:58 +00001828 for_all_tx_queues(adapter, txo, i) {
1829 q = &txo->q;
1830 if (q->created)
1831 be_cmd_q_destroy(adapter, q, QTYPE_TXQ);
1832 be_queue_free(adapter, q);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001833
Sathya Perla3c8def92011-06-12 20:01:58 +00001834 q = &txo->cq;
1835 if (q->created)
1836 be_cmd_q_destroy(adapter, q, QTYPE_CQ);
1837 be_queue_free(adapter, q);
1838 }
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001839}
1840
Sathya Perladafc0fe2011-10-24 02:45:02 +00001841static int be_num_txqs_want(struct be_adapter *adapter)
1842{
Padmanabh Ratnakarabb93952012-10-20 06:01:41 +00001843 if ((!lancer_chip(adapter) && sriov_want(adapter)) ||
1844 be_is_mc(adapter) ||
1845 (!lancer_chip(adapter) && !be_physfn(adapter)) ||
Sathya Perla39f1d942012-05-08 19:41:24 +00001846 adapter->generation == BE_GEN2)
Sathya Perladafc0fe2011-10-24 02:45:02 +00001847 return 1;
1848 else
Padmanabh Ratnakarabb93952012-10-20 06:01:41 +00001849 return adapter->max_tx_queues;
Sathya Perladafc0fe2011-10-24 02:45:02 +00001850}
1851
/* Decide the number of TX queues, publish it to the net stack, and
 * allocate + create a completion queue for each. Returns 0 or the first
 * error; partially-created CQs are left for the caller's cleanup path.
 */
static int be_tx_cqs_create(struct be_adapter *adapter)
{
	struct be_queue_info *cq, *eq;
	int status;
	struct be_tx_obj *txo;
	u8 i;

	adapter->num_tx_qs = be_num_txqs_want(adapter);
	if (adapter->num_tx_qs != MAX_TX_QS) {
		/* rtnl_lock protects the real_num_tx_queues update */
		rtnl_lock();
		netif_set_real_num_tx_queues(adapter->netdev,
			adapter->num_tx_qs);
		rtnl_unlock();
	}

	for_all_tx_queues(adapter, txo, i) {
		cq = &txo->cq;
		status = be_queue_alloc(adapter, cq, TX_CQ_LEN,
					sizeof(struct be_eth_tx_compl));
		if (status)
			return status;

		/* If num_evt_qs is less than num_tx_qs, then more than
		 * one txq share an eq
		 */
		eq = &adapter->eq_obj[i % adapter->num_evt_qs].q;
		status = be_cmd_cq_create(adapter, cq, eq, false, 3);
		if (status)
			return status;
	}
	return 0;
}
1884
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001885static int be_tx_qs_create(struct be_adapter *adapter)
1886{
1887 struct be_tx_obj *txo;
1888 int i, status;
1889
1890 for_all_tx_queues(adapter, txo, i) {
1891 status = be_queue_alloc(adapter, &txo->q, TX_Q_LEN,
1892 sizeof(struct be_eth_wrb));
1893 if (status)
1894 return status;
1895
1896 status = be_cmd_txq_create(adapter, &txo->q, &txo->cq);
1897 if (status)
1898 return status;
1899 }
1900
Sathya Perlad3791422012-09-28 04:39:44 +00001901 dev_info(&adapter->pdev->dev, "created %d TX queue(s)\n",
1902 adapter->num_tx_qs);
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001903 return 0;
1904}
1905
1906static void be_rx_cqs_destroy(struct be_adapter *adapter)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001907{
1908 struct be_queue_info *q;
Sathya Perla3abcded2010-10-03 22:12:27 -07001909 struct be_rx_obj *rxo;
1910 int i;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001911
Sathya Perla3abcded2010-10-03 22:12:27 -07001912 for_all_rx_queues(adapter, rxo, i) {
Sathya Perla3abcded2010-10-03 22:12:27 -07001913 q = &rxo->cq;
1914 if (q->created)
1915 be_cmd_q_destroy(adapter, q, QTYPE_CQ);
1916 be_queue_free(adapter, q);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001917 }
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001918}
1919
/* Decide the number of RX queues and create a completion queue for each.
 * With more than one irq, num_rx_qs = num_irqs + 1 (RSS rings plus the
 * default RX queue); with a single irq only the default queue is used.
 * Each CQ is attached round-robin to an event queue.
 * Returns 0 or the first error from queue alloc / FW CQ creation.
 */
static int be_rx_cqs_create(struct be_adapter *adapter)
{
	struct be_queue_info *eq, *cq;
	struct be_rx_obj *rxo;
	int rc, i;

	/* We'll create as many RSS rings as there are irqs.
	 * But when there's only one irq there's no use creating RSS rings
	 */
	adapter->num_rx_qs = (num_irqs(adapter) > 1) ?
				num_irqs(adapter) + 1 : 1;
	if (adapter->num_rx_qs != MAX_RX_QS) {
		/* netif_set_real_num_rx_queues() must run under rtnl */
		rtnl_lock();
		netif_set_real_num_rx_queues(adapter->netdev,
					     adapter->num_rx_qs);
		rtnl_unlock();
	}

	adapter->big_page_size = (1 << get_order(rx_frag_size)) * PAGE_SIZE;
	for_all_rx_queues(adapter, rxo, i) {
		rxo->adapter = adapter;
		cq = &rxo->cq;
		rc = be_queue_alloc(adapter, cq, RX_CQ_LEN,
				sizeof(struct be_eth_rx_compl));
		if (rc)
			return rc;

		/* Distribute CQs across the available event queues */
		eq = &adapter->eq_obj[i % adapter->num_evt_qs].q;
		rc = be_cmd_cq_create(adapter, cq, eq, false, 3);
		if (rc)
			return rc;
	}

	dev_info(&adapter->pdev->dev,
		 "created %d RSS queue(s) and 1 default RX queue\n",
		 adapter->num_rx_qs - 1);
	return 0;
}
1958
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001959static irqreturn_t be_intx(int irq, void *dev)
1960{
1961 struct be_adapter *adapter = dev;
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001962 int num_evts;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001963
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001964 /* With INTx only one EQ is used */
1965 num_evts = event_handle(&adapter->eq_obj[0]);
1966 if (num_evts)
1967 return IRQ_HANDLED;
1968 else
1969 return IRQ_NONE;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001970}
1971
/* MSI-x interrupt handler.  Each vector is registered with its own EQ
 * object as the cookie, so the interrupt is never shared: handle the EQ's
 * events and unconditionally claim the irq.
 */
static irqreturn_t be_msix(int irq, void *dev)
{
	struct be_eq_obj *eqo = dev;

	event_handle(eqo);
	return IRQ_HANDLED;
}
1979
Sathya Perla2e588f82011-03-11 02:49:26 +00001980static inline bool do_gro(struct be_rx_compl_info *rxcp)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001981{
Sathya Perla2e588f82011-03-11 02:49:26 +00001982 return (rxcp->tcpf && !rxcp->err) ? true : false;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001983}
1984
/* Drain up to @budget completions from the RX object's CQ.
 * Valid frames are handed to the stack via GRO or the regular receive
 * path; flush, partial-DMA and mis-filtered completions are discarded.
 * Notifies the CQ for consumed entries and replenishes RX buffers when
 * the queue runs low.  Returns the number of completions processed.
 */
static int be_process_rx(struct be_rx_obj *rxo, struct napi_struct *napi,
			int budget)
{
	struct be_adapter *adapter = rxo->adapter;
	struct be_queue_info *rx_cq = &rxo->cq;
	struct be_rx_compl_info *rxcp;
	u32 work_done;

	for (work_done = 0; work_done < budget; work_done++) {
		rxcp = be_rx_compl_get(rxo);
		if (!rxcp)
			break;

		/* Is it a flush compl that has no data */
		if (unlikely(rxcp->num_rcvd == 0))
			goto loop_continue;

		/* Discard compl with partial DMA Lancer B0 */
		if (unlikely(!rxcp->pkt_size)) {
			be_rx_compl_discard(rxo, rxcp);
			goto loop_continue;
		}

		/* On BE drop pkts that arrive due to imperfect filtering in
		 * promiscuous mode on some skews
		 */
		if (unlikely(rxcp->port != adapter->port_num &&
				!lancer_chip(adapter))) {
			be_rx_compl_discard(rxo, rxcp);
			goto loop_continue;
		}

		if (do_gro(rxcp))
			be_rx_compl_process_gro(rxo, napi, rxcp);
		else
			be_rx_compl_process(rxo, rxcp);
loop_continue:
		/* Stats are updated for discarded completions too */
		be_rx_stats_update(rxo, rxcp);
	}

	if (work_done) {
		be_cq_notify(adapter, rx_cq->id, true, work_done);

		/* Repost RX buffers once the posted count falls below the
		 * refill watermark
		 */
		if (atomic_read(&rxo->q.used) < RX_FRAGS_REFILL_WM)
			be_post_rx_frags(rxo, GFP_ATOMIC);
	}

	return work_done;
}
2034
/* Reap up to @budget TX completions on @txo, freeing the wrbs they cover
 * and waking netdev sub-queue @idx if it had been flow-stopped and enough
 * ring space is now free.  Returns true when fewer than @budget
 * completions were found, i.e. the CQ was fully drained.
 */
static bool be_process_tx(struct be_adapter *adapter, struct be_tx_obj *txo,
			int budget, int idx)
{
	struct be_eth_tx_compl *txcp;
	int num_wrbs = 0, work_done;

	for (work_done = 0; work_done < budget; work_done++) {
		txcp = be_tx_compl_get(&txo->cq);
		if (!txcp)
			break;
		/* One completion may cover several wrbs; accumulate count */
		num_wrbs += be_tx_compl_process(adapter, txo,
				AMAP_GET_BITS(struct amap_eth_tx_compl,
					wrb_index, txcp));
	}

	if (work_done) {
		be_cq_notify(adapter, txo->cq.id, true, work_done);
		atomic_sub(num_wrbs, &txo->q.used);

		/* As Tx wrbs have been freed up, wake up netdev queue
		 * if it was stopped due to lack of tx wrbs. */
		if (__netif_subqueue_stopped(adapter->netdev, idx) &&
			atomic_read(&txo->q.used) < txo->q.len / 2) {
			netif_wake_subqueue(adapter->netdev, idx);
		}

		u64_stats_update_begin(&tx_stats(txo)->sync_compl);
		tx_stats(txo)->tx_compl += work_done;
		u64_stats_update_end(&tx_stats(txo)->sync_compl);
	}
	return (work_done < budget); /* Done */
}
Sathya Perla3c8def92011-06-12 20:01:58 +00002067
/* NAPI poll handler for one event queue.  Services every TXQ and RXQ
 * mapped to this EQ (queues are striped across EQs by index modulo
 * num_evt_qs) plus the MCC queue when this is the MCC EQ.  Re-arms the
 * EQ only when all work fit within @budget; otherwise stays in polling
 * mode and just clears the accumulated events.
 */
int be_poll(struct napi_struct *napi, int budget)
{
	struct be_eq_obj *eqo = container_of(napi, struct be_eq_obj, napi);
	struct be_adapter *adapter = eqo->adapter;
	int max_work = 0, work, i;
	bool tx_done;

	/* Process all TXQs serviced by this EQ */
	for (i = eqo->idx; i < adapter->num_tx_qs; i += adapter->num_evt_qs) {
		tx_done = be_process_tx(adapter, &adapter->tx_obj[i],
					eqo->tx_budget, i);
		/* TX not fully drained: report full budget so NAPI polls
		 * us again
		 */
		if (!tx_done)
			max_work = budget;
	}

	/* This loop will iterate twice for EQ0 in which
	 * completions of the last RXQ (default one) are also processed
	 * For other EQs the loop iterates only once
	 */
	for (i = eqo->idx; i < adapter->num_rx_qs; i += adapter->num_evt_qs) {
		work = be_process_rx(&adapter->rx_obj[i], napi, budget);
		max_work = max(work, max_work);
	}

	if (is_mcc_eqo(eqo))
		be_process_mcc(adapter);

	if (max_work < budget) {
		napi_complete(napi);
		be_eq_notify(adapter, eqo->q.id, true, false, 0);
	} else {
		/* As we'll continue in polling mode, count and clear events */
		be_eq_notify(adapter, eqo->q.id, false, false, events_get(eqo));
	}
	return max_work;
}
2104
Padmanabh Ratnakarf67ef7b2012-07-12 03:57:09 +00002105void be_detect_error(struct be_adapter *adapter)
Ajit Khaparde7c185272010-07-29 06:16:33 +00002106{
Padmanabh Ratnakare1cfb672011-11-03 01:50:08 +00002107 u32 ue_lo = 0, ue_hi = 0, ue_lo_mask = 0, ue_hi_mask = 0;
2108 u32 sliport_status = 0, sliport_err1 = 0, sliport_err2 = 0;
Ajit Khaparde7c185272010-07-29 06:16:33 +00002109 u32 i;
2110
Padmanabh Ratnakarf67ef7b2012-07-12 03:57:09 +00002111 if (be_crit_error(adapter))
Sathya Perla72f02482011-11-10 19:17:58 +00002112 return;
2113
Padmanabh Ratnakare1cfb672011-11-03 01:50:08 +00002114 if (lancer_chip(adapter)) {
2115 sliport_status = ioread32(adapter->db + SLIPORT_STATUS_OFFSET);
2116 if (sliport_status & SLIPORT_STATUS_ERR_MASK) {
2117 sliport_err1 = ioread32(adapter->db +
2118 SLIPORT_ERROR1_OFFSET);
2119 sliport_err2 = ioread32(adapter->db +
2120 SLIPORT_ERROR2_OFFSET);
2121 }
2122 } else {
2123 pci_read_config_dword(adapter->pdev,
2124 PCICFG_UE_STATUS_LOW, &ue_lo);
2125 pci_read_config_dword(adapter->pdev,
2126 PCICFG_UE_STATUS_HIGH, &ue_hi);
2127 pci_read_config_dword(adapter->pdev,
2128 PCICFG_UE_STATUS_LOW_MASK, &ue_lo_mask);
2129 pci_read_config_dword(adapter->pdev,
2130 PCICFG_UE_STATUS_HI_MASK, &ue_hi_mask);
Ajit Khaparde7c185272010-07-29 06:16:33 +00002131
Padmanabh Ratnakarf67ef7b2012-07-12 03:57:09 +00002132 ue_lo = (ue_lo & ~ue_lo_mask);
2133 ue_hi = (ue_hi & ~ue_hi_mask);
Padmanabh Ratnakare1cfb672011-11-03 01:50:08 +00002134 }
Ajit Khaparde7c185272010-07-29 06:16:33 +00002135
Ajit Khaparde1451ae62012-10-08 18:18:21 +00002136 /* On certain platforms BE hardware can indicate spurious UEs.
2137 * Allow the h/w to stop working completely in case of a real UE.
2138 * Hence not setting the hw_error for UE detection.
2139 */
2140 if (sliport_status & SLIPORT_STATUS_ERR_MASK) {
Padmanabh Ratnakarf67ef7b2012-07-12 03:57:09 +00002141 adapter->hw_error = true;
Sathya Perla434b3642011-11-10 19:17:59 +00002142 dev_err(&adapter->pdev->dev,
Padmanabh Ratnakarf67ef7b2012-07-12 03:57:09 +00002143 "Error detected in the card\n");
2144 }
2145
2146 if (sliport_status & SLIPORT_STATUS_ERR_MASK) {
2147 dev_err(&adapter->pdev->dev,
2148 "ERR: sliport status 0x%x\n", sliport_status);
2149 dev_err(&adapter->pdev->dev,
2150 "ERR: sliport error1 0x%x\n", sliport_err1);
2151 dev_err(&adapter->pdev->dev,
2152 "ERR: sliport error2 0x%x\n", sliport_err2);
Ajit Khaparded053de92010-09-03 06:23:30 +00002153 }
2154
Padmanabh Ratnakare1cfb672011-11-03 01:50:08 +00002155 if (ue_lo) {
2156 for (i = 0; ue_lo; ue_lo >>= 1, i++) {
2157 if (ue_lo & 1)
Ajit Khaparde7c185272010-07-29 06:16:33 +00002158 dev_err(&adapter->pdev->dev,
2159 "UE: %s bit set\n", ue_status_low_desc[i]);
2160 }
2161 }
Padmanabh Ratnakarf67ef7b2012-07-12 03:57:09 +00002162
Padmanabh Ratnakare1cfb672011-11-03 01:50:08 +00002163 if (ue_hi) {
2164 for (i = 0; ue_hi; ue_hi >>= 1, i++) {
2165 if (ue_hi & 1)
Ajit Khaparde7c185272010-07-29 06:16:33 +00002166 dev_err(&adapter->pdev->dev,
2167 "UE: %s bit set\n", ue_status_hi_desc[i]);
2168 }
2169 }
2170
2171}
2172
Sathya Perla8d56ff12009-11-22 22:02:26 +00002173static void be_msix_disable(struct be_adapter *adapter)
2174{
Sathya Perlaac6a0c42011-03-21 20:49:25 +00002175 if (msix_enabled(adapter)) {
Sathya Perla8d56ff12009-11-22 22:02:26 +00002176 pci_disable_msix(adapter->pdev);
Sathya Perlaac6a0c42011-03-21 20:49:25 +00002177 adapter->num_msix_vec = 0;
Sathya Perla3abcded2010-10-03 22:12:27 -07002178 }
2179}
2180
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002181static uint be_num_rss_want(struct be_adapter *adapter)
2182{
Yuval Mintz30e80b52012-07-01 03:19:00 +00002183 u32 num = 0;
Padmanabh Ratnakarabb93952012-10-20 06:01:41 +00002184
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002185 if ((adapter->function_caps & BE_FUNCTION_CAPS_RSS) &&
Padmanabh Ratnakarabb93952012-10-20 06:01:41 +00002186 (lancer_chip(adapter) ||
2187 (!sriov_want(adapter) && be_physfn(adapter)))) {
2188 num = adapter->max_rss_queues;
Yuval Mintz30e80b52012-07-01 03:19:00 +00002189 num = min_t(u32, num, (u32)netif_get_num_default_rss_queues());
2190 }
2191 return num;
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002192}
2193
/* Try to enable MSI-x with enough vectors for the desired RSS rings (and
 * RoCE, when supported).  pci_enable_msix() returning a positive value
 * means fewer vectors are available; retry once with that count.  On
 * success the vectors are split between NIC and RoCE; on failure the
 * driver falls back to INTx (num_msix_vec stays 0).
 */
static void be_msix_enable(struct be_adapter *adapter)
{
#define BE_MIN_MSIX_VECTORS 1
	int i, status, num_vec, num_roce_vec = 0;
	struct device *dev = &adapter->pdev->dev;

	/* If RSS queues are not used, need a vec for default RX Q */
	num_vec = min(be_num_rss_want(adapter), num_online_cpus());
	if (be_roce_supported(adapter)) {
		num_roce_vec = min_t(u32, MAX_ROCE_MSIX_VECTORS,
					(num_online_cpus() + 1));
		num_roce_vec = min(num_roce_vec, MAX_ROCE_EQS);
		num_vec += num_roce_vec;
		num_vec = min(num_vec, MAX_MSIX_VECTORS);
	}
	num_vec = max(num_vec, BE_MIN_MSIX_VECTORS);

	for (i = 0; i < num_vec; i++)
		adapter->msix_entries[i].entry = i;

	status = pci_enable_msix(adapter->pdev, adapter->msix_entries, num_vec);
	if (status == 0) {
		goto done;
	} else if (status >= BE_MIN_MSIX_VECTORS) {
		/* Positive status = number of vectors actually available;
		 * retry with the reduced count
		 */
		num_vec = status;
		if (pci_enable_msix(adapter->pdev, adapter->msix_entries,
				num_vec) == 0)
			goto done;
	}

	dev_warn(dev, "MSIx enable failed\n");
	return;
done:
	/* Partition the granted vectors between NIC and RoCE */
	if (be_roce_supported(adapter)) {
		if (num_vec > num_roce_vec) {
			adapter->num_msix_vec = num_vec - num_roce_vec;
			adapter->num_msix_roce_vec =
				num_vec - adapter->num_msix_vec;
		} else {
			adapter->num_msix_vec = num_vec;
			adapter->num_msix_roce_vec = 0;
		}
	} else
		adapter->num_msix_vec = num_vec;
	dev_info(dev, "enabled %d MSI-x vector(s)\n", adapter->num_msix_vec);
	return;
}
2241
/* Return the MSI-x vector number assigned to the given EQ object */
static inline int be_msix_vec_get(struct be_adapter *adapter,
				struct be_eq_obj *eqo)
{
	return adapter->msix_entries[eqo->idx].vector;
}
2247
/* Register one MSI-x irq handler per event queue.  On failure, free the
 * irqs registered so far (in reverse order) and disable MSI-x entirely
 * so the caller can fall back to INTx.  Returns 0 or the request_irq
 * error.
 */
static int be_msix_register(struct be_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	struct be_eq_obj *eqo;
	int status, i, vec;

	for_all_evt_queues(adapter, eqo, i) {
		/* Per-vector name shown in /proc/interrupts */
		sprintf(eqo->desc, "%s-q%d", netdev->name, i);
		vec = be_msix_vec_get(adapter, eqo);
		status = request_irq(vec, be_msix, 0, eqo->desc, eqo);
		if (status)
			goto err_msix;
	}

	return 0;
err_msix:
	/* Unwind only the vectors that were successfully requested */
	for (i--, eqo = &adapter->eq_obj[i]; i >= 0; i--, eqo--)
		free_irq(be_msix_vec_get(adapter, eqo), eqo);
	dev_warn(&adapter->pdev->dev, "MSIX Request IRQ failed - err %d\n",
		status);
	be_msix_disable(adapter);
	return status;
}
2271
/* Register interrupt handlers: MSI-x when enabled, otherwise a shared
 * INTx handler.  A PF may fall back from MSI-x to INTx; a VF may not.
 * Sets isr_registered on success.  Returns 0 or a request_irq error.
 */
static int be_irq_register(struct be_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	int status;

	if (msix_enabled(adapter)) {
		status = be_msix_register(adapter);
		if (status == 0)
			goto done;
		/* INTx is not supported for VF */
		if (!be_physfn(adapter))
			return status;
	}

	/* INTx */
	netdev->irq = adapter->pdev->irq;
	status = request_irq(netdev->irq, be_intx, IRQF_SHARED, netdev->name,
			adapter);
	if (status) {
		dev_err(&adapter->pdev->dev,
			"INTx request IRQ failed - err %d\n", status);
		return status;
	}
done:
	adapter->isr_registered = true;
	return 0;
}
2299
/* Undo be_irq_register(): free the INTx irq or every per-EQ MSI-x irq,
 * and clear isr_registered.  Safe to call when nothing was registered.
 */
static void be_irq_unregister(struct be_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	struct be_eq_obj *eqo;
	int i;

	if (!adapter->isr_registered)
		return;

	/* INTx */
	if (!msix_enabled(adapter)) {
		free_irq(netdev->irq, adapter);
		goto done;
	}

	/* MSIx */
	for_all_evt_queues(adapter, eqo, i)
		free_irq(be_msix_vec_get(adapter, eqo), eqo);

done:
	adapter->isr_registered = false;
}
2322
/* Destroy all RX queues: issue the FW rxq-destroy command, give in-flight
 * DMA and the flush completion time to land, drain the CQ, then free the
 * host ring memory.
 */
static void be_rx_qs_destroy(struct be_adapter *adapter)
{
	struct be_queue_info *q;
	struct be_rx_obj *rxo;
	int i;

	for_all_rx_queues(adapter, rxo, i) {
		q = &rxo->q;
		if (q->created) {
			be_cmd_rxq_destroy(adapter, q);
			/* After the rxq is invalidated, wait for a grace time
			 * of 1ms for all dma to end and the flush compl to
			 * arrive
			 */
			mdelay(1);
			be_rx_cq_clean(rxo);
		}
		be_queue_free(adapter, q);
	}
}
2343
/* ndo_stop handler: quiesce the device in the reverse order of be_open().
 * Stops RoCE and async MCC, masks interrupts, disables NAPI and drains
 * each EQ, unregisters irqs, waits for outstanding TX completions, and
 * finally tears down the RX queues.  Always returns 0.
 */
static int be_close(struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_eq_obj *eqo;
	int i;

	be_roce_dev_close(adapter);

	be_async_mcc_disable(adapter);

	/* Lancer does not use this global interrupt enable/disable knob */
	if (!lancer_chip(adapter))
		be_intr_set(adapter, false);

	for_all_evt_queues(adapter, eqo, i) {
		napi_disable(&eqo->napi);
		/* Ensure no handler is still running for this EQ's irq */
		if (msix_enabled(adapter))
			synchronize_irq(be_msix_vec_get(adapter, eqo));
		else
			synchronize_irq(netdev->irq);
		be_eq_clean(eqo);
	}

	be_irq_unregister(adapter);

	/* Wait for all pending tx completions to arrive so that
	 * all tx skbs are freed.
	 */
	be_tx_compl_clean(adapter);

	be_rx_qs_destroy(adapter);
	return 0;
}
2376
/* Allocate and create the RX queues, program the RSS indirection table
 * (when multiple RX queues exist) and post the initial RX buffers.
 * The default (non-RSS) RXQ must be created before the RSS ones — a FW
 * requirement.  Returns 0 or the first error encountered.
 */
static int be_rx_qs_create(struct be_adapter *adapter)
{
	struct be_rx_obj *rxo;
	int rc, i, j;
	u8 rsstable[128];

	for_all_rx_queues(adapter, rxo, i) {
		rc = be_queue_alloc(adapter, &rxo->q, RX_Q_LEN,
				    sizeof(struct be_eth_rx_d));
		if (rc)
			return rc;
	}

	/* The FW would like the default RXQ to be created first */
	rxo = default_rxo(adapter);
	rc = be_cmd_rxq_create(adapter, &rxo->q, rxo->cq.id, rx_frag_size,
			       adapter->if_handle, false, &rxo->rss_id);
	if (rc)
		return rc;

	for_all_rss_queues(adapter, rxo, i) {
		rc = be_cmd_rxq_create(adapter, &rxo->q, rxo->cq.id,
				       rx_frag_size, adapter->if_handle,
				       true, &rxo->rss_id);
		if (rc)
			return rc;
	}

	if (be_multi_rxq(adapter)) {
		/* Fill the 128-entry indirection table by cycling through
		 * the RSS ring ids
		 */
		for (j = 0; j < 128; j += adapter->num_rx_qs - 1) {
			for_all_rss_queues(adapter, rxo, i) {
				if ((j + i) >= 128)
					break;
				rsstable[j + i] = rxo->rss_id;
			}
		}
		rc = be_cmd_rss_config(adapter, rsstable, 128);
		if (rc)
			return rc;
	}

	/* First time posting */
	for_all_rx_queues(adapter, rxo, i)
		be_post_rx_frags(rxo, GFP_KERNEL);
	return 0;
}
2423
/* ndo_open handler: create RX queues, register irqs, unmask interrupts,
 * arm all CQs and EQs, enable NAPI and async MCC, report the current
 * link state and start RoCE.  On any failure the partially-opened state
 * is torn down via be_close() and -EIO is returned.
 */
static int be_open(struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_eq_obj *eqo;
	struct be_rx_obj *rxo;
	struct be_tx_obj *txo;
	u8 link_status;
	int status, i;

	status = be_rx_qs_create(adapter);
	if (status)
		goto err;

	be_irq_register(adapter);

	/* Lancer does not use this global interrupt enable/disable knob */
	if (!lancer_chip(adapter))
		be_intr_set(adapter, true);

	for_all_rx_queues(adapter, rxo, i)
		be_cq_notify(adapter, rxo->cq.id, true, 0);

	for_all_tx_queues(adapter, txo, i)
		be_cq_notify(adapter, txo->cq.id, true, 0);

	be_async_mcc_enable(adapter);

	for_all_evt_queues(adapter, eqo, i) {
		napi_enable(&eqo->napi);
		be_eq_notify(adapter, eqo->q.id, true, false, 0);
	}

	/* Best-effort: carrier state is updated only if the query succeeds */
	status = be_cmd_link_status_query(adapter, NULL, &link_status, 0);
	if (!status)
		be_link_status_update(adapter, link_status);

	be_roce_dev_open(adapter);
	return 0;
err:
	be_close(adapter->netdev);
	return -EIO;
}
2465
Ajit Khaparde71d8d1b2009-12-03 06:16:59 +00002466static int be_setup_wol(struct be_adapter *adapter, bool enable)
2467{
2468 struct be_dma_mem cmd;
2469 int status = 0;
2470 u8 mac[ETH_ALEN];
2471
2472 memset(mac, 0, ETH_ALEN);
2473
2474 cmd.size = sizeof(struct be_cmd_req_acpi_wol_magic_config);
Ivan Vecera2b7bceb2011-02-02 08:05:12 +00002475 cmd.va = dma_alloc_coherent(&adapter->pdev->dev, cmd.size, &cmd.dma,
2476 GFP_KERNEL);
Ajit Khaparde71d8d1b2009-12-03 06:16:59 +00002477 if (cmd.va == NULL)
2478 return -1;
2479 memset(cmd.va, 0, cmd.size);
2480
2481 if (enable) {
2482 status = pci_write_config_dword(adapter->pdev,
2483 PCICFG_PM_CONTROL_OFFSET, PCICFG_PM_CONTROL_MASK);
2484 if (status) {
2485 dev_err(&adapter->pdev->dev,
Frans Pop2381a552010-03-24 07:57:36 +00002486 "Could not enable Wake-on-lan\n");
Ivan Vecera2b7bceb2011-02-02 08:05:12 +00002487 dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va,
2488 cmd.dma);
Ajit Khaparde71d8d1b2009-12-03 06:16:59 +00002489 return status;
2490 }
2491 status = be_cmd_enable_magic_wol(adapter,
2492 adapter->netdev->dev_addr, &cmd);
2493 pci_enable_wake(adapter->pdev, PCI_D3hot, 1);
2494 pci_enable_wake(adapter->pdev, PCI_D3cold, 1);
2495 } else {
2496 status = be_cmd_enable_magic_wol(adapter, mac, &cmd);
2497 pci_enable_wake(adapter->pdev, PCI_D3hot, 0);
2498 pci_enable_wake(adapter->pdev, PCI_D3cold, 0);
2499 }
2500
Ivan Vecera2b7bceb2011-02-02 08:05:12 +00002501 dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va, cmd.dma);
Ajit Khaparde71d8d1b2009-12-03 06:16:59 +00002502 return status;
2503}
2504
/*
 * Generate a seed MAC address from the PF MAC Address using jhash.
 * MAC Address for VFs are assigned incrementally starting from the seed.
 * These addresses are programmed in the ASIC by the PF and the VF driver
 * queries for the MAC address during its probe.
 *
 * Note: on a per-VF failure the loop continues with the remaining VFs;
 * the status of the last attempt is returned.
 */
static inline int be_vf_eth_addr_config(struct be_adapter *adapter)
{
	u32 vf;
	int status = 0;
	u8 mac[ETH_ALEN];
	struct be_vf_cfg *vf_cfg;

	be_vf_eth_addr_generate(adapter, mac);

	for_all_vfs(adapter, vf_cfg, vf) {
		/* Lancer programs the MAC via the mac-list cmd; BEx adds a
		 * pmac on the VF's interface
		 */
		if (lancer_chip(adapter)) {
			status = be_cmd_set_mac_list(adapter, mac, 1, vf + 1);
		} else {
			status = be_cmd_pmac_add(adapter, mac,
						 vf_cfg->if_handle,
						 &vf_cfg->pmac_id, vf + 1);
		}

		if (status)
			dev_err(&adapter->pdev->dev,
			"Mac address assignment failed for VF %d\n", vf);
		else
			memcpy(vf_cfg->mac_addr, mac, ETH_ALEN);

		/* Next VF gets the next sequential address */
		mac[5] += 1;
	}
	return status;
}
2539
/* Undo VF setup: remove each VF's MAC and interface and disable SR-IOV.
 * If any VF is still assigned to a VM, the per-VF FW teardown and
 * pci_disable_sriov() are skipped; the vf_cfg array is freed in either
 * case and num_vfs reset to 0.
 */
static void be_vf_clear(struct be_adapter *adapter)
{
	struct be_vf_cfg *vf_cfg;
	u32 vf;

	if (be_find_vfs(adapter, ASSIGNED)) {
		dev_warn(&adapter->pdev->dev, "VFs are assigned to VMs\n");
		goto done;
	}

	for_all_vfs(adapter, vf_cfg, vf) {
		/* Remove the VF's MAC: mac-list cmd on Lancer, pmac del
		 * on BEx
		 */
		if (lancer_chip(adapter))
			be_cmd_set_mac_list(adapter, NULL, 0, vf + 1);
		else
			be_cmd_pmac_del(adapter, vf_cfg->if_handle,
					vf_cfg->pmac_id, vf + 1);

		be_cmd_if_destroy(adapter, vf_cfg->if_handle, vf + 1);
	}
	pci_disable_sriov(adapter->pdev);
done:
	kfree(adapter->vf_cfg);
	adapter->num_vfs = 0;
}
2564
/* Tear down everything be_setup() created: stop the worker, clear VFs,
 * delete extra unicast pmacs, destroy the interface and all MCC/RX/TX/EQ
 * queues, free the pmac_id array and disable MSI-x.  Always returns 0.
 */
static int be_clear(struct be_adapter *adapter)
{
	/* pmac_id[0] is the primary MAC; extra uc macs start at index 1 */
	int i = 1;

	if (adapter->flags & BE_FLAGS_WORKER_SCHEDULED) {
		cancel_delayed_work_sync(&adapter->work);
		adapter->flags &= ~BE_FLAGS_WORKER_SCHEDULED;
	}

	if (sriov_enabled(adapter))
		be_vf_clear(adapter);

	for (; adapter->uc_macs > 0; adapter->uc_macs--, i++)
		be_cmd_pmac_del(adapter, adapter->if_handle,
			adapter->pmac_id[i], 0);

	be_cmd_if_destroy(adapter, adapter->if_handle, 0);

	be_mcc_queues_destroy(adapter);
	be_rx_cqs_destroy(adapter);
	be_tx_queues_destroy(adapter);
	be_evt_queues_destroy(adapter);

	kfree(adapter->pmac_id);
	adapter->pmac_id = NULL;

	be_msix_disable(adapter);
	return 0;
}
2594
Padmanabh Ratnakarabb93952012-10-20 06:01:41 +00002595static void be_get_vf_if_cap_flags(struct be_adapter *adapter,
2596 u32 *cap_flags, u8 domain)
2597{
2598 bool profile_present = false;
2599 int status;
2600
2601 if (lancer_chip(adapter)) {
2602 status = be_cmd_get_profile_config(adapter, cap_flags, domain);
2603 if (!status)
2604 profile_present = true;
2605 }
2606
2607 if (!profile_present)
2608 *cap_flags = BE_IF_FLAGS_UNTAGGED | BE_IF_FLAGS_BROADCAST |
2609 BE_IF_FLAGS_MULTICAST;
2610}
2611
/* Allocate the per-VF config array and mark every entry's if_handle and
 * pmac_id as invalid (-1) until be_vf_setup() fills them in.
 * Returns 0 or -ENOMEM.
 */
static int be_vf_setup_init(struct be_adapter *adapter)
{
	struct be_vf_cfg *vf_cfg;
	int vf;

	adapter->vf_cfg = kcalloc(adapter->num_vfs, sizeof(*vf_cfg),
				  GFP_KERNEL);
	if (!adapter->vf_cfg)
		return -ENOMEM;

	for_all_vfs(adapter, vf_cfg, vf) {
		vf_cfg->if_handle = -1;
		vf_cfg->pmac_id = -1;
	}
	return 0;
}
2628
/* Enable PCI SR-IOV and provision each VF:
 *  - create a FW i/f per VF (domain = vf + 1), enabling only the flags
 *    the VF's capability set allows,
 *  - program VF MAC addresses,
 *  - cap each VF's TX rate via QOS and record its default (pvid) vlan.
 * Returns 0 when there is nothing to do (VFs already enabled, or the
 * platform rejects pci_enable_sriov()); otherwise 0 on success or the
 * first FW-cmd error.
 */
static int be_vf_setup(struct be_adapter *adapter)
{
	struct be_vf_cfg *vf_cfg;
	struct device *dev = &adapter->pdev->dev;
	u32 cap_flags, en_flags, vf;
	u16 def_vlan, lnk_speed;
	int status, enabled_vfs;

	/* VFs already enabled (e.g. by a previous driver load): leave them
	 * alone and ignore the num_vfs module parameter.
	 */
	enabled_vfs = be_find_vfs(adapter, ENABLED);
	if (enabled_vfs) {
		dev_warn(dev, "%d VFs are already enabled\n", enabled_vfs);
		dev_warn(dev, "Ignoring num_vfs=%d setting\n", num_vfs);
		return 0;
	}

	/* Clamp the request to what the device's SR-IOV capability exposes */
	if (num_vfs > adapter->dev_num_vfs) {
		dev_warn(dev, "Device supports %d VFs and not %d\n",
			 adapter->dev_num_vfs, num_vfs);
		num_vfs = adapter->dev_num_vfs;
	}

	status = pci_enable_sriov(adapter->pdev, num_vfs);
	if (!status) {
		adapter->num_vfs = num_vfs;
	} else {
		/* Platform doesn't support SRIOV though device supports it */
		dev_warn(dev, "SRIOV enable failed\n");
		return 0;
	}

	status = be_vf_setup_init(adapter);
	if (status)
		goto err;

	/* Create one FW i/f per VF, restricted to the VF's capabilities */
	for_all_vfs(adapter, vf_cfg, vf) {
		be_get_vf_if_cap_flags(adapter, &cap_flags, vf + 1);

		en_flags = cap_flags & (BE_IF_FLAGS_UNTAGGED |
					BE_IF_FLAGS_BROADCAST |
					BE_IF_FLAGS_MULTICAST);

		status = be_cmd_if_create(adapter, cap_flags, en_flags,
					  &vf_cfg->if_handle, vf + 1);
		if (status)
			goto err;
	}

	/* enabled_vfs is always 0 here (non-zero returned early above), so
	 * VF MACs are always (re)assigned on this path.
	 */
	if (!enabled_vfs) {
		status = be_vf_eth_addr_config(adapter);
		if (status)
			goto err;
	}

	for_all_vfs(adapter, vf_cfg, vf) {
		/* NOTE(review): link speed is hard-coded to 1G for the QOS
		 * cap; tx_rate is kept in Mbps (hence * 10).
		 */
		lnk_speed = 1000;
		status = be_cmd_set_qos(adapter, lnk_speed, vf + 1);
		if (status)
			goto err;
		vf_cfg->tx_rate = lnk_speed * 10;

		/* Remember the VF's default vlan from the HW switch config */
		status = be_cmd_get_hsw_config(adapter, &def_vlan,
					       vf + 1, vf_cfg->if_handle);
		if (status)
			goto err;
		vf_cfg->def_vid = def_vlan;
	}
	return 0;
err:
	return status;
}
2699
Sathya Perla30128032011-11-10 19:17:57 +00002700static void be_setup_init(struct be_adapter *adapter)
2701{
2702 adapter->vlan_prio_bmap = 0xff;
Ajit Khaparde42f11cf2012-04-21 18:53:22 +00002703 adapter->phy.link_speed = -1;
Sathya Perla30128032011-11-10 19:17:57 +00002704 adapter->if_handle = -1;
2705 adapter->be3_native = false;
2706 adapter->promiscuous = false;
2707 adapter->eq_next_idx = 0;
2708}
2709
/* Fetch into @mac the MAC address this function should use.
 *
 * If the netdev already has a permanent MAC, dev_addr is reused as-is.
 * Otherwise the MAC is queried from FW, depending on chip/function type:
 * Lancer uses the FW mac-list; a BE3 PF reads its permanent MAC; a BE3
 * VF reads the soft MAC its PF assigned.
 *
 * *active_mac tells the caller whether the MAC is already programmed in
 * FW (the caller in be_setup() adds a pmac entry only when it is false).
 * *pmac_id is written only on the Lancer mac-list path.
 */
static int be_get_mac_addr(struct be_adapter *adapter, u8 *mac, u32 if_handle,
			   bool *active_mac, u32 *pmac_id)
{
	int status = 0;

	/* perm_addr non-zero => a MAC was established previously */
	if (!is_zero_ether_addr(adapter->netdev->perm_addr)) {
		memcpy(mac, adapter->netdev->dev_addr, ETH_ALEN);
		/* NOTE(review): only the BE3-VF case is treated as already
		 * active in FW here — presumably because the PF programmed
		 * it; confirm against be_vf_eth_addr_config().
		 */
		if (!lancer_chip(adapter) && !be_physfn(adapter))
			*active_mac = true;
		else
			*active_mac = false;

		return status;
	}

	if (lancer_chip(adapter)) {
		status = be_cmd_get_mac_from_list(adapter, mac,
						  active_mac, pmac_id, 0);
		if (*active_mac) {
			/* resolve the list entry to the actual MAC addr */
			status = be_cmd_mac_addr_query(adapter, mac, false,
						       if_handle, *pmac_id);
		}
	} else if (be_physfn(adapter)) {
		/* For BE3, for PF get permanent MAC */
		status = be_cmd_mac_addr_query(adapter, mac, true, 0, 0);
		*active_mac = false;
	} else {
		/* For BE3, for VF get soft MAC assigned by PF*/
		status = be_cmd_mac_addr_query(adapter, mac, false,
					       if_handle, 0);
		*active_mac = true;
	}
	return status;
}
2744
Padmanabh Ratnakarabb93952012-10-20 06:01:41 +00002745static void be_get_resources(struct be_adapter *adapter)
2746{
2747 int status;
2748 bool profile_present = false;
2749
2750 if (lancer_chip(adapter)) {
2751 status = be_cmd_get_func_config(adapter);
2752
2753 if (!status)
2754 profile_present = true;
2755 }
2756
2757 if (profile_present) {
2758 /* Sanity fixes for Lancer */
2759 adapter->max_pmac_cnt = min_t(u16, adapter->max_pmac_cnt,
2760 BE_UC_PMAC_COUNT);
2761 adapter->max_vlans = min_t(u16, adapter->max_vlans,
2762 BE_NUM_VLANS_SUPPORTED);
2763 adapter->max_mcast_mac = min_t(u16, adapter->max_mcast_mac,
2764 BE_MAX_MC);
2765 adapter->max_tx_queues = min_t(u16, adapter->max_tx_queues,
2766 MAX_TX_QS);
2767 adapter->max_rss_queues = min_t(u16, adapter->max_rss_queues,
2768 BE3_MAX_RSS_QS);
2769 adapter->max_event_queues = min_t(u16,
2770 adapter->max_event_queues,
2771 BE3_MAX_RSS_QS);
2772
2773 if (adapter->max_rss_queues &&
2774 adapter->max_rss_queues == adapter->max_rx_queues)
2775 adapter->max_rss_queues -= 1;
2776
2777 if (adapter->max_event_queues < adapter->max_rss_queues)
2778 adapter->max_rss_queues = adapter->max_event_queues;
2779
2780 } else {
2781 if (be_physfn(adapter))
2782 adapter->max_pmac_cnt = BE_UC_PMAC_COUNT;
2783 else
2784 adapter->max_pmac_cnt = BE_VF_UC_PMAC_COUNT;
2785
2786 if (adapter->function_mode & FLEX10_MODE)
2787 adapter->max_vlans = BE_NUM_VLANS_SUPPORTED/8;
2788 else
2789 adapter->max_vlans = BE_NUM_VLANS_SUPPORTED;
2790
2791 adapter->max_mcast_mac = BE_MAX_MC;
2792 adapter->max_tx_queues = MAX_TX_QS;
2793 adapter->max_rss_queues = (adapter->be3_native) ?
2794 BE3_MAX_RSS_QS : BE2_MAX_RSS_QS;
2795 adapter->max_event_queues = BE3_MAX_RSS_QS;
2796
2797 adapter->if_cap_flags = BE_IF_FLAGS_UNTAGGED |
2798 BE_IF_FLAGS_BROADCAST |
2799 BE_IF_FLAGS_MULTICAST |
2800 BE_IF_FLAGS_PASS_L3L4_ERRORS |
2801 BE_IF_FLAGS_MCAST_PROMISCUOUS |
2802 BE_IF_FLAGS_VLAN_PROMISCUOUS |
2803 BE_IF_FLAGS_PROMISCUOUS;
2804
2805 if (adapter->function_caps & BE_FUNCTION_CAPS_RSS)
2806 adapter->if_cap_flags |= BE_IF_FLAGS_RSS;
2807 }
2808}
2809
Sathya Perla39f1d942012-05-08 19:41:24 +00002810/* Routine to query per function resource limits */
2811static int be_get_config(struct be_adapter *adapter)
2812{
Padmanabh Ratnakarabb93952012-10-20 06:01:41 +00002813 int pos, status;
Sathya Perla39f1d942012-05-08 19:41:24 +00002814 u16 dev_num_vfs;
2815
Padmanabh Ratnakarabb93952012-10-20 06:01:41 +00002816 status = be_cmd_query_fw_cfg(adapter, &adapter->port_num,
2817 &adapter->function_mode,
2818 &adapter->function_caps);
2819 if (status)
2820 goto err;
2821
2822 be_get_resources(adapter);
2823
2824 /* primary mac needs 1 pmac entry */
2825 adapter->pmac_id = kcalloc(adapter->max_pmac_cnt + 1,
2826 sizeof(u32), GFP_KERNEL);
2827 if (!adapter->pmac_id) {
2828 status = -ENOMEM;
2829 goto err;
2830 }
2831
Sathya Perla39f1d942012-05-08 19:41:24 +00002832 pos = pci_find_ext_capability(adapter->pdev, PCI_EXT_CAP_ID_SRIOV);
2833 if (pos) {
2834 pci_read_config_word(adapter->pdev, pos + PCI_SRIOV_TOTAL_VF,
2835 &dev_num_vfs);
Vasundhara Volam7c5a5242012-08-28 20:37:41 +00002836 if (!lancer_chip(adapter))
2837 dev_num_vfs = min_t(u16, dev_num_vfs, MAX_VFS);
Sathya Perla39f1d942012-05-08 19:41:24 +00002838 adapter->dev_num_vfs = dev_num_vfs;
2839 }
Padmanabh Ratnakarabb93952012-10-20 06:01:41 +00002840err:
2841 return status;
Sathya Perla39f1d942012-05-08 19:41:24 +00002842}
2843
/* Bring the adapter to an operational state: query FW config and limits,
 * create all queues (EQs, TX/RX CQs, MCC, TX queues), create the FW i/f,
 * establish the MAC address, apply vlan/rx-mode/flow-control settings,
 * optionally set up SR-IOV VFs, and start the periodic worker.
 * On any failure, everything created so far is torn down via be_clear().
 */
static int be_setup(struct be_adapter *adapter)
{
	struct device *dev = &adapter->pdev->dev;
	u32 en_flags;
	u32 tx_fc, rx_fc;
	int status;
	u8 mac[ETH_ALEN];
	bool active_mac;

	be_setup_init(adapter);

	if (!lancer_chip(adapter))
		be_cmd_req_native_mode(adapter);

	status = be_get_config(adapter);
	if (status)
		goto err;

	/* Queue creation below sizes itself from the limits just queried */
	be_msix_enable(adapter);

	status = be_evt_queues_create(adapter);
	if (status)
		goto err;

	status = be_tx_cqs_create(adapter);
	if (status)
		goto err;

	status = be_rx_cqs_create(adapter);
	if (status)
		goto err;

	status = be_mcc_queues_create(adapter);
	if (status)
		goto err;

	/* Enable only the i/f flags the function actually supports */
	en_flags = BE_IF_FLAGS_UNTAGGED | BE_IF_FLAGS_BROADCAST |
		BE_IF_FLAGS_MULTICAST | BE_IF_FLAGS_PASS_L3L4_ERRORS;

	if (adapter->function_caps & BE_FUNCTION_CAPS_RSS)
		en_flags |= BE_IF_FLAGS_RSS;

	en_flags = en_flags & adapter->if_cap_flags;

	status = be_cmd_if_create(adapter, adapter->if_cap_flags, en_flags,
				  &adapter->if_handle, 0);
	if (status != 0)
		goto err;

	/* Establish the MAC: query it, program it into FW if not already
	 * active there, and adopt it as the netdev address if none is set.
	 */
	memset(mac, 0, ETH_ALEN);
	active_mac = false;
	status = be_get_mac_addr(adapter, mac, adapter->if_handle,
				 &active_mac, &adapter->pmac_id[0]);
	if (status != 0)
		goto err;

	if (!active_mac) {
		status = be_cmd_pmac_add(adapter, mac, adapter->if_handle,
					 &adapter->pmac_id[0], 0);
		if (status != 0)
			goto err;
	}

	if (is_zero_ether_addr(adapter->netdev->dev_addr)) {
		memcpy(adapter->netdev->dev_addr, mac, ETH_ALEN);
		memcpy(adapter->netdev->perm_addr, mac, ETH_ALEN);
	}

	status = be_tx_qs_create(adapter);
	if (status)
		goto err;

	be_cmd_get_fw_ver(adapter, adapter->fw_ver, NULL);

	/* Re-program vlan filters if vlans survived a reset */
	if (adapter->vlans_added)
		be_vid_config(adapter);

	be_set_rx_mode(adapter->netdev);

	be_cmd_get_flow_control(adapter, &tx_fc, &rx_fc);

	/* only touch FW flow-control state if it differs from ours */
	if (rx_fc != adapter->rx_fc || tx_fc != adapter->tx_fc)
		be_cmd_set_flow_control(adapter, adapter->tx_fc,
					adapter->rx_fc);

	if (be_physfn(adapter) && num_vfs) {
		if (adapter->dev_num_vfs)
			be_vf_setup(adapter);
		else
			dev_warn(dev, "device doesn't support SRIOV\n");
	}

	be_cmd_get_phy_info(adapter);
	if (be_pause_supported(adapter))
		adapter->phy.fc_autoneg = 1;

	/* periodic worker; the flag lets be_clear() cancel it exactly once */
	schedule_delayed_work(&adapter->work, msecs_to_jiffies(1000));
	adapter->flags |= BE_FLAGS_WORKER_SCHEDULED;
	return 0;
err:
	be_clear(adapter);
	return status;
}
2947
#ifdef CONFIG_NET_POLL_CONTROLLER
/* ndo_poll_controller callback: service every event queue so completions
 * are processed when interrupts cannot be relied on (e.g. netconsole).
 */
static void be_netpoll(struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_eq_obj *eqo;
	int i;

	for_all_evt_queues(adapter, eqo, i)
		event_handle(eqo);
	/* redundant trailing "return;" removed (void function) */
}
#endif
2961
#define FW_FILE_HDR_SIGN "ServerEngines Corp. "
/* Signature that marks a flash section directory inside a UFI image.
 * Only referenced by get_fsec_info() in this file, so keep it file-local
 * instead of polluting the global namespace.
 */
static char flash_cookie[2][16] = {"*** SE FLAS", "H DIRECTORY *** "};
2964
Sarveshwar Bandifa9a6fe2009-11-20 14:23:47 -08002965static bool be_flash_redboot(struct be_adapter *adapter,
Ajit Khaparde3f0d4562010-02-09 01:30:35 +00002966 const u8 *p, u32 img_start, int image_size,
2967 int hdr_size)
Sarveshwar Bandifa9a6fe2009-11-20 14:23:47 -08002968{
2969 u32 crc_offset;
2970 u8 flashed_crc[4];
2971 int status;
Ajit Khaparde3f0d4562010-02-09 01:30:35 +00002972
2973 crc_offset = hdr_size + img_start + image_size - 4;
2974
Sarveshwar Bandifa9a6fe2009-11-20 14:23:47 -08002975 p += crc_offset;
Ajit Khaparde3f0d4562010-02-09 01:30:35 +00002976
2977 status = be_cmd_get_flash_crc(adapter, flashed_crc,
Ajit Khapardef510fc62010-03-31 01:47:45 +00002978 (image_size - 4));
Sarveshwar Bandifa9a6fe2009-11-20 14:23:47 -08002979 if (status) {
2980 dev_err(&adapter->pdev->dev,
2981 "could not get crc from flash, not flashing redboot\n");
2982 return false;
2983 }
2984
2985 /*update redboot only if crc does not match*/
2986 if (!memcmp(flashed_crc, p, 4))
2987 return false;
2988 else
2989 return true;
Sarveshwar Bandifa9a6fe2009-11-20 14:23:47 -08002990}
2991
Sathya Perla306f1342011-08-02 19:57:45 +00002992static bool phy_flashing_required(struct be_adapter *adapter)
2993{
Ajit Khaparde42f11cf2012-04-21 18:53:22 +00002994 return (adapter->phy.phy_type == TN_8022 &&
2995 adapter->phy.interface_type == PHY_TYPE_BASET_10GB);
Sathya Perla306f1342011-08-02 19:57:45 +00002996}
2997
Padmanabh Ratnakarc165541e2012-04-25 01:47:15 +00002998static bool is_comp_in_ufi(struct be_adapter *adapter,
2999 struct flash_section_info *fsec, int type)
3000{
3001 int i = 0, img_type = 0;
3002 struct flash_section_info_g2 *fsec_g2 = NULL;
3003
3004 if (adapter->generation != BE_GEN3)
3005 fsec_g2 = (struct flash_section_info_g2 *)fsec;
3006
3007 for (i = 0; i < MAX_FLASH_COMP; i++) {
3008 if (fsec_g2)
3009 img_type = le32_to_cpu(fsec_g2->fsec_entry[i].type);
3010 else
3011 img_type = le32_to_cpu(fsec->fsec_entry[i].type);
3012
3013 if (img_type == type)
3014 return true;
3015 }
3016 return false;
3017
3018}
3019
3020struct flash_section_info *get_fsec_info(struct be_adapter *adapter,
3021 int header_size,
3022 const struct firmware *fw)
3023{
3024 struct flash_section_info *fsec = NULL;
3025 const u8 *p = fw->data;
3026
3027 p += header_size;
3028 while (p < (fw->data + fw->size)) {
3029 fsec = (struct flash_section_info *)p;
3030 if (!memcmp(flash_cookie, fsec->cookie, sizeof(flash_cookie)))
3031 return fsec;
3032 p += 32;
3033 }
3034 return NULL;
3035}
3036
/* Walk the per-generation flash component table and, for every component
 * present in the UFI image that actually needs updating, write it to the
 * adapter flash in 32KB chunks via the FW write_flashrom command.
 * Returns 0 on success, -1 on a corrupt/truncated UFI or a cmd failure.
 */
static int be_flash_data(struct be_adapter *adapter,
			const struct firmware *fw,
			struct be_dma_mem *flash_cmd,
			int num_of_images)

{
	int status = 0, i, filehdr_size = 0;
	int img_hdrs_size = (num_of_images * sizeof(struct image_hdr));
	u32 total_bytes = 0, flash_op;
	int num_bytes;
	const u8 *p = fw->data;
	struct be_cmd_write_flashrom *req = flash_cmd->va;
	const struct flash_comp *pflashcomp;
	int num_comp, hdr_size;
	struct flash_section_info *fsec = NULL;

	/* {offset-in-UFI, flash optype, max size, UFI image type} tables */
	struct flash_comp gen3_flash_types[] = {
		{ FLASH_iSCSI_PRIMARY_IMAGE_START_g3, OPTYPE_ISCSI_ACTIVE,
			FLASH_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_iSCSI},
		{ FLASH_REDBOOT_START_g3, OPTYPE_REDBOOT,
			FLASH_REDBOOT_IMAGE_MAX_SIZE_g3, IMAGE_BOOT_CODE},
		{ FLASH_iSCSI_BIOS_START_g3, OPTYPE_BIOS,
			FLASH_BIOS_IMAGE_MAX_SIZE_g3, IMAGE_OPTION_ROM_ISCSI},
		{ FLASH_PXE_BIOS_START_g3, OPTYPE_PXE_BIOS,
			FLASH_BIOS_IMAGE_MAX_SIZE_g3, IMAGE_OPTION_ROM_PXE},
		{ FLASH_FCoE_BIOS_START_g3, OPTYPE_FCOE_BIOS,
			FLASH_BIOS_IMAGE_MAX_SIZE_g3, IMAGE_OPTION_ROM_FCoE},
		{ FLASH_iSCSI_BACKUP_IMAGE_START_g3, OPTYPE_ISCSI_BACKUP,
			FLASH_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_BACKUP_iSCSI},
		{ FLASH_FCoE_PRIMARY_IMAGE_START_g3, OPTYPE_FCOE_FW_ACTIVE,
			FLASH_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_FCoE},
		{ FLASH_FCoE_BACKUP_IMAGE_START_g3, OPTYPE_FCOE_FW_BACKUP,
			FLASH_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_BACKUP_FCoE},
		{ FLASH_NCSI_START_g3, OPTYPE_NCSI_FW,
			FLASH_NCSI_IMAGE_MAX_SIZE_g3, IMAGE_NCSI},
		{ FLASH_PHY_FW_START_g3, OPTYPE_PHY_FW,
			FLASH_PHY_FW_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_PHY}
	};

	struct flash_comp gen2_flash_types[] = {
		{ FLASH_iSCSI_PRIMARY_IMAGE_START_g2, OPTYPE_ISCSI_ACTIVE,
			FLASH_IMAGE_MAX_SIZE_g2, IMAGE_FIRMWARE_iSCSI},
		{ FLASH_REDBOOT_START_g2, OPTYPE_REDBOOT,
			FLASH_REDBOOT_IMAGE_MAX_SIZE_g2, IMAGE_BOOT_CODE},
		{ FLASH_iSCSI_BIOS_START_g2, OPTYPE_BIOS,
			FLASH_BIOS_IMAGE_MAX_SIZE_g2, IMAGE_OPTION_ROM_ISCSI},
		{ FLASH_PXE_BIOS_START_g2, OPTYPE_PXE_BIOS,
			FLASH_BIOS_IMAGE_MAX_SIZE_g2, IMAGE_OPTION_ROM_PXE},
		{ FLASH_FCoE_BIOS_START_g2, OPTYPE_FCOE_BIOS,
			FLASH_BIOS_IMAGE_MAX_SIZE_g2, IMAGE_OPTION_ROM_FCoE},
		{ FLASH_iSCSI_BACKUP_IMAGE_START_g2, OPTYPE_ISCSI_BACKUP,
			FLASH_IMAGE_MAX_SIZE_g2, IMAGE_FIRMWARE_BACKUP_iSCSI},
		{ FLASH_FCoE_PRIMARY_IMAGE_START_g2, OPTYPE_FCOE_FW_ACTIVE,
			FLASH_IMAGE_MAX_SIZE_g2, IMAGE_FIRMWARE_FCoE},
		{ FLASH_FCoE_BACKUP_IMAGE_START_g2, OPTYPE_FCOE_FW_BACKUP,
			 FLASH_IMAGE_MAX_SIZE_g2, IMAGE_FIRMWARE_BACKUP_FCoE}
	};

	/* table and file-header size depend on the chip generation */
	if (adapter->generation == BE_GEN3) {
		pflashcomp = gen3_flash_types;
		filehdr_size = sizeof(struct flash_file_hdr_g3);
		num_comp = ARRAY_SIZE(gen3_flash_types);
	} else {
		pflashcomp = gen2_flash_types;
		filehdr_size = sizeof(struct flash_file_hdr_g2);
		num_comp = ARRAY_SIZE(gen2_flash_types);
	}
	/* Get flash section info*/
	fsec = get_fsec_info(adapter, filehdr_size + img_hdrs_size, fw);
	if (!fsec) {
		dev_err(&adapter->pdev->dev,
			"Invalid Cookie. UFI corrupted ?\n");
		return -1;
	}
	for (i = 0; i < num_comp; i++) {
		/* skip components not present in this UFI image */
		if (!is_comp_in_ufi(adapter, fsec, pflashcomp[i].img_type))
			continue;

		/* NCSI FW is skipped when the running FW is older than
		 * 3.102.148.0 (string compare on the version prefix) */
		if ((pflashcomp[i].optype == OPTYPE_NCSI_FW) &&
		    memcmp(adapter->fw_ver, "3.102.148.0", 11) < 0)
			continue;

		if (pflashcomp[i].optype == OPTYPE_PHY_FW) {
			if (!phy_flashing_required(adapter))
				continue;
		}

		hdr_size = filehdr_size +
			   (num_of_images * sizeof(struct image_hdr));

		/* redboot is flashed only when its CRC differs from flash */
		if ((pflashcomp[i].optype == OPTYPE_REDBOOT) &&
		    (!be_flash_redboot(adapter, fw->data, pflashcomp[i].offset,
			pflashcomp[i].size, hdr_size)))
			continue;

		/* Flash the component */
		p = fw->data;
		p += filehdr_size + pflashcomp[i].offset + img_hdrs_size;
		if (p + pflashcomp[i].size > fw->data + fw->size)
			return -1;
		total_bytes = pflashcomp[i].size;
		while (total_bytes) {
			if (total_bytes > 32*1024)
				num_bytes = 32*1024;
			else
				num_bytes = total_bytes;
			total_bytes -= num_bytes;
			/* intermediate chunks are SAVEd; the final chunk
			 * uses the FLASH op to commit the whole image */
			if (!total_bytes) {
				if (pflashcomp[i].optype == OPTYPE_PHY_FW)
					flash_op = FLASHROM_OPER_PHY_FLASH;
				else
					flash_op = FLASHROM_OPER_FLASH;
			} else {
				if (pflashcomp[i].optype == OPTYPE_PHY_FW)
					flash_op = FLASHROM_OPER_PHY_SAVE;
				else
					flash_op = FLASHROM_OPER_SAVE;
			}
			memcpy(req->params.data_buf, p, num_bytes);
			p += num_bytes;
			status = be_cmd_write_flashrom(adapter, flash_cmd,
				pflashcomp[i].optype, flash_op, num_bytes);
			if (status) {
				/* a FW that rejects PHY flashing is not a
				 * fatal error — just skip this component */
				if ((status == ILLEGAL_IOCTL_REQ) &&
					(pflashcomp[i].optype ==
						OPTYPE_PHY_FW))
					break;
				dev_err(&adapter->pdev->dev,
					"cmd to write to flash rom failed.\n");
				return -1;
			}
		}
	}
	return 0;
}
3172
Ajit Khaparde3f0d4562010-02-09 01:30:35 +00003173static int get_ufigen_type(struct flash_file_hdr_g2 *fhdr)
3174{
3175 if (fhdr == NULL)
3176 return 0;
3177 if (fhdr->build[0] == '3')
3178 return BE_GEN3;
3179 else if (fhdr->build[0] == '2')
3180 return BE_GEN2;
3181 else
3182 return 0;
3183}
3184
Padmanabh Ratnakarf67ef7b2012-07-12 03:57:09 +00003185static int lancer_wait_idle(struct be_adapter *adapter)
3186{
3187#define SLIPORT_IDLE_TIMEOUT 30
3188 u32 reg_val;
3189 int status = 0, i;
3190
3191 for (i = 0; i < SLIPORT_IDLE_TIMEOUT; i++) {
3192 reg_val = ioread32(adapter->db + PHYSDEV_CONTROL_OFFSET);
3193 if ((reg_val & PHYSDEV_CONTROL_INP_MASK) == 0)
3194 break;
3195
3196 ssleep(1);
3197 }
3198
3199 if (i == SLIPORT_IDLE_TIMEOUT)
3200 status = -1;
3201
3202 return status;
3203}
3204
3205static int lancer_fw_reset(struct be_adapter *adapter)
3206{
3207 int status = 0;
3208
3209 status = lancer_wait_idle(adapter);
3210 if (status)
3211 return status;
3212
3213 iowrite32(PHYSDEV_CONTROL_FW_RESET_MASK, adapter->db +
3214 PHYSDEV_CONTROL_OFFSET);
3215
3216 return status;
3217}
3218
/* Download a Lancer FW image: stream it to the "/prg" object in 32KB
 * chunks through the write_object cmd, then issue a zero-length write to
 * commit it.  Depending on the FW's reported change_status, either reset
 * the chip so the new FW activates, or tell the user a reboot is needed.
 * Returns 0 on success or a negative/FW error.
 */
static int lancer_fw_download(struct be_adapter *adapter,
				const struct firmware *fw)
{
#define LANCER_FW_DOWNLOAD_CHUNK (32 * 1024)
#define LANCER_FW_DOWNLOAD_LOCATION "/prg"
	struct be_dma_mem flash_cmd;
	const u8 *data_ptr = NULL;
	u8 *dest_image_ptr = NULL;
	size_t image_size = 0;
	u32 chunk_size = 0;
	u32 data_written = 0;
	u32 offset = 0;
	int status = 0;
	u8 add_status = 0;
	u8 change_status;

	if (!IS_ALIGNED(fw->size, sizeof(u32))) {
		dev_err(&adapter->pdev->dev,
			"FW Image not properly aligned. "
			"Length must be 4 byte aligned.\n");
		status = -EINVAL;
		goto lancer_fw_exit;
	}

	/* one DMA buffer holds the cmd header plus one image chunk */
	flash_cmd.size = sizeof(struct lancer_cmd_req_write_object)
				+ LANCER_FW_DOWNLOAD_CHUNK;
	flash_cmd.va = dma_alloc_coherent(&adapter->pdev->dev, flash_cmd.size,
						&flash_cmd.dma, GFP_KERNEL);
	if (!flash_cmd.va) {
		status = -ENOMEM;
		dev_err(&adapter->pdev->dev,
			"Memory allocation failure while flashing\n");
		goto lancer_fw_exit;
	}

	dest_image_ptr = flash_cmd.va +
				sizeof(struct lancer_cmd_req_write_object);
	image_size = fw->size;
	data_ptr = fw->data;

	while (image_size) {
		chunk_size = min_t(u32, image_size, LANCER_FW_DOWNLOAD_CHUNK);

		/* Copy the image chunk content. */
		memcpy(dest_image_ptr, data_ptr, chunk_size);

		status = lancer_cmd_write_object(adapter, &flash_cmd,
						 chunk_size, offset,
						 LANCER_FW_DOWNLOAD_LOCATION,
						 &data_written, &change_status,
						 &add_status);
		if (status)
			break;

		/* advance by what FW actually consumed, not chunk_size */
		offset += data_written;
		data_ptr += data_written;
		image_size -= data_written;
	}

	if (!status) {
		/* Commit the FW written */
		status = lancer_cmd_write_object(adapter, &flash_cmd,
						 0, offset,
						 LANCER_FW_DOWNLOAD_LOCATION,
						 &data_written, &change_status,
						 &add_status);
	}

	dma_free_coherent(&adapter->pdev->dev, flash_cmd.size, flash_cmd.va,
				flash_cmd.dma);
	if (status) {
		dev_err(&adapter->pdev->dev,
			"Firmware load error. "
			"Status code: 0x%x Additional Status: 0x%x\n",
			status, add_status);
		goto lancer_fw_exit;
	}

	/* Activate the new FW per the FW's own instruction */
	if (change_status == LANCER_FW_RESET_NEEDED) {
		status = lancer_fw_reset(adapter);
		if (status) {
			dev_err(&adapter->pdev->dev,
				"Adapter busy for FW reset.\n"
				"New FW will not be active.\n");
			goto lancer_fw_exit;
		}
	} else if (change_status != LANCER_NO_RESET_NEEDED) {
		dev_err(&adapter->pdev->dev,
			"System reboot required for new FW"
			" to be active\n");
	}

	dev_info(&adapter->pdev->dev, "Firmware flashed successfully\n");
lancer_fw_exit:
	return status;
}
3315
/* Flash a BE2/BE3 UFI image: verify the UFI's generation matches the
 * chip's, then hand each matching image off to be_flash_data().  A GEN3
 * UFI may carry multiple image headers; only imageid == 1 is flashed.
 * Returns 0 on success, -ENOMEM, or -1 on mismatch/flash failure.
 */
static int be_fw_download(struct be_adapter *adapter, const struct firmware* fw)
{
	struct flash_file_hdr_g2 *fhdr;
	struct flash_file_hdr_g3 *fhdr3;
	struct image_hdr *img_hdr_ptr = NULL;
	struct be_dma_mem flash_cmd;
	const u8 *p;
	int status = 0, i = 0, num_imgs = 0;

	p = fw->data;
	fhdr = (struct flash_file_hdr_g2 *) p;

	/* DMA buffer reused for every 32KB write_flashrom chunk */
	flash_cmd.size = sizeof(struct be_cmd_write_flashrom) + 32*1024;
	flash_cmd.va = dma_alloc_coherent(&adapter->pdev->dev, flash_cmd.size,
					  &flash_cmd.dma, GFP_KERNEL);
	if (!flash_cmd.va) {
		status = -ENOMEM;
		dev_err(&adapter->pdev->dev,
			"Memory allocation failure while flashing\n");
		goto be_fw_exit;
	}

	/* chip generation and UFI generation must agree */
	if ((adapter->generation == BE_GEN3) &&
			(get_ufigen_type(fhdr) == BE_GEN3)) {
		fhdr3 = (struct flash_file_hdr_g3 *) fw->data;
		num_imgs = le32_to_cpu(fhdr3->num_imgs);
		for (i = 0; i < num_imgs; i++) {
			img_hdr_ptr = (struct image_hdr *) (fw->data +
					(sizeof(struct flash_file_hdr_g3) +
					 i * sizeof(struct image_hdr)));
			if (le32_to_cpu(img_hdr_ptr->imageid) == 1)
				status = be_flash_data(adapter, fw, &flash_cmd,
							num_imgs);
		}
	} else if ((adapter->generation == BE_GEN2) &&
			(get_ufigen_type(fhdr) == BE_GEN2)) {
		status = be_flash_data(adapter, fw, &flash_cmd, 0);
	} else {
		dev_err(&adapter->pdev->dev,
			"UFI and Interface are not compatible for flashing\n");
		status = -1;
	}

	dma_free_coherent(&adapter->pdev->dev, flash_cmd.size, flash_cmd.va,
			  flash_cmd.dma);
	if (status) {
		dev_err(&adapter->pdev->dev, "Firmware load error\n");
		goto be_fw_exit;
	}

	dev_info(&adapter->pdev->dev, "Firmware flashed successfully\n");

be_fw_exit:
	return status;
}
3371
3372int be_load_fw(struct be_adapter *adapter, u8 *fw_file)
3373{
3374 const struct firmware *fw;
3375 int status;
3376
3377 if (!netif_running(adapter->netdev)) {
3378 dev_err(&adapter->pdev->dev,
3379 "Firmware load not allowed (interface is down)\n");
3380 return -1;
3381 }
3382
3383 status = request_firmware(&fw, fw_file, &adapter->pdev->dev);
3384 if (status)
3385 goto fw_exit;
3386
3387 dev_info(&adapter->pdev->dev, "Flashing firmware file %s\n", fw_file);
3388
3389 if (lancer_chip(adapter))
3390 status = lancer_fw_download(adapter, fw);
3391 else
3392 status = be_fw_download(adapter, fw);
3393
Ajit Khaparde84517482009-09-04 03:12:16 +00003394fw_exit:
3395 release_firmware(fw);
3396 return status;
3397}
3398
/* Entry points the networking core invokes on this netdev:
 * open/close, transmit, address/MTU/VLAN management, 64-bit stats,
 * and the SR-IOV VF configuration hooks.
 */
static const struct net_device_ops be_netdev_ops = {
	.ndo_open = be_open,
	.ndo_stop = be_close,
	.ndo_start_xmit = be_xmit,
	.ndo_set_rx_mode = be_set_rx_mode,
	.ndo_set_mac_address = be_mac_addr_set,
	.ndo_change_mtu = be_change_mtu,
	.ndo_get_stats64 = be_get_stats64,
	.ndo_validate_addr = eth_validate_addr,
	.ndo_vlan_rx_add_vid = be_vlan_add_vid,
	.ndo_vlan_rx_kill_vid = be_vlan_rem_vid,
	.ndo_set_vf_mac = be_set_vf_mac,
	.ndo_set_vf_vlan = be_set_vf_vlan,
	.ndo_set_vf_tx_rate = be_set_vf_tx_rate,
	.ndo_get_vf_config = be_get_vf_config,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller = be_netpoll,
#endif
};
3418
/* One-time netdev setup before register_netdev(): advertise offload
 * features, install the ops/ethtool tables and add one NAPI context
 * per event queue.
 */
static void be_netdev_init(struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_eq_obj *eqo;
	int i;

	/* User-toggleable offloads: scatter-gather, TSO, checksum
	 * offload, RX checksum and VLAN tag insertion */
	netdev->hw_features |= NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6 |
		NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_RXCSUM |
		NETIF_F_HW_VLAN_TX;
	/* RSS hashing is only meaningful with multiple RX queues */
	if (be_multi_rxq(adapter))
		netdev->hw_features |= NETIF_F_RXHASH;

	/* Currently-active features: everything above plus VLAN
	 * stripping/filtering, which are not user-toggleable here */
	netdev->features |= netdev->hw_features |
		NETIF_F_HW_VLAN_RX | NETIF_F_HW_VLAN_FILTER;

	netdev->vlan_features |= NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6 |
		NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;

	netdev->priv_flags |= IFF_UNICAST_FLT;

	netdev->flags |= IFF_MULTICAST;

	netif_set_gso_max_size(netdev, 65535 - ETH_HLEN);

	netdev->netdev_ops = &be_netdev_ops;

	SET_ETHTOOL_OPS(netdev, &be_ethtool_ops);

	for_all_evt_queues(adapter, eqo, i)
		netif_napi_add(netdev, &eqo->napi, be_poll, BE_NAPI_WEIGHT);
}
3450
3451static void be_unmap_pci_bars(struct be_adapter *adapter)
3452{
Sathya Perla8788fdc2009-07-27 22:52:03 +00003453 if (adapter->csr)
3454 iounmap(adapter->csr);
3455 if (adapter->db)
3456 iounmap(adapter->db);
Parav Pandit045508a2012-03-26 14:27:13 +00003457 if (adapter->roce_db.base)
3458 pci_iounmap(adapter->pdev, adapter->roce_db.base);
3459}
3460
3461static int lancer_roce_map_pci_bars(struct be_adapter *adapter)
3462{
3463 struct pci_dev *pdev = adapter->pdev;
3464 u8 __iomem *addr;
3465
3466 addr = pci_iomap(pdev, 2, 0);
3467 if (addr == NULL)
3468 return -ENOMEM;
3469
3470 adapter->roce_db.base = addr;
3471 adapter->roce_db.io_addr = pci_resource_start(pdev, 2);
3472 adapter->roce_db.size = 8192;
3473 adapter->roce_db.total_size = pci_resource_len(pdev, 2);
3474 return 0;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003475}
3476
/* Map the PCI BARs this driver needs.  Which BARs hold what differs by
 * chip family:
 *  - Lancer: BAR 0 is the doorbell region (type 2/3 functions only);
 *    SLI-3 functions additionally map a RoCE doorbell BAR.
 *  - BE2/BE3: BAR 2 holds the CSRs (PF only); the doorbell BAR index
 *    depends on generation and PF/VF.  Skyhawk also records RoCE
 *    doorbell geometry from the same BAR.
 * Returns 0 on success, -ENOMEM on any mapping failure (with all
 * partial mappings undone).
 */
static int be_map_pci_bars(struct be_adapter *adapter)
{
	u8 __iomem *addr;
	int db_reg;

	if (lancer_chip(adapter)) {
		if (be_type_2_3(adapter)) {
			addr = ioremap_nocache(
					pci_resource_start(adapter->pdev, 0),
					pci_resource_len(adapter->pdev, 0));
			if (addr == NULL)
				return -ENOMEM;
			adapter->db = addr;
		}
		if (adapter->if_type == SLI_INTF_TYPE_3) {
			if (lancer_roce_map_pci_bars(adapter))
				goto pci_map_err;
		}
		return 0;
	}

	/* BE2/BE3 path: CSR BAR exists only on the physical function */
	if (be_physfn(adapter)) {
		addr = ioremap_nocache(pci_resource_start(adapter->pdev, 2),
				pci_resource_len(adapter->pdev, 2));
		if (addr == NULL)
			return -ENOMEM;
		adapter->csr = addr;
	}

	/* Doorbell BAR: 4 on GEN2 and on GEN3 PFs, 0 on GEN3 VFs */
	if (adapter->generation == BE_GEN2) {
		db_reg = 4;
	} else {
		if (be_physfn(adapter))
			db_reg = 4;
		else
			db_reg = 0;
	}
	addr = ioremap_nocache(pci_resource_start(adapter->pdev, db_reg),
			pci_resource_len(adapter->pdev, db_reg));
	if (addr == NULL)
		goto pci_map_err;
	adapter->db = addr;
	/* Skyhawk: note the RoCE doorbell window inside the same BAR
	 * (geometry only; no extra mapping is made here) */
	if (adapter->sli_family == SKYHAWK_SLI_FAMILY) {
		adapter->roce_db.size = 4096;
		adapter->roce_db.io_addr =
				pci_resource_start(adapter->pdev, db_reg);
		adapter->roce_db.total_size =
				pci_resource_len(adapter->pdev, db_reg);
	}
	return 0;
pci_map_err:
	be_unmap_pci_bars(adapter);
	return -ENOMEM;
}
3531
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003532static void be_ctrl_cleanup(struct be_adapter *adapter)
3533{
Sathya Perla8788fdc2009-07-27 22:52:03 +00003534 struct be_dma_mem *mem = &adapter->mbox_mem_alloced;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003535
3536 be_unmap_pci_bars(adapter);
3537
3538 if (mem->va)
Ivan Vecera2b7bceb2011-02-02 08:05:12 +00003539 dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
3540 mem->dma);
Sathya Perlae7b909a2009-11-22 22:01:10 +00003541
Sathya Perla5b8821b2011-08-02 19:57:44 +00003542 mem = &adapter->rx_filter;
Sathya Perlae7b909a2009-11-22 22:01:10 +00003543 if (mem->va)
Ivan Vecera2b7bceb2011-02-02 08:05:12 +00003544 dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
3545 mem->dma);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003546}
3547
/* Initialize the control path: map PCI BARs, allocate the FW mailbox
 * (16-byte aligned inside an over-sized buffer) and the RX-filter cmd
 * buffer, and set up the locks protecting mailbox/MCC access.
 * On failure, unwinds in reverse order via the goto chain.
 * Returns 0 on success or a negative errno.
 */
static int be_ctrl_init(struct be_adapter *adapter)
{
	struct be_dma_mem *mbox_mem_alloc = &adapter->mbox_mem_alloced;
	struct be_dma_mem *mbox_mem_align = &adapter->mbox_mem;
	struct be_dma_mem *rx_filter = &adapter->rx_filter;
	int status;

	status = be_map_pci_bars(adapter);
	if (status)
		goto done;

	/* Over-allocate by 16 bytes so the mailbox proper can be aligned
	 * to a 16-byte boundary as the hardware requires */
	mbox_mem_alloc->size = sizeof(struct be_mcc_mailbox) + 16;
	mbox_mem_alloc->va = dma_alloc_coherent(&adapter->pdev->dev,
						mbox_mem_alloc->size,
						&mbox_mem_alloc->dma,
						GFP_KERNEL);
	if (!mbox_mem_alloc->va) {
		status = -ENOMEM;
		goto unmap_pci_bars;
	}
	mbox_mem_align->size = sizeof(struct be_mcc_mailbox);
	mbox_mem_align->va = PTR_ALIGN(mbox_mem_alloc->va, 16);
	mbox_mem_align->dma = PTR_ALIGN(mbox_mem_alloc->dma, 16);
	memset(mbox_mem_align->va, 0, sizeof(struct be_mcc_mailbox));

	rx_filter->size = sizeof(struct be_cmd_req_rx_filter);
	rx_filter->va = dma_alloc_coherent(&adapter->pdev->dev, rx_filter->size,
					&rx_filter->dma, GFP_KERNEL);
	if (rx_filter->va == NULL) {
		status = -ENOMEM;
		goto free_mbox;
	}
	memset(rx_filter->va, 0, rx_filter->size);
	mutex_init(&adapter->mbox_lock);
	spin_lock_init(&adapter->mcc_lock);
	spin_lock_init(&adapter->mcc_cq_lock);

	init_completion(&adapter->flash_compl);
	/* Saved config space is restored in the EEH/resume paths */
	pci_save_state(adapter->pdev);
	return 0;

free_mbox:
	dma_free_coherent(&adapter->pdev->dev, mbox_mem_alloc->size,
			  mbox_mem_alloc->va, mbox_mem_alloc->dma);

unmap_pci_bars:
	be_unmap_pci_bars(adapter);

done:
	return status;
}
3599
3600static void be_stats_cleanup(struct be_adapter *adapter)
3601{
Sathya Perla3abcded2010-10-03 22:12:27 -07003602 struct be_dma_mem *cmd = &adapter->stats_cmd;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003603
3604 if (cmd->va)
Ivan Vecera2b7bceb2011-02-02 08:05:12 +00003605 dma_free_coherent(&adapter->pdev->dev, cmd->size,
3606 cmd->va, cmd->dma);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003607}
3608
3609static int be_stats_init(struct be_adapter *adapter)
3610{
Sathya Perla3abcded2010-10-03 22:12:27 -07003611 struct be_dma_mem *cmd = &adapter->stats_cmd;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003612
Selvin Xavier005d5692011-05-16 07:36:35 +00003613 if (adapter->generation == BE_GEN2) {
Ajit Khaparde89a88ab2011-05-16 07:36:18 +00003614 cmd->size = sizeof(struct be_cmd_req_get_stats_v0);
Selvin Xavier005d5692011-05-16 07:36:35 +00003615 } else {
3616 if (lancer_chip(adapter))
3617 cmd->size = sizeof(struct lancer_cmd_req_pport_stats);
3618 else
3619 cmd->size = sizeof(struct be_cmd_req_get_stats_v1);
3620 }
Ivan Vecera2b7bceb2011-02-02 08:05:12 +00003621 cmd->va = dma_alloc_coherent(&adapter->pdev->dev, cmd->size, &cmd->dma,
3622 GFP_KERNEL);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003623 if (cmd->va == NULL)
3624 return -1;
David S. Millerd291b9a2010-01-28 21:36:21 -08003625 memset(cmd->va, 0, cmd->size);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003626 return 0;
3627}
3628
/* PCI remove callback: tear everything down in strict reverse order of
 * be_probe() — RoCE first, then the recovery worker, the netdev, the
 * data path, FW session, stats/control buffers and finally the PCI
 * device itself.
 */
static void __devexit be_remove(struct pci_dev *pdev)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);

	if (!adapter)
		return;

	be_roce_dev_remove(adapter);

	/* Stop the recovery worker before the netdev goes away */
	cancel_delayed_work_sync(&adapter->func_recovery_work);

	unregister_netdev(adapter->netdev);

	be_clear(adapter);

	/* tell fw we're done with firing cmds */
	be_cmd_fw_clean(adapter);

	be_stats_cleanup(adapter);

	be_ctrl_cleanup(adapter);

	pci_disable_pcie_error_reporting(pdev);

	pci_set_drvdata(pdev, NULL);
	pci_release_regions(pdev);
	pci_disable_device(pdev);

	free_netdev(adapter->netdev);
}
3659
Ajit Khaparde4762f6c2012-03-18 06:23:11 +00003660bool be_is_wol_supported(struct be_adapter *adapter)
3661{
3662 return ((adapter->wol_cap & BE_WOL_CAP) &&
3663 !be_is_wol_excluded(adapter)) ? true : false;
3664}
3665
/* Query the firmware's extended-FAT capabilities and return the debug
 * level configured for the UART trace mode of module 0.
 * Returns 0 if the capability query fails or the buffer cannot be
 * allocated (0 doubles as "no/lowest level").
 */
u32 be_get_fw_log_level(struct be_adapter *adapter)
{
	struct be_dma_mem extfat_cmd;
	struct be_fat_conf_params *cfgs;
	int status;
	u32 level = 0;
	int j;

	memset(&extfat_cmd, 0, sizeof(struct be_dma_mem));
	extfat_cmd.size = sizeof(struct be_cmd_resp_get_ext_fat_caps);
	extfat_cmd.va = pci_alloc_consistent(adapter->pdev, extfat_cmd.size,
					     &extfat_cmd.dma);

	if (!extfat_cmd.va) {
		dev_err(&adapter->pdev->dev, "%s: Memory allocation failure\n",
			__func__);
		goto err;
	}

	status = be_cmd_get_ext_fat_capabilites(adapter, &extfat_cmd);
	if (!status) {
		/* Config params follow the generic response header */
		cfgs = (struct be_fat_conf_params *)(extfat_cmd.va +
						sizeof(struct be_cmd_resp_hdr));
		/* Scan all trace modes of module 0; the last UART entry
		 * wins if several are present */
		for (j = 0; j < le32_to_cpu(cfgs->module[0].num_modes); j++) {
			if (cfgs->module[0].trace_lvl[j].mode == MODE_UART)
				level = cfgs->module[0].trace_lvl[j].dbg_lvl;
		}
	}
	pci_free_consistent(adapter->pdev, extfat_cmd.size, extfat_cmd.va,
			    extfat_cmd.dma);
err:
	return level;
}
Padmanabh Ratnakarabb93952012-10-20 06:01:41 +00003699
/* Fetch one-time configuration from the FW at probe time: controller
 * attributes, WoL capability, die-temperature polling period and the
 * FW log level (used to seed msg_enable).
 * Returns 0 on success or the error from the attributes query; a WoL
 * capability query failure is tolerated via the exclusion list.
 */
static int be_get_initial_config(struct be_adapter *adapter)
{
	int status;
	u32 level;

	status = be_cmd_get_cntl_attributes(adapter);
	if (status)
		return status;

	status = be_cmd_get_acpi_wol_cap(adapter);
	if (status) {
		/* in case of a failure to get wol capabilities
		 * check the exclusion list to determine WOL capability */
		if (!be_is_wol_excluded(adapter))
			adapter->wol_cap |= BE_WOL_CAP;
	}

	if (be_is_wol_supported(adapter))
		adapter->wol = true;

	/* Must be a power of 2 or else MODULO will BUG_ON */
	adapter->be_get_temp_freq = 64;

	level = be_get_fw_log_level(adapter);
	adapter->msg_enable = level <= FW_LOG_LEVEL_DEFAULT ? NETIF_MSG_HW : 0;

	return 0;
}
3728
Sathya Perla39f1d942012-05-08 19:41:24 +00003729static int be_dev_type_check(struct be_adapter *adapter)
Sathya Perlafe6d2a32010-11-21 23:25:50 +00003730{
3731 struct pci_dev *pdev = adapter->pdev;
3732 u32 sli_intf = 0, if_type;
3733
3734 switch (pdev->device) {
3735 case BE_DEVICE_ID1:
3736 case OC_DEVICE_ID1:
3737 adapter->generation = BE_GEN2;
3738 break;
3739 case BE_DEVICE_ID2:
3740 case OC_DEVICE_ID2:
3741 adapter->generation = BE_GEN3;
3742 break;
3743 case OC_DEVICE_ID3:
Mammatha Edhala12f4d0a2011-05-18 03:26:22 +00003744 case OC_DEVICE_ID4:
Sathya Perlafe6d2a32010-11-21 23:25:50 +00003745 pci_read_config_dword(pdev, SLI_INTF_REG_OFFSET, &sli_intf);
Parav Pandit045508a2012-03-26 14:27:13 +00003746 adapter->if_type = (sli_intf & SLI_INTF_IF_TYPE_MASK) >>
3747 SLI_INTF_IF_TYPE_SHIFT;
Sathya Perlafe6d2a32010-11-21 23:25:50 +00003748 if_type = (sli_intf & SLI_INTF_IF_TYPE_MASK) >>
3749 SLI_INTF_IF_TYPE_SHIFT;
Sathya Perlafe6d2a32010-11-21 23:25:50 +00003750 if (((sli_intf & SLI_INTF_VALID_MASK) != SLI_INTF_VALID) ||
Parav Pandit045508a2012-03-26 14:27:13 +00003751 !be_type_2_3(adapter)) {
3752 dev_err(&pdev->dev, "SLI_INTF reg val is not valid\n");
3753 return -EINVAL;
3754 }
3755 adapter->sli_family = ((sli_intf & SLI_INTF_FAMILY_MASK) >>
3756 SLI_INTF_FAMILY_SHIFT);
3757 adapter->generation = BE_GEN3;
3758 break;
3759 case OC_DEVICE_ID5:
3760 pci_read_config_dword(pdev, SLI_INTF_REG_OFFSET, &sli_intf);
3761 if ((sli_intf & SLI_INTF_VALID_MASK) != SLI_INTF_VALID) {
Sathya Perlafe6d2a32010-11-21 23:25:50 +00003762 dev_err(&pdev->dev, "SLI_INTF reg val is not valid\n");
3763 return -EINVAL;
3764 }
Sathya Perlafe6d2a32010-11-21 23:25:50 +00003765 adapter->sli_family = ((sli_intf & SLI_INTF_FAMILY_MASK) >>
3766 SLI_INTF_FAMILY_SHIFT);
3767 adapter->generation = BE_GEN3;
3768 break;
3769 default:
3770 adapter->generation = 0;
3771 }
Sathya Perla39f1d942012-05-08 19:41:24 +00003772
3773 pci_read_config_dword(adapter->pdev, SLI_INTF_REG_OFFSET, &sli_intf);
3774 adapter->virtfn = (sli_intf & SLI_INTF_FT_MASK) ? 1 : 0;
Sathya Perlafe6d2a32010-11-21 23:25:50 +00003775 return 0;
3776}
3777
/* Attempt to recover a Lancer function after a SLIPORT error: wait for
 * the chip to report ready, tear down and rebuild the data path, and
 * reopen the interface if it was running.
 * Returns 0 on success or the first failing step's status.
 */
static int lancer_recover_func(struct be_adapter *adapter)
{
	int status;

	status = lancer_test_and_set_rdy_state(adapter);
	if (status)
		goto err;

	if (netif_running(adapter->netdev))
		be_close(adapter->netdev);

	be_clear(adapter);

	/* Clear sticky error state before re-initializing */
	adapter->hw_error = false;
	adapter->fw_timeout = false;

	status = be_setup(adapter);
	if (status)
		goto err;

	if (netif_running(adapter->netdev)) {
		status = be_open(adapter->netdev);
		if (status)
			goto err;
	}

	/* NOTE(review): success is logged at err level like the failure
	 * path — looks like it should be dev_info; confirm intent */
	dev_err(&adapter->pdev->dev,
		"Adapter SLIPORT recovery succeeded\n");
	return 0;
err:
	dev_err(&adapter->pdev->dev,
		"Adapter SLIPORT recovery failed\n");

	return status;
}
3813
/* Periodic (1s) worker that polls for hardware errors and, on Lancer,
 * drives SLIPORT recovery: detach the netdev, run the recovery
 * sequence, and re-attach only if it succeeded.  Always reschedules
 * itself; EEH-originated errors are left to the EEH handlers.
 */
static void be_func_recovery_task(struct work_struct *work)
{
	struct be_adapter *adapter =
		container_of(work, struct be_adapter, func_recovery_work.work);
	int status;

	be_detect_error(adapter);

	if (adapter->hw_error && lancer_chip(adapter)) {

		/* EEH recovery owns the device; don't interfere */
		if (adapter->eeh_error)
			goto out;

		rtnl_lock();
		netif_device_detach(adapter->netdev);
		rtnl_unlock();

		status = lancer_recover_func(adapter);

		if (!status)
			netif_device_attach(adapter->netdev);
	}

out:
	schedule_delayed_work(&adapter->func_recovery_work,
			      msecs_to_jiffies(1000));
}
3841
/* Periodic (1s) housekeeping worker: reap MCC completions while the
 * interface is down; otherwise kick off a stats query, periodically
 * read the die temperature, replenish starved RX rings and adapt EQ
 * delay.  Always reschedules itself.
 */
static void be_worker(struct work_struct *work)
{
	struct be_adapter *adapter =
		container_of(work, struct be_adapter, work.work);
	struct be_rx_obj *rxo;
	struct be_eq_obj *eqo;
	int i;

	/* when interrupts are not yet enabled, just reap any pending
	 * mcc completions */
	if (!netif_running(adapter->netdev)) {
		local_bh_disable();
		be_process_mcc(adapter);
		local_bh_enable();
		goto reschedule;
	}

	/* Fire a new stats request only if the previous one completed */
	if (!adapter->stats_cmd_sent) {
		if (lancer_chip(adapter))
			lancer_cmd_get_pport_stats(adapter,
						&adapter->stats_cmd);
		else
			be_cmd_get_stats(adapter, &adapter->stats_cmd);
	}

	/* be_get_temp_freq is a power of two (see be_get_initial_config) */
	if (MODULO(adapter->work_counter, adapter->be_get_temp_freq) == 0)
		be_cmd_get_die_temperature(adapter);

	/* Refill RX rings that ran out of buffers under memory pressure */
	for_all_rx_queues(adapter, rxo, i) {
		if (rxo->rx_post_starved) {
			rxo->rx_post_starved = false;
			be_post_rx_frags(rxo, GFP_KERNEL);
		}
	}

	for_all_evt_queues(adapter, eqo, i)
		be_eqd_update(adapter, eqo);

reschedule:
	adapter->work_counter++;
	schedule_delayed_work(&adapter->work, msecs_to_jiffies(1000));
}
3884
Sathya Perla39f1d942012-05-08 19:41:24 +00003885static bool be_reset_required(struct be_adapter *adapter)
3886{
Sathya Perlad79c0a22012-06-05 19:37:22 +00003887 return be_find_vfs(adapter, ENABLED) > 0 ? false : true;
Sathya Perla39f1d942012-05-08 19:41:24 +00003888}
3889
Sathya Perlad3791422012-09-28 04:39:44 +00003890static char *mc_name(struct be_adapter *adapter)
3891{
3892 if (adapter->function_mode & FLEX10_MODE)
3893 return "FLEX10";
3894 else if (adapter->function_mode & VNIC_MODE)
3895 return "vNIC";
3896 else if (adapter->function_mode & UMC_ENABLED)
3897 return "UMC";
3898 else
3899 return "";
3900}
3901
/* "PF" or "VF" label for this PCI function, for the probe banner. */
static inline char *func_name(struct be_adapter *adapter)
{
	if (be_physfn(adapter))
		return "PF";

	return "VF";
}
3906
/* PCI probe: bring the adapter from cold to registered netdev.
 * Sequence: enable PCI + regions, allocate the multi-queue netdev,
 * classify the chip, set the DMA mask (64-bit preferred, 32-bit
 * fallback), init the control path, sync with FW, optionally reset the
 * function, init stats and config, build the data path, register the
 * netdev, and start the periodic recovery worker.
 * Errors unwind through the goto chain in reverse order of setup.
 */
static int __devinit be_probe(struct pci_dev *pdev,
			const struct pci_device_id *pdev_id)
{
	int status = 0;
	struct be_adapter *adapter;
	struct net_device *netdev;
	char port_name;

	status = pci_enable_device(pdev);
	if (status)
		goto do_none;

	status = pci_request_regions(pdev, DRV_NAME);
	if (status)
		goto disable_dev;
	pci_set_master(pdev);

	netdev = alloc_etherdev_mqs(sizeof(*adapter), MAX_TX_QS, MAX_RX_QS);
	if (netdev == NULL) {
		status = -ENOMEM;
		goto rel_reg;
	}
	adapter = netdev_priv(netdev);
	adapter->pdev = pdev;
	pci_set_drvdata(pdev, adapter);

	status = be_dev_type_check(adapter);
	if (status)
		goto free_netdev;

	adapter->netdev = netdev;
	SET_NETDEV_DEV(netdev, &pdev->dev);

	/* Prefer 64-bit DMA; fall back to 32-bit */
	status = dma_set_mask(&pdev->dev, DMA_BIT_MASK(64));
	if (!status) {
		netdev->features |= NETIF_F_HIGHDMA;
	} else {
		status = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
		if (status) {
			dev_err(&pdev->dev, "Could not set PCI DMA Mask\n");
			goto free_netdev;
		}
	}

	/* AER is best-effort; failure is logged but not fatal */
	status = pci_enable_pcie_error_reporting(pdev);
	if (status)
		dev_err(&pdev->dev, "Could not use PCIe error reporting\n");

	status = be_ctrl_init(adapter);
	if (status)
		goto free_netdev;

	/* sync up with fw's ready state */
	if (be_physfn(adapter)) {
		status = be_fw_wait_ready(adapter);
		if (status)
			goto ctrl_clean;
	}

	/* tell fw we're ready to fire cmds */
	status = be_cmd_fw_init(adapter);
	if (status)
		goto ctrl_clean;

	/* Skip the reset when VFs are already enabled (see
	 * be_reset_required) */
	if (be_reset_required(adapter)) {
		status = be_cmd_reset_function(adapter);
		if (status)
			goto ctrl_clean;
	}

	/* The INTR bit may be set in the card when probed by a kdump kernel
	 * after a crash.
	 */
	if (!lancer_chip(adapter))
		be_intr_set(adapter, false);

	status = be_stats_init(adapter);
	if (status)
		goto ctrl_clean;

	status = be_get_initial_config(adapter);
	if (status)
		goto stats_clean;

	INIT_DELAYED_WORK(&adapter->work, be_worker);
	INIT_DELAYED_WORK(&adapter->func_recovery_work, be_func_recovery_task);
	adapter->rx_fc = adapter->tx_fc = true;

	status = be_setup(adapter);
	if (status)
		goto stats_clean;

	be_netdev_init(netdev);
	status = register_netdev(netdev);
	if (status != 0)
		goto unsetup;

	be_roce_dev_add(adapter);

	schedule_delayed_work(&adapter->func_recovery_work,
			      msecs_to_jiffies(1000));

	be_cmd_query_port_name(adapter, &port_name);

	dev_info(&pdev->dev, "%s: %s %s port %c\n", nic_name(pdev),
		 func_name(adapter), mc_name(adapter), port_name);

	return 0;

unsetup:
	be_clear(adapter);
stats_clean:
	be_stats_cleanup(adapter);
ctrl_clean:
	be_ctrl_cleanup(adapter);
free_netdev:
	free_netdev(netdev);
	pci_set_drvdata(pdev, NULL);
rel_reg:
	pci_release_regions(pdev);
disable_dev:
	pci_disable_device(pdev);
do_none:
	dev_err(&pdev->dev, "%s initialization failed\n", nic_name(pdev));
	return status;
}
4033
/* Legacy PCI suspend callback: arm WoL if configured, stop the recovery
 * worker, quiesce the interface and data path, then power the device
 * down into the requested state.
 */
static int be_suspend(struct pci_dev *pdev, pm_message_t state)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);
	struct net_device *netdev = adapter->netdev;

	if (adapter->wol)
		be_setup_wol(adapter, true);

	cancel_delayed_work_sync(&adapter->func_recovery_work);

	netif_device_detach(netdev);
	if (netif_running(netdev)) {
		rtnl_lock();
		be_close(netdev);
		rtnl_unlock();
	}
	be_clear(adapter);

	pci_save_state(pdev);
	pci_disable_device(pdev);
	pci_set_power_state(pdev, pci_choose_state(pdev, state));
	return 0;
}
4057
4058static int be_resume(struct pci_dev *pdev)
4059{
4060 int status = 0;
4061 struct be_adapter *adapter = pci_get_drvdata(pdev);
4062 struct net_device *netdev = adapter->netdev;
4063
4064 netif_device_detach(netdev);
4065
4066 status = pci_enable_device(pdev);
4067 if (status)
4068 return status;
4069
4070 pci_set_power_state(pdev, 0);
4071 pci_restore_state(pdev);
4072
Sathya Perla2243e2e2009-11-22 22:02:03 +00004073 /* tell fw we're ready to fire cmds */
4074 status = be_cmd_fw_init(adapter);
4075 if (status)
4076 return status;
4077
Sarveshwar Bandi9b0365f2009-08-12 21:01:29 +00004078 be_setup(adapter);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004079 if (netif_running(netdev)) {
4080 rtnl_lock();
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004081 be_open(netdev);
4082 rtnl_unlock();
4083 }
Padmanabh Ratnakarf67ef7b2012-07-12 03:57:09 +00004084
4085 schedule_delayed_work(&adapter->func_recovery_work,
4086 msecs_to_jiffies(1000));
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004087 netif_device_attach(netdev);
Ajit Khaparde71d8d1b2009-12-03 06:16:59 +00004088
4089 if (adapter->wol)
4090 be_setup_wol(adapter, false);
Ajit Khapardea4ca0552011-02-11 13:38:03 +00004091
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004092 return 0;
4093}
4094
/*
 * An FLR will stop BE from DMAing any data.
 */
/* PCI shutdown callback: stop the workers, detach the netdev, arm WoL
 * if configured and reset the function so no DMA survives into the
 * next kernel.
 */
static void be_shutdown(struct pci_dev *pdev)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);

	if (!adapter)
		return;

	cancel_delayed_work_sync(&adapter->work);
	cancel_delayed_work_sync(&adapter->func_recovery_work);

	netif_device_detach(adapter->netdev);

	if (adapter->wol)
		be_setup_wol(adapter, true);

	/* Function-level reset stops all DMA (see comment above) */
	be_cmd_reset_function(adapter);

	pci_disable_device(pdev);
}
4117
/* EEH error-detected callback: mark the error so the recovery worker
 * stands down, quiesce the interface and data path, and tell the EEH
 * core whether a slot reset is worth attempting.
 */
static pci_ers_result_t be_eeh_err_detected(struct pci_dev *pdev,
				pci_channel_state_t state)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);
	struct net_device *netdev = adapter->netdev;

	dev_err(&adapter->pdev->dev, "EEH error detected\n");

	/* Makes be_func_recovery_task skip Lancer recovery */
	adapter->eeh_error = true;

	cancel_delayed_work_sync(&adapter->func_recovery_work);

	rtnl_lock();
	netif_device_detach(netdev);
	rtnl_unlock();

	if (netif_running(netdev)) {
		rtnl_lock();
		be_close(netdev);
		rtnl_unlock();
	}
	be_clear(adapter);

	if (state == pci_channel_io_perm_failure)
		return PCI_ERS_RESULT_DISCONNECT;

	pci_disable_device(pdev);

	/* The error could cause the FW to trigger a flash debug dump.
	 * Resetting the card while flash dump is in progress
	 * can cause it not to recover; wait for it to finish
	 */
	ssleep(30);
	return PCI_ERS_RESULT_NEED_RESET;
}
4153
4154static pci_ers_result_t be_eeh_reset(struct pci_dev *pdev)
4155{
4156 struct be_adapter *adapter = pci_get_drvdata(pdev);
4157 int status;
4158
4159 dev_info(&adapter->pdev->dev, "EEH reset\n");
Padmanabh Ratnakarf67ef7b2012-07-12 03:57:09 +00004160 be_clear_all_error(adapter);
Sathya Perlacf588472010-02-14 21:22:01 +00004161
4162 status = pci_enable_device(pdev);
4163 if (status)
4164 return PCI_ERS_RESULT_DISCONNECT;
4165
4166 pci_set_master(pdev);
4167 pci_set_power_state(pdev, 0);
4168 pci_restore_state(pdev);
4169
4170 /* Check if card is ok and fw is ready */
Padmanabh Ratnakarbf99e502012-07-12 03:56:58 +00004171 status = be_fw_wait_ready(adapter);
Sathya Perlacf588472010-02-14 21:22:01 +00004172 if (status)
4173 return PCI_ERS_RESULT_DISCONNECT;
4174
Sathya Perlad6b6d982012-09-05 01:56:48 +00004175 pci_cleanup_aer_uncorrect_error_status(pdev);
Sathya Perlacf588472010-02-14 21:22:01 +00004176 return PCI_ERS_RESULT_RECOVERED;
4177}
4178
/* EEH callback: the slot reset succeeded; bring the adapter back up.
 * The firmware handshake order below matters: fw_init must precede
 * reset_function, which must precede be_setup().
 */
static void be_eeh_resume(struct pci_dev *pdev)
{
	int status = 0;
	struct be_adapter *adapter = pci_get_drvdata(pdev);
	struct net_device *netdev = adapter->netdev;

	dev_info(&adapter->pdev->dev, "EEH resume\n");

	/* Re-snapshot config space now that the function is sane again */
	pci_save_state(pdev);

	/* tell fw we're ready to fire cmds */
	status = be_cmd_fw_init(adapter);
	if (status)
		goto err;

	/* Clean FLR before re-provisioning resources in be_setup() */
	status = be_cmd_reset_function(adapter);
	if (status)
		goto err;

	status = be_setup(adapter);
	if (status)
		goto err;

	/* Reopen the interface only if it was up before the EEH event */
	if (netif_running(netdev)) {
		status = be_open(netdev);
		if (status)
			goto err;
	}

	/* Restart the error-recovery worker cancelled in err_detected */
	schedule_delayed_work(&adapter->func_recovery_work,
			      msecs_to_jiffies(1000));
	netif_device_attach(netdev);
	return;
err:
	dev_err(&adapter->pdev->dev, "EEH resume failed\n");
}
4215
/* PCI error-recovery (EEH/AER) callbacks wired into be_driver below */
static const struct pci_error_handlers be_eeh_handlers = {
	.error_detected = be_eeh_err_detected,
	.slot_reset = be_eeh_reset,
	.resume = be_eeh_resume,
};
4221
/* PCI driver descriptor: probe/remove, PM and shutdown entry points */
static struct pci_driver be_driver = {
	.name = DRV_NAME,
	.id_table = be_dev_ids,
	.probe = be_probe,
	.remove = be_remove,
	.suspend = be_suspend,
	.resume = be_resume,
	.shutdown = be_shutdown,
	.err_handler = &be_eeh_handlers
};
4232
4233static int __init be_init_module(void)
4234{
Joe Perches8e95a202009-12-03 07:58:21 +00004235 if (rx_frag_size != 8192 && rx_frag_size != 4096 &&
4236 rx_frag_size != 2048) {
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004237 printk(KERN_WARNING DRV_NAME
4238 " : Module param rx_frag_size must be 2048/4096/8192."
4239 " Using 2048\n");
4240 rx_frag_size = 2048;
4241 }
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004242
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004243 return pci_register_driver(&be_driver);
4244}
4245module_init(be_init_module);
4246
/* Module exit point: unregister the PCI driver (triggers be_remove
 * for every bound device).
 */
static void __exit be_exit_module(void)
{
	pci_unregister_driver(&be_driver);
}
module_exit(be_exit_module);