blob: 84379f4fe83711f2eb347249b5118bb5132c57b9 [file] [log] [blame]
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001/*
Ajit Khaparded2145cd2011-03-16 08:20:46 +00002 * Copyright (C) 2005 - 2011 Emulex
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003 * All rights reserved.
4 *
5 * This program is free software; you can redistribute it and/or
6 * modify it under the terms of the GNU General Public License version 2
7 * as published by the Free Software Foundation. The full GNU General
8 * Public License is included in this distribution in the file called COPYING.
9 *
10 * Contact Information:
Ajit Khaparded2145cd2011-03-16 08:20:46 +000011 * linux-drivers@emulex.com
Sathya Perla6b7c5b92009-03-11 23:32:03 -070012 *
Ajit Khaparded2145cd2011-03-16 08:20:46 +000013 * Emulex
14 * 3333 Susan Street
15 * Costa Mesa, CA 92626
Sathya Perla6b7c5b92009-03-11 23:32:03 -070016 */
17
Paul Gortmaker70c71602011-05-22 16:47:17 -040018#include <linux/prefetch.h>
Paul Gortmaker9d9779e2011-07-03 15:21:01 -040019#include <linux/module.h>
Sathya Perla6b7c5b92009-03-11 23:32:03 -070020#include "be.h"
Sathya Perla8788fdc2009-07-27 22:52:03 +000021#include "be_cmds.h"
Stephen Hemminger65f71b82009-03-27 00:25:24 -070022#include <asm/div64.h>
Sathya Perlad6b6d982012-09-05 01:56:48 +000023#include <linux/aer.h>
Sathya Perla6b7c5b92009-03-11 23:32:03 -070024
25MODULE_VERSION(DRV_VER);
26MODULE_DEVICE_TABLE(pci, be_dev_ids);
27MODULE_DESCRIPTION(DRV_DESC " " DRV_VER);
28MODULE_AUTHOR("ServerEngines Corporation");
29MODULE_LICENSE("GPL");
30
Sarveshwar Bandiba343c72010-03-31 02:56:12 +000031static unsigned int num_vfs;
Sarveshwar Bandiba343c72010-03-31 02:56:12 +000032module_param(num_vfs, uint, S_IRUGO);
Sarveshwar Bandiba343c72010-03-31 02:56:12 +000033MODULE_PARM_DESC(num_vfs, "Number of PCI VFs to initialize");
Sathya Perla6b7c5b92009-03-11 23:32:03 -070034
Sathya Perla11ac75e2011-12-13 00:58:50 +000035static ushort rx_frag_size = 2048;
36module_param(rx_frag_size, ushort, S_IRUGO);
37MODULE_PARM_DESC(rx_frag_size, "Size of a fragment that holds rcvd data.");
38
Sathya Perla6b7c5b92009-03-11 23:32:03 -070039static DEFINE_PCI_DEVICE_TABLE(be_dev_ids) = {
Ajit Khapardec4ca2372009-05-18 15:38:55 -070040 { PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID1) },
Ajit Khaparde59fd5d82009-10-29 01:11:06 -070041 { PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID2) },
Ajit Khapardec4ca2372009-05-18 15:38:55 -070042 { PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID1) },
43 { PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID2) },
Sathya Perlafe6d2a32010-11-21 23:25:50 +000044 { PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID3)},
Mammatha Edhala12f4d0a2011-05-18 03:26:22 +000045 { PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID4)},
Ajit Khapardeecedb6a2011-12-15 06:31:38 +000046 { PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID5)},
Sathya Perla6b7c5b92009-03-11 23:32:03 -070047 { 0 }
48};
49MODULE_DEVICE_TABLE(pci, be_dev_ids);
Ajit Khaparde7c185272010-07-29 06:16:33 +000050/* UE Status Low CSR */
Joe Perches42c8b112011-07-09 02:56:56 -070051static const char * const ue_status_low_desc[] = {
Ajit Khaparde7c185272010-07-29 06:16:33 +000052 "CEV",
53 "CTX",
54 "DBUF",
55 "ERX",
56 "Host",
57 "MPU",
58 "NDMA",
59 "PTC ",
60 "RDMA ",
61 "RXF ",
62 "RXIPS ",
63 "RXULP0 ",
64 "RXULP1 ",
65 "RXULP2 ",
66 "TIM ",
67 "TPOST ",
68 "TPRE ",
69 "TXIPS ",
70 "TXULP0 ",
71 "TXULP1 ",
72 "UC ",
73 "WDMA ",
74 "TXULP2 ",
75 "HOST1 ",
76 "P0_OB_LINK ",
77 "P1_OB_LINK ",
78 "HOST_GPIO ",
79 "MBOX ",
80 "AXGMAC0",
81 "AXGMAC1",
82 "JTAG",
83 "MPU_INTPEND"
84};
85/* UE Status High CSR */
Joe Perches42c8b112011-07-09 02:56:56 -070086static const char * const ue_status_hi_desc[] = {
Ajit Khaparde7c185272010-07-29 06:16:33 +000087 "LPCMEMHOST",
88 "MGMT_MAC",
89 "PCS0ONLINE",
90 "MPU_IRAM",
91 "PCS1ONLINE",
92 "PCTL0",
93 "PCTL1",
94 "PMEM",
95 "RR",
96 "TXPB",
97 "RXPP",
98 "XAUI",
99 "TXP",
100 "ARM",
101 "IPC",
102 "HOST2",
103 "HOST3",
104 "HOST4",
105 "HOST5",
106 "HOST6",
107 "HOST7",
108 "HOST8",
109 "HOST9",
Joe Perches42c8b112011-07-09 02:56:56 -0700110 "NETC",
Ajit Khaparde7c185272010-07-29 06:16:33 +0000111 "Unknown",
112 "Unknown",
113 "Unknown",
114 "Unknown",
115 "Unknown",
116 "Unknown",
117 "Unknown",
118 "Unknown"
119};
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700120
Sathya Perla752961a2011-10-24 02:45:03 +0000121/* Is BE in a multi-channel mode */
122static inline bool be_is_mc(struct be_adapter *adapter) {
123 return (adapter->function_mode & FLEX10_MODE ||
124 adapter->function_mode & VNIC_MODE ||
125 adapter->function_mode & UMC_ENABLED);
126}
127
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700128static void be_queue_free(struct be_adapter *adapter, struct be_queue_info *q)
129{
130 struct be_dma_mem *mem = &q->dma_mem;
Sathya Perla1cfafab2012-02-23 18:50:15 +0000131 if (mem->va) {
Ivan Vecera2b7bceb2011-02-02 08:05:12 +0000132 dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
133 mem->dma);
Sathya Perla1cfafab2012-02-23 18:50:15 +0000134 mem->va = NULL;
135 }
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700136}
137
138static int be_queue_alloc(struct be_adapter *adapter, struct be_queue_info *q,
139 u16 len, u16 entry_size)
140{
141 struct be_dma_mem *mem = &q->dma_mem;
142
143 memset(q, 0, sizeof(*q));
144 q->len = len;
145 q->entry_size = entry_size;
146 mem->size = len * entry_size;
Ivan Vecera2b7bceb2011-02-02 08:05:12 +0000147 mem->va = dma_alloc_coherent(&adapter->pdev->dev, mem->size, &mem->dma,
148 GFP_KERNEL);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700149 if (!mem->va)
Sathya Perla10ef9ab2012-02-09 18:05:27 +0000150 return -ENOMEM;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700151 memset(mem->va, 0, mem->size);
152 return 0;
153}
154
Sathya Perla8788fdc2009-07-27 22:52:03 +0000155static void be_intr_set(struct be_adapter *adapter, bool enable)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700156{
Sathya Perladb3ea782011-08-22 19:41:52 +0000157 u32 reg, enabled;
Sathya Perla5f0b8492009-07-27 22:52:56 +0000158
Padmanabh Ratnakarf67ef7b2012-07-12 03:57:09 +0000159 if (adapter->eeh_error)
Sathya Perlacf588472010-02-14 21:22:01 +0000160 return;
161
Sathya Perladb3ea782011-08-22 19:41:52 +0000162 pci_read_config_dword(adapter->pdev, PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET,
163 &reg);
164 enabled = reg & MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
165
Sathya Perla5f0b8492009-07-27 22:52:56 +0000166 if (!enabled && enable)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700167 reg |= MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
Sathya Perla5f0b8492009-07-27 22:52:56 +0000168 else if (enabled && !enable)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700169 reg &= ~MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
Sathya Perla5f0b8492009-07-27 22:52:56 +0000170 else
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700171 return;
Sathya Perla5f0b8492009-07-27 22:52:56 +0000172
Sathya Perladb3ea782011-08-22 19:41:52 +0000173 pci_write_config_dword(adapter->pdev,
174 PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET, reg);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700175}
176
Sathya Perla8788fdc2009-07-27 22:52:03 +0000177static void be_rxq_notify(struct be_adapter *adapter, u16 qid, u16 posted)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700178{
179 u32 val = 0;
180 val |= qid & DB_RQ_RING_ID_MASK;
181 val |= posted << DB_RQ_NUM_POSTED_SHIFT;
Sathya Perlaf3eb62d2010-06-29 00:11:17 +0000182
183 wmb();
Sathya Perla8788fdc2009-07-27 22:52:03 +0000184 iowrite32(val, adapter->db + DB_RQ_OFFSET);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700185}
186
Sathya Perla8788fdc2009-07-27 22:52:03 +0000187static void be_txq_notify(struct be_adapter *adapter, u16 qid, u16 posted)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700188{
189 u32 val = 0;
190 val |= qid & DB_TXULP_RING_ID_MASK;
191 val |= (posted & DB_TXULP_NUM_POSTED_MASK) << DB_TXULP_NUM_POSTED_SHIFT;
Sathya Perlaf3eb62d2010-06-29 00:11:17 +0000192
193 wmb();
Sathya Perla8788fdc2009-07-27 22:52:03 +0000194 iowrite32(val, adapter->db + DB_TXULP1_OFFSET);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700195}
196
/* Ring the EQ doorbell: optionally re-arm the EQ, clear its interrupt,
 * and acknowledge @num_popped consumed event entries.
 * Skipped when the device is in an EEH error state.
 */
static void be_eq_notify(struct be_adapter *adapter, u16 qid,
		bool arm, bool clear_int, u16 num_popped)
{
	u32 val = 0;
	/* ring id is split across two fields in the doorbell register */
	val |= qid & DB_EQ_RING_ID_MASK;
	val |= ((qid & DB_EQ_RING_ID_EXT_MASK) <<
			DB_EQ_RING_ID_EXT_MASK_SHIFT);

	if (adapter->eeh_error)
		return;

	if (arm)
		val |= 1 << DB_EQ_REARM_SHIFT;
	if (clear_int)
		val |= 1 << DB_EQ_CLR_SHIFT;
	/* identify this doorbell as an event-queue doorbell */
	val |= 1 << DB_EQ_EVNT_SHIFT;
	val |= num_popped << DB_EQ_NUM_POPPED_SHIFT;
	iowrite32(val, adapter->db + DB_EQ_OFFSET);
}
216
/* Ring the CQ doorbell: optionally re-arm the CQ and acknowledge
 * @num_popped consumed completion entries.
 * Skipped when the device is in an EEH error state.
 */
void be_cq_notify(struct be_adapter *adapter, u16 qid, bool arm, u16 num_popped)
{
	u32 val = 0;
	/* ring id is split across two fields in the doorbell register */
	val |= qid & DB_CQ_RING_ID_MASK;
	val |= ((qid & DB_CQ_RING_ID_EXT_MASK) <<
			DB_CQ_RING_ID_EXT_MASK_SHIFT);

	if (adapter->eeh_error)
		return;

	if (arm)
		val |= 1 << DB_CQ_REARM_SHIFT;
	val |= num_popped << DB_CQ_NUM_POPPED_SHIFT;
	iowrite32(val, adapter->db + DB_CQ_OFFSET);
}
232
/* ndo_set_mac_address handler.
 * Queries the MAC currently programmed in FW; if it differs from the
 * requested one, adds the new pmac entry first and only then deletes
 * the old one, so the interface is never left without a MAC.
 * Returns 0 on success or a negative errno / FW status on failure.
 */
static int be_mac_addr_set(struct net_device *netdev, void *p)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct sockaddr *addr = p;
	int status = 0;
	u8 current_mac[ETH_ALEN];
	/* remember the old pmac id so it can be deleted after the add */
	u32 pmac_id = adapter->pmac_id[0];

	if (!is_valid_ether_addr(addr->sa_data))
		return -EADDRNOTAVAIL;

	status = be_cmd_mac_addr_query(adapter, current_mac,
				MAC_ADDRESS_TYPE_NETWORK, false,
				adapter->if_handle, 0);
	if (status)
		goto err;

	/* program FW only when the address actually changes */
	if (memcmp(addr->sa_data, current_mac, ETH_ALEN)) {
		status = be_cmd_pmac_add(adapter, (u8 *)addr->sa_data,
				adapter->if_handle, &adapter->pmac_id[0], 0);
		if (status)
			goto err;

		be_cmd_pmac_del(adapter, adapter->if_handle, pmac_id, 0);
	}
	memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
	return 0;
err:
	dev_err(&adapter->pdev->dev, "MAC %pM set Failed\n", addr->sa_data);
	return status;
}
264
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000265static void populate_be2_stats(struct be_adapter *adapter)
266{
Sathya Perlaac124ff2011-07-25 19:10:14 +0000267 struct be_hw_stats_v0 *hw_stats = hw_stats_from_cmd(adapter);
268 struct be_pmem_stats *pmem_sts = &hw_stats->pmem;
269 struct be_rxf_stats_v0 *rxf_stats = &hw_stats->rxf;
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000270 struct be_port_rxf_stats_v0 *port_stats =
Sathya Perlaac124ff2011-07-25 19:10:14 +0000271 &rxf_stats->port[adapter->port_num];
272 struct be_drv_stats *drvs = &adapter->drv_stats;
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000273
Sathya Perlaac124ff2011-07-25 19:10:14 +0000274 be_dws_le_to_cpu(hw_stats, sizeof(*hw_stats));
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000275 drvs->rx_pause_frames = port_stats->rx_pause_frames;
276 drvs->rx_crc_errors = port_stats->rx_crc_errors;
277 drvs->rx_control_frames = port_stats->rx_control_frames;
278 drvs->rx_in_range_errors = port_stats->rx_in_range_errors;
279 drvs->rx_frame_too_long = port_stats->rx_frame_too_long;
280 drvs->rx_dropped_runt = port_stats->rx_dropped_runt;
281 drvs->rx_ip_checksum_errs = port_stats->rx_ip_checksum_errs;
282 drvs->rx_tcp_checksum_errs = port_stats->rx_tcp_checksum_errs;
283 drvs->rx_udp_checksum_errs = port_stats->rx_udp_checksum_errs;
284 drvs->rxpp_fifo_overflow_drop = port_stats->rx_fifo_overflow;
285 drvs->rx_dropped_tcp_length = port_stats->rx_dropped_tcp_length;
286 drvs->rx_dropped_too_small = port_stats->rx_dropped_too_small;
287 drvs->rx_dropped_too_short = port_stats->rx_dropped_too_short;
288 drvs->rx_out_range_errors = port_stats->rx_out_range_errors;
Sathya Perlaac124ff2011-07-25 19:10:14 +0000289 drvs->rx_input_fifo_overflow_drop = port_stats->rx_input_fifo_overflow;
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000290 drvs->rx_dropped_header_too_small =
291 port_stats->rx_dropped_header_too_small;
Sathya Perlad45b9d32012-01-29 20:17:39 +0000292 drvs->rx_address_mismatch_drops =
293 port_stats->rx_address_mismatch_drops +
294 port_stats->rx_vlan_mismatch_drops;
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000295 drvs->rx_alignment_symbol_errors =
296 port_stats->rx_alignment_symbol_errors;
297
298 drvs->tx_pauseframes = port_stats->tx_pauseframes;
299 drvs->tx_controlframes = port_stats->tx_controlframes;
300
301 if (adapter->port_num)
Sathya Perlaac124ff2011-07-25 19:10:14 +0000302 drvs->jabber_events = rxf_stats->port1_jabber_events;
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000303 else
Sathya Perlaac124ff2011-07-25 19:10:14 +0000304 drvs->jabber_events = rxf_stats->port0_jabber_events;
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000305 drvs->rx_drops_no_pbuf = rxf_stats->rx_drops_no_pbuf;
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000306 drvs->rx_drops_no_erx_descr = rxf_stats->rx_drops_no_erx_descr;
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000307 drvs->forwarded_packets = rxf_stats->forwarded_packets;
308 drvs->rx_drops_mtu = rxf_stats->rx_drops_mtu;
Sathya Perlaac124ff2011-07-25 19:10:14 +0000309 drvs->rx_drops_no_tpre_descr = rxf_stats->rx_drops_no_tpre_descr;
310 drvs->rx_drops_too_many_frags = rxf_stats->rx_drops_too_many_frags;
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000311 adapter->drv_stats.eth_red_drops = pmem_sts->eth_red_drops;
312}
313
314static void populate_be3_stats(struct be_adapter *adapter)
315{
Sathya Perlaac124ff2011-07-25 19:10:14 +0000316 struct be_hw_stats_v1 *hw_stats = hw_stats_from_cmd(adapter);
317 struct be_pmem_stats *pmem_sts = &hw_stats->pmem;
318 struct be_rxf_stats_v1 *rxf_stats = &hw_stats->rxf;
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000319 struct be_port_rxf_stats_v1 *port_stats =
Sathya Perlaac124ff2011-07-25 19:10:14 +0000320 &rxf_stats->port[adapter->port_num];
321 struct be_drv_stats *drvs = &adapter->drv_stats;
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000322
Sathya Perlaac124ff2011-07-25 19:10:14 +0000323 be_dws_le_to_cpu(hw_stats, sizeof(*hw_stats));
Ajit Khaparde02fe7022011-12-09 13:53:09 +0000324 drvs->pmem_fifo_overflow_drop = port_stats->pmem_fifo_overflow_drop;
325 drvs->rx_priority_pause_frames = port_stats->rx_priority_pause_frames;
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000326 drvs->rx_pause_frames = port_stats->rx_pause_frames;
327 drvs->rx_crc_errors = port_stats->rx_crc_errors;
328 drvs->rx_control_frames = port_stats->rx_control_frames;
329 drvs->rx_in_range_errors = port_stats->rx_in_range_errors;
330 drvs->rx_frame_too_long = port_stats->rx_frame_too_long;
331 drvs->rx_dropped_runt = port_stats->rx_dropped_runt;
332 drvs->rx_ip_checksum_errs = port_stats->rx_ip_checksum_errs;
333 drvs->rx_tcp_checksum_errs = port_stats->rx_tcp_checksum_errs;
334 drvs->rx_udp_checksum_errs = port_stats->rx_udp_checksum_errs;
335 drvs->rx_dropped_tcp_length = port_stats->rx_dropped_tcp_length;
336 drvs->rx_dropped_too_small = port_stats->rx_dropped_too_small;
337 drvs->rx_dropped_too_short = port_stats->rx_dropped_too_short;
338 drvs->rx_out_range_errors = port_stats->rx_out_range_errors;
339 drvs->rx_dropped_header_too_small =
340 port_stats->rx_dropped_header_too_small;
341 drvs->rx_input_fifo_overflow_drop =
342 port_stats->rx_input_fifo_overflow_drop;
Sathya Perlad45b9d32012-01-29 20:17:39 +0000343 drvs->rx_address_mismatch_drops = port_stats->rx_address_mismatch_drops;
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000344 drvs->rx_alignment_symbol_errors =
345 port_stats->rx_alignment_symbol_errors;
Sathya Perlaac124ff2011-07-25 19:10:14 +0000346 drvs->rxpp_fifo_overflow_drop = port_stats->rxpp_fifo_overflow_drop;
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000347 drvs->tx_pauseframes = port_stats->tx_pauseframes;
348 drvs->tx_controlframes = port_stats->tx_controlframes;
349 drvs->jabber_events = port_stats->jabber_events;
350 drvs->rx_drops_no_pbuf = rxf_stats->rx_drops_no_pbuf;
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000351 drvs->rx_drops_no_erx_descr = rxf_stats->rx_drops_no_erx_descr;
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000352 drvs->forwarded_packets = rxf_stats->forwarded_packets;
353 drvs->rx_drops_mtu = rxf_stats->rx_drops_mtu;
Sathya Perlaac124ff2011-07-25 19:10:14 +0000354 drvs->rx_drops_no_tpre_descr = rxf_stats->rx_drops_no_tpre_descr;
355 drvs->rx_drops_too_many_frags = rxf_stats->rx_drops_too_many_frags;
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000356 adapter->drv_stats.eth_red_drops = pmem_sts->eth_red_drops;
357}
358
/* Copy Lancer per-port (pport) HW counters into the driver stats block.
 * Lancer reports 64-bit counters; only the low 32 bits (_lo) are kept
 * for fields the driver tracks as 32-bit.
 */
static void populate_lancer_stats(struct be_adapter *adapter)
{

	struct be_drv_stats *drvs = &adapter->drv_stats;
	struct lancer_pport_stats *pport_stats =
					pport_stats_from_cmd(adapter);

	be_dws_le_to_cpu(pport_stats, sizeof(*pport_stats));
	drvs->rx_pause_frames = pport_stats->rx_pause_frames_lo;
	drvs->rx_crc_errors = pport_stats->rx_crc_errors_lo;
	drvs->rx_control_frames = pport_stats->rx_control_frames_lo;
	drvs->rx_in_range_errors = pport_stats->rx_in_range_errors;
	drvs->rx_frame_too_long = pport_stats->rx_frames_too_long_lo;
	drvs->rx_dropped_runt = pport_stats->rx_dropped_runt;
	drvs->rx_ip_checksum_errs = pport_stats->rx_ip_checksum_errors;
	drvs->rx_tcp_checksum_errs = pport_stats->rx_tcp_checksum_errors;
	drvs->rx_udp_checksum_errs = pport_stats->rx_udp_checksum_errors;
	drvs->rx_dropped_tcp_length =
				pport_stats->rx_dropped_invalid_tcp_length;
	drvs->rx_dropped_too_small = pport_stats->rx_dropped_too_small;
	drvs->rx_dropped_too_short = pport_stats->rx_dropped_too_short;
	drvs->rx_out_range_errors = pport_stats->rx_out_of_range_errors;
	drvs->rx_dropped_header_too_small =
				pport_stats->rx_dropped_header_too_small;
	/* Lancer has a single fifo-overflow counter; it feeds both
	 * driver counters below
	 */
	drvs->rx_input_fifo_overflow_drop = pport_stats->rx_fifo_overflow;
	/* fold address and vlan mismatches into one driver counter */
	drvs->rx_address_mismatch_drops =
					pport_stats->rx_address_mismatch_drops +
					pport_stats->rx_vlan_mismatch_drops;
	drvs->rx_alignment_symbol_errors = pport_stats->rx_symbol_errors_lo;
	drvs->rxpp_fifo_overflow_drop = pport_stats->rx_fifo_overflow;
	drvs->tx_pauseframes = pport_stats->tx_pause_frames_lo;
	drvs->tx_controlframes = pport_stats->tx_control_frames_lo;
	drvs->jabber_events = pport_stats->rx_jabbers;
	drvs->forwarded_packets = pport_stats->num_forwards_lo;
	drvs->rx_drops_mtu = pport_stats->rx_drops_mtu_lo;
	drvs->rx_drops_too_many_frags =
				pport_stats->rx_drops_too_many_frags_lo;
}
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000397
Sathya Perla09c1c682011-08-22 19:41:53 +0000398static void accumulate_16bit_val(u32 *acc, u16 val)
399{
400#define lo(x) (x & 0xFFFF)
401#define hi(x) (x & 0xFFFF0000)
402 bool wrapped = val < lo(*acc);
403 u32 newacc = hi(*acc) + val;
404
405 if (wrapped)
406 newacc += 65536;
407 ACCESS_ONCE(*acc) = newacc;
408}
409
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000410void be_parse_stats(struct be_adapter *adapter)
411{
Sathya Perlaac124ff2011-07-25 19:10:14 +0000412 struct be_erx_stats_v1 *erx = be_erx_stats_from_cmd(adapter);
413 struct be_rx_obj *rxo;
414 int i;
415
Selvin Xavier005d5692011-05-16 07:36:35 +0000416 if (adapter->generation == BE_GEN3) {
417 if (lancer_chip(adapter))
418 populate_lancer_stats(adapter);
419 else
420 populate_be3_stats(adapter);
421 } else {
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000422 populate_be2_stats(adapter);
Selvin Xavier005d5692011-05-16 07:36:35 +0000423 }
Sathya Perlaac124ff2011-07-25 19:10:14 +0000424
Padmanabh Ratnakard51ebd32012-04-25 01:46:52 +0000425 if (lancer_chip(adapter))
426 goto done;
427
Sathya Perlaac124ff2011-07-25 19:10:14 +0000428 /* as erx_v1 is longer than v0, ok to use v1 defn for v0 access */
Sathya Perla09c1c682011-08-22 19:41:53 +0000429 for_all_rx_queues(adapter, rxo, i) {
430 /* below erx HW counter can actually wrap around after
431 * 65535. Driver accumulates a 32-bit value
432 */
433 accumulate_16bit_val(&rx_stats(rxo)->rx_drops_no_frags,
434 (u16)erx->rx_drops_no_fragments[rxo->q.id]);
435 }
Padmanabh Ratnakard51ebd32012-04-25 01:46:52 +0000436done:
437 return;
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000438}
439
/* ndo_get_stats64 handler: aggregates per-RQ/per-TQ packet counters
 * (read consistently via the u64_stats seqcount) and maps driver HW
 * error counters onto the rtnl_link_stats64 fields.
 */
static struct rtnl_link_stats64 *be_get_stats64(struct net_device *netdev,
					struct rtnl_link_stats64 *stats)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_drv_stats *drvs = &adapter->drv_stats;
	struct be_rx_obj *rxo;
	struct be_tx_obj *txo;
	u64 pkts, bytes;
	unsigned int start;
	int i;

	for_all_rx_queues(adapter, rxo, i) {
		const struct be_rx_stats *rx_stats = rx_stats(rxo);
		/* retry until a consistent pkts/bytes snapshot is read */
		do {
			start = u64_stats_fetch_begin_bh(&rx_stats->sync);
			pkts = rx_stats(rxo)->rx_pkts;
			bytes = rx_stats(rxo)->rx_bytes;
		} while (u64_stats_fetch_retry_bh(&rx_stats->sync, start));
		stats->rx_packets += pkts;
		stats->rx_bytes += bytes;
		stats->multicast += rx_stats(rxo)->rx_mcast_pkts;
		stats->rx_dropped += rx_stats(rxo)->rx_drops_no_skbs +
					rx_stats(rxo)->rx_drops_no_frags;
	}

	for_all_tx_queues(adapter, txo, i) {
		const struct be_tx_stats *tx_stats = tx_stats(txo);
		/* retry until a consistent pkts/bytes snapshot is read */
		do {
			start = u64_stats_fetch_begin_bh(&tx_stats->sync);
			pkts = tx_stats(txo)->tx_pkts;
			bytes = tx_stats(txo)->tx_bytes;
		} while (u64_stats_fetch_retry_bh(&tx_stats->sync, start));
		stats->tx_packets += pkts;
		stats->tx_bytes += bytes;
	}

	/* bad pkts received */
	stats->rx_errors = drvs->rx_crc_errors +
		drvs->rx_alignment_symbol_errors +
		drvs->rx_in_range_errors +
		drvs->rx_out_range_errors +
		drvs->rx_frame_too_long +
		drvs->rx_dropped_too_small +
		drvs->rx_dropped_too_short +
		drvs->rx_dropped_header_too_small +
		drvs->rx_dropped_tcp_length +
		drvs->rx_dropped_runt;

	/* detailed rx errors */
	stats->rx_length_errors = drvs->rx_in_range_errors +
		drvs->rx_out_range_errors +
		drvs->rx_frame_too_long;

	stats->rx_crc_errors = drvs->rx_crc_errors;

	/* frame alignment errors */
	stats->rx_frame_errors = drvs->rx_alignment_symbol_errors;

	/* receiver fifo overrun */
	/* drops_no_pbuf is no per i/f, it's per BE card */
	stats->rx_fifo_errors = drvs->rxpp_fifo_overflow_drop +
				drvs->rx_input_fifo_overflow_drop +
				drvs->rx_drops_no_pbuf;
	return stats;
}
505
Ajit Khapardeb236916a2011-12-30 12:15:40 +0000506void be_link_status_update(struct be_adapter *adapter, u8 link_status)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700507{
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700508 struct net_device *netdev = adapter->netdev;
509
Ajit Khapardeb236916a2011-12-30 12:15:40 +0000510 if (!(adapter->flags & BE_FLAGS_LINK_STATUS_INIT)) {
Sathya Perlaea172a02011-08-02 19:57:42 +0000511 netif_carrier_off(netdev);
Ajit Khapardeb236916a2011-12-30 12:15:40 +0000512 adapter->flags |= BE_FLAGS_LINK_STATUS_INIT;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700513 }
Ajit Khapardeb236916a2011-12-30 12:15:40 +0000514
515 if ((link_status & LINK_STATUS_MASK) == LINK_UP)
516 netif_carrier_on(netdev);
517 else
518 netif_carrier_off(netdev);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700519}
520
Sathya Perla3c8def92011-06-12 20:01:58 +0000521static void be_tx_stats_update(struct be_tx_obj *txo,
Ajit Khaparde91992e42010-02-19 13:57:12 +0000522 u32 wrb_cnt, u32 copied, u32 gso_segs, bool stopped)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700523{
Sathya Perla3c8def92011-06-12 20:01:58 +0000524 struct be_tx_stats *stats = tx_stats(txo);
525
Sathya Perlaab1594e2011-07-25 19:10:15 +0000526 u64_stats_update_begin(&stats->sync);
Sathya Perlaac124ff2011-07-25 19:10:14 +0000527 stats->tx_reqs++;
528 stats->tx_wrbs += wrb_cnt;
529 stats->tx_bytes += copied;
530 stats->tx_pkts += (gso_segs ? gso_segs : 1);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700531 if (stopped)
Sathya Perlaac124ff2011-07-25 19:10:14 +0000532 stats->tx_stops++;
Sathya Perlaab1594e2011-07-25 19:10:15 +0000533 u64_stats_update_end(&stats->sync);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700534}
535
536/* Determine number of WRB entries needed to xmit data in an skb */
Sathya Perlafe6d2a32010-11-21 23:25:50 +0000537static u32 wrb_cnt_for_skb(struct be_adapter *adapter, struct sk_buff *skb,
538 bool *dummy)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700539{
David S. Millerebc8d2a2009-06-09 01:01:31 -0700540 int cnt = (skb->len > skb->data_len);
541
542 cnt += skb_shinfo(skb)->nr_frags;
543
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700544 /* to account for hdr wrb */
545 cnt++;
Sathya Perlafe6d2a32010-11-21 23:25:50 +0000546 if (lancer_chip(adapter) || !(cnt & 1)) {
547 *dummy = false;
548 } else {
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700549 /* add a dummy to make it an even num */
550 cnt++;
551 *dummy = true;
Sathya Perlafe6d2a32010-11-21 23:25:50 +0000552 }
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700553 BUG_ON(cnt > BE_MAX_TX_FRAG_COUNT);
554 return cnt;
555}
556
557static inline void wrb_fill(struct be_eth_wrb *wrb, u64 addr, int len)
558{
559 wrb->frag_pa_hi = upper_32_bits(addr);
560 wrb->frag_pa_lo = addr & 0xFFFFFFFF;
561 wrb->frag_len = len & ETH_WRB_FRAG_LEN_MASK;
Somnath Kotur89b1f492012-06-24 19:40:55 +0000562 wrb->rsvd0 = 0;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700563}
564
Ajit Khaparde1ded1322011-12-09 13:53:17 +0000565static inline u16 be_get_tx_vlan_tag(struct be_adapter *adapter,
566 struct sk_buff *skb)
567{
568 u8 vlan_prio;
569 u16 vlan_tag;
570
571 vlan_tag = vlan_tx_tag_get(skb);
572 vlan_prio = (vlan_tag & VLAN_PRIO_MASK) >> VLAN_PRIO_SHIFT;
573 /* If vlan priority provided by OS is NOT in available bmap */
574 if (!(adapter->vlan_prio_bmap & (1 << vlan_prio)))
575 vlan_tag = (vlan_tag & ~VLAN_PRIO_MASK) |
576 adapter->recommended_prio;
577
578 return vlan_tag;
579}
580
Somnath Kotur93040ae2012-06-26 22:32:10 +0000581static int be_vlan_tag_chk(struct be_adapter *adapter, struct sk_buff *skb)
582{
583 return vlan_tx_tag_present(skb) || adapter->pvid;
584}
585
/* Program the tx header wrb: CRC, LSO/checksum offload flags, vlan tag
 * and the total wrb count/length for this request.
 * @wrb_cnt: total wrbs (incl. this hdr and any dummy) in the request
 * @len: total byte length of the skb data being transmitted
 */
static void wrb_fill_hdr(struct be_adapter *adapter, struct be_eth_hdr_wrb *hdr,
		struct sk_buff *skb, u32 wrb_cnt, u32 len)
{
	u16 vlan_tag;

	memset(hdr, 0, sizeof(*hdr));

	AMAP_SET_BITS(struct amap_eth_hdr_wrb, crc, hdr, 1);

	if (skb_is_gso(skb)) {
		AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso, hdr, 1);
		AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso_mss,
			hdr, skb_shinfo(skb)->gso_size);
		/* Lancer does not take the lso6 flag */
		if (skb_is_gso_v6(skb) && !lancer_chip(adapter))
			AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso6, hdr, 1);
		/* Lancer A0 needs explicit checksum flags even for LSO */
		if (lancer_chip(adapter) && adapter->sli_family ==
			LANCER_A0_SLI_FAMILY) {
			AMAP_SET_BITS(struct amap_eth_hdr_wrb, ipcs, hdr, 1);
			if (is_tcp_pkt(skb))
				AMAP_SET_BITS(struct amap_eth_hdr_wrb,
						tcpcs, hdr, 1);
			else if (is_udp_pkt(skb))
				AMAP_SET_BITS(struct amap_eth_hdr_wrb,
						udpcs, hdr, 1);
		}
	} else if (skb->ip_summed == CHECKSUM_PARTIAL) {
		if (is_tcp_pkt(skb))
			AMAP_SET_BITS(struct amap_eth_hdr_wrb, tcpcs, hdr, 1);
		else if (is_udp_pkt(skb))
			AMAP_SET_BITS(struct amap_eth_hdr_wrb, udpcs, hdr, 1);
	}

	if (vlan_tx_tag_present(skb)) {
		AMAP_SET_BITS(struct amap_eth_hdr_wrb, vlan, hdr, 1);
		/* tag may be rewritten with the FW-recommended priority */
		vlan_tag = be_get_tx_vlan_tag(adapter, skb);
		AMAP_SET_BITS(struct amap_eth_hdr_wrb, vlan_tag, hdr, vlan_tag);
	}

	AMAP_SET_BITS(struct amap_eth_hdr_wrb, event, hdr, 1);
	AMAP_SET_BITS(struct amap_eth_hdr_wrb, complete, hdr, 1);
	AMAP_SET_BITS(struct amap_eth_hdr_wrb, num_wrb, hdr, wrb_cnt);
	AMAP_SET_BITS(struct amap_eth_hdr_wrb, len, hdr, len);
}
629
Ivan Vecera2b7bceb2011-02-02 08:05:12 +0000630static void unmap_tx_frag(struct device *dev, struct be_eth_wrb *wrb,
Sathya Perla7101e112010-03-22 20:41:12 +0000631 bool unmap_single)
632{
633 dma_addr_t dma;
634
635 be_dws_le_to_cpu(wrb, sizeof(*wrb));
636
637 dma = (u64)wrb->frag_pa_hi << 32 | (u64)wrb->frag_pa_lo;
FUJITA Tomonorib681ee72010-04-04 21:40:18 +0000638 if (wrb->frag_len) {
Sathya Perla7101e112010-03-22 20:41:12 +0000639 if (unmap_single)
Ivan Vecera2b7bceb2011-02-02 08:05:12 +0000640 dma_unmap_single(dev, dma, wrb->frag_len,
641 DMA_TO_DEVICE);
Sathya Perla7101e112010-03-22 20:41:12 +0000642 else
Ivan Vecera2b7bceb2011-02-02 08:05:12 +0000643 dma_unmap_page(dev, dma, wrb->frag_len, DMA_TO_DEVICE);
Sathya Perla7101e112010-03-22 20:41:12 +0000644 }
645}
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700646
/* Populate TX WRBs in @txq for @skb: one header WRB followed by one data
 * WRB per DMA-mapped buffer (linear head + each page frag), plus an optional
 * dummy WRB used as a HW workaround. Returns the number of payload bytes
 * queued, or 0 if any DMA mapping failed (all prior mappings are undone and
 * the queue head is restored in that case).
 */
static int make_tx_wrbs(struct be_adapter *adapter, struct be_queue_info *txq,
		struct sk_buff *skb, u32 wrb_cnt, bool dummy_wrb)
{
	dma_addr_t busaddr;
	int i, copied = 0;
	struct device *dev = &adapter->pdev->dev;
	struct sk_buff *first_skb = skb;
	struct be_eth_wrb *wrb;
	struct be_eth_hdr_wrb *hdr;
	bool map_single = false;	/* true once the linear head is mapped */
	u16 map_head;			/* first data-WRB slot; unwind point */

	/* Reserve the first slot for the header WRB; it is filled last,
	 * once the total copied length is known.
	 */
	hdr = queue_head_node(txq);
	queue_head_inc(txq);
	map_head = txq->head;

	/* Map the linear (head) portion of the skb, if it has one */
	if (skb->len > skb->data_len) {
		int len = skb_headlen(skb);
		busaddr = dma_map_single(dev, skb->data, len, DMA_TO_DEVICE);
		if (dma_mapping_error(dev, busaddr))
			goto dma_err;
		map_single = true;
		wrb = queue_head_node(txq);
		wrb_fill(wrb, busaddr, len);
		be_dws_cpu_to_le(wrb, sizeof(*wrb));
		queue_head_inc(txq);
		copied += len;
	}

	/* Map each page fragment into its own WRB */
	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		const struct skb_frag_struct *frag =
			&skb_shinfo(skb)->frags[i];
		busaddr = skb_frag_dma_map(dev, frag, 0,
				skb_frag_size(frag), DMA_TO_DEVICE);
		if (dma_mapping_error(dev, busaddr))
			goto dma_err;
		wrb = queue_head_node(txq);
		wrb_fill(wrb, busaddr, skb_frag_size(frag));
		be_dws_cpu_to_le(wrb, sizeof(*wrb));
		queue_head_inc(txq);
		copied += skb_frag_size(frag);
	}

	/* Zero-length dummy WRB, requested by the caller as a HW workaround */
	if (dummy_wrb) {
		wrb = queue_head_node(txq);
		wrb_fill(wrb, 0, 0);
		be_dws_cpu_to_le(wrb, sizeof(*wrb));
		queue_head_inc(txq);
	}

	wrb_fill_hdr(adapter, hdr, first_skb, wrb_cnt, copied);
	be_dws_cpu_to_le(hdr, sizeof(*hdr));

	return copied;
dma_err:
	/* Unwind: rewind the queue head to map_head and unmap every WRB
	 * mapped so far. Only the first one (if the head was mapped) was a
	 * dma_map_single() mapping; the rest are page mappings.
	 */
	txq->head = map_head;
	while (copied) {
		wrb = queue_head_node(txq);
		unmap_tx_frag(dev, wrb, map_single);
		map_single = false;
		copied -= wrb->frag_len;
		queue_head_inc(txq);
	}
	return 0;
}
712
Somnath Kotur93040ae2012-06-26 22:32:10 +0000713static struct sk_buff *be_insert_vlan_in_pkt(struct be_adapter *adapter,
714 struct sk_buff *skb)
715{
716 u16 vlan_tag = 0;
717
718 skb = skb_share_check(skb, GFP_ATOMIC);
719 if (unlikely(!skb))
720 return skb;
721
722 if (vlan_tx_tag_present(skb)) {
723 vlan_tag = be_get_tx_vlan_tag(adapter, skb);
724 __vlan_put_tag(skb, vlan_tag);
725 skb->vlan_tci = 0;
726 }
727
728 return skb;
729}
730
/* ndo_start_xmit handler: apply HW-workaround fixups to @skb, build its
 * WRBs and ring the TX doorbell. Always returns NETDEV_TX_OK; on DMA
 * mapping failure the skb is dropped (freed) rather than requeued.
 */
static netdev_tx_t be_xmit(struct sk_buff *skb,
			struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_tx_obj *txo = &adapter->tx_obj[skb_get_queue_mapping(skb)];
	struct be_queue_info *txq = &txo->q;
	struct iphdr *ip = NULL;
	u32 wrb_cnt = 0, copied = 0;
	u32 start = txq->head, eth_hdr_len;	/* start = slot of the hdr WRB */
	bool dummy_wrb, stopped = false;

	eth_hdr_len = ntohs(skb->protocol) == ETH_P_8021Q ?
		VLAN_ETH_HLEN : ETH_HLEN;

	/* HW has a bug which considers padding bytes as legal
	 * and modifies the IPv4 hdr's 'tot_len' field
	 */
	if (skb->len <= 60 && be_vlan_tag_chk(adapter, skb) &&
			is_ipv4_pkt(skb)) {
		/* trim the frame back down to exactly the IP datagram */
		ip = (struct iphdr *)ip_hdr(skb);
		pskb_trim(skb, eth_hdr_len + ntohs(ip->tot_len));
	}

	/* HW has a bug wherein it will calculate CSUM for VLAN
	 * pkts even though it is disabled.
	 * Manually insert VLAN in pkt.
	 */
	if (skb->ip_summed != CHECKSUM_PARTIAL &&
			be_vlan_tag_chk(adapter, skb)) {
		skb = be_insert_vlan_in_pkt(adapter, skb);
		if (unlikely(!skb))
			goto tx_drop;
	}

	wrb_cnt = wrb_cnt_for_skb(adapter, skb, &dummy_wrb);

	copied = make_tx_wrbs(adapter, txq, skb, wrb_cnt, dummy_wrb);
	if (copied) {
		int gso_segs = skb_shinfo(skb)->gso_segs;

		/* record the sent skb in the sent_skb table */
		BUG_ON(txo->sent_skb_list[start]);
		txo->sent_skb_list[start] = skb;

		/* Ensure txq has space for the next skb; Else stop the queue
		 * *BEFORE* ringing the tx doorbell, so that we serialze the
		 * tx compls of the current transmit which'll wake up the queue
		 */
		atomic_add(wrb_cnt, &txq->used);
		if ((BE_MAX_TX_FRAG_COUNT + atomic_read(&txq->used)) >=
								txq->len) {
			netif_stop_subqueue(netdev, skb_get_queue_mapping(skb));
			stopped = true;
		}

		be_txq_notify(adapter, txq->id, wrb_cnt);

		be_tx_stats_update(txo, wrb_cnt, copied, gso_segs, stopped);
	} else {
		/* mapping failed: rewind the queue head and drop the pkt */
		txq->head = start;
		dev_kfree_skb_any(skb);
	}
tx_drop:
	return NETDEV_TX_OK;
}
796
797static int be_change_mtu(struct net_device *netdev, int new_mtu)
798{
799 struct be_adapter *adapter = netdev_priv(netdev);
800 if (new_mtu < BE_MIN_MTU ||
Ajit Khaparde34a89b82010-02-09 01:32:43 +0000801 new_mtu > (BE_MAX_JUMBO_FRAME_SIZE -
802 (ETH_HLEN + ETH_FCS_LEN))) {
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700803 dev_info(&adapter->pdev->dev,
804 "MTU must be between %d and %d bytes\n",
Ajit Khaparde34a89b82010-02-09 01:32:43 +0000805 BE_MIN_MTU,
806 (BE_MAX_JUMBO_FRAME_SIZE - (ETH_HLEN + ETH_FCS_LEN)));
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700807 return -EINVAL;
808 }
809 dev_info(&adapter->pdev->dev, "MTU changed from %d to %d bytes\n",
810 netdev->mtu, new_mtu);
811 netdev->mtu = new_mtu;
812 return 0;
813}
814
815/*
Ajit Khaparde82903e42010-02-09 01:34:57 +0000816 * A max of 64 (BE_NUM_VLANS_SUPPORTED) vlans can be configured in BE.
817 * If the user configures more, place BE in vlan promiscuous mode.
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700818 */
Sathya Perla10329df2012-06-05 19:37:18 +0000819static int be_vid_config(struct be_adapter *adapter)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700820{
Sathya Perla10329df2012-06-05 19:37:18 +0000821 u16 vids[BE_NUM_VLANS_SUPPORTED];
822 u16 num = 0, i;
Ajit Khaparde82903e42010-02-09 01:34:57 +0000823 int status = 0;
Ajit Khaparde1da87b72010-07-23 01:51:22 +0000824
Sathya Perlac0e64ef2011-08-02 19:57:43 +0000825 /* No need to further configure vids if in promiscuous mode */
826 if (adapter->promiscuous)
827 return 0;
828
Padmanabh Ratnakar0fc16eb2012-04-25 01:46:06 +0000829 if (adapter->vlans_added > adapter->max_vlans)
830 goto set_vlan_promisc;
831
832 /* Construct VLAN Table to give to HW */
833 for (i = 0; i < VLAN_N_VID; i++)
834 if (adapter->vlan_tag[i])
Sathya Perla10329df2012-06-05 19:37:18 +0000835 vids[num++] = cpu_to_le16(i);
Padmanabh Ratnakar0fc16eb2012-04-25 01:46:06 +0000836
837 status = be_cmd_vlan_config(adapter, adapter->if_handle,
Sathya Perla10329df2012-06-05 19:37:18 +0000838 vids, num, 1, 0);
Padmanabh Ratnakar0fc16eb2012-04-25 01:46:06 +0000839
840 /* Set to VLAN promisc mode as setting VLAN filter failed */
841 if (status) {
842 dev_info(&adapter->pdev->dev, "Exhausted VLAN HW filters.\n");
843 dev_info(&adapter->pdev->dev, "Disabling HW VLAN filtering.\n");
844 goto set_vlan_promisc;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700845 }
Ajit Khaparde1da87b72010-07-23 01:51:22 +0000846
Sathya Perlab31c50a2009-09-17 10:30:13 -0700847 return status;
Padmanabh Ratnakar0fc16eb2012-04-25 01:46:06 +0000848
849set_vlan_promisc:
850 status = be_cmd_vlan_config(adapter, adapter->if_handle,
851 NULL, 0, 1, 1);
852 return status;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700853}
854
Jiri Pirko8e586132011-12-08 19:52:37 -0500855static int be_vlan_add_vid(struct net_device *netdev, u16 vid)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700856{
857 struct be_adapter *adapter = netdev_priv(netdev);
Ajit Khaparde80817cb2011-12-30 12:15:12 +0000858 int status = 0;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700859
Ajit Khaparde80817cb2011-12-30 12:15:12 +0000860 if (!be_physfn(adapter)) {
861 status = -EINVAL;
862 goto ret;
863 }
Sarveshwar Bandiba343c72010-03-31 02:56:12 +0000864
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700865 adapter->vlan_tag[vid] = 1;
Ajit Khaparde82903e42010-02-09 01:34:57 +0000866 if (adapter->vlans_added <= (adapter->max_vlans + 1))
Sathya Perla10329df2012-06-05 19:37:18 +0000867 status = be_vid_config(adapter);
Jiri Pirko8e586132011-12-08 19:52:37 -0500868
Ajit Khaparde80817cb2011-12-30 12:15:12 +0000869 if (!status)
870 adapter->vlans_added++;
871 else
872 adapter->vlan_tag[vid] = 0;
873ret:
874 return status;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700875}
876
Jiri Pirko8e586132011-12-08 19:52:37 -0500877static int be_vlan_rem_vid(struct net_device *netdev, u16 vid)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700878{
879 struct be_adapter *adapter = netdev_priv(netdev);
Ajit Khaparde80817cb2011-12-30 12:15:12 +0000880 int status = 0;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700881
Ajit Khaparde80817cb2011-12-30 12:15:12 +0000882 if (!be_physfn(adapter)) {
883 status = -EINVAL;
884 goto ret;
885 }
Sarveshwar Bandiba343c72010-03-31 02:56:12 +0000886
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700887 adapter->vlan_tag[vid] = 0;
Ajit Khaparde82903e42010-02-09 01:34:57 +0000888 if (adapter->vlans_added <= adapter->max_vlans)
Sathya Perla10329df2012-06-05 19:37:18 +0000889 status = be_vid_config(adapter);
Jiri Pirko8e586132011-12-08 19:52:37 -0500890
Ajit Khaparde80817cb2011-12-30 12:15:12 +0000891 if (!status)
892 adapter->vlans_added--;
893 else
894 adapter->vlan_tag[vid] = 1;
895ret:
896 return status;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700897}
898
/* ndo_set_rx_mode handler: sync promiscuous/multicast/unicast filter state
 * from the net_device flags and address lists into the adapter's HW filters.
 * Falls back to (multicast or full) promiscuous mode when HW filter slots
 * are exhausted.
 */
static void be_set_rx_mode(struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	int status;

	if (netdev->flags & IFF_PROMISC) {
		be_cmd_rx_filter(adapter, IFF_PROMISC, ON);
		adapter->promiscuous = true;
		goto done;
	}

	/* BE was previously in promiscuous mode; disable it */
	if (adapter->promiscuous) {
		adapter->promiscuous = false;
		be_cmd_rx_filter(adapter, IFF_PROMISC, OFF);

		/* re-program the VLAN filters that promisc mode bypassed */
		if (adapter->vlans_added)
			be_vid_config(adapter);
	}

	/* Enable multicast promisc if num configured exceeds what we support */
	if (netdev->flags & IFF_ALLMULTI ||
		netdev_mc_count(netdev) > BE_MAX_MC) {
		be_cmd_rx_filter(adapter, IFF_ALLMULTI, ON);
		goto done;
	}

	/* Unicast MAC list changed: re-sync it with the HW pmac table */
	if (netdev_uc_count(netdev) != adapter->uc_macs) {
		struct netdev_hw_addr *ha;
		int i = 1; /* First slot is claimed by the Primary MAC */

		/* delete all previously programmed secondary unicast MACs */
		for (; adapter->uc_macs > 0; adapter->uc_macs--, i++) {
			be_cmd_pmac_del(adapter, adapter->if_handle,
					adapter->pmac_id[i], 0);
		}

		/* too many unicast MACs for the HW table: go promiscuous */
		if (netdev_uc_count(netdev) > adapter->max_pmac_cnt) {
			be_cmd_rx_filter(adapter, IFF_PROMISC, ON);
			adapter->promiscuous = true;
			goto done;
		}

		netdev_for_each_uc_addr(ha, adapter->netdev) {
			adapter->uc_macs++; /* First slot is for Primary MAC */
			be_cmd_pmac_add(adapter, (u8 *)ha->addr,
					adapter->if_handle,
					&adapter->pmac_id[adapter->uc_macs], 0);
		}
	}

	status = be_cmd_rx_filter(adapter, IFF_MULTICAST, ON);

	/* Set to MCAST promisc mode if setting MULTICAST address fails */
	if (status) {
		dev_info(&adapter->pdev->dev, "Exhausted multicast HW filters.\n");
		dev_info(&adapter->pdev->dev, "Disabling HW multicast filtering.\n");
		be_cmd_rx_filter(adapter, IFF_ALLMULTI, ON);
	}
done:
	return;
}
960
Sarveshwar Bandiba343c72010-03-31 02:56:12 +0000961static int be_set_vf_mac(struct net_device *netdev, int vf, u8 *mac)
962{
963 struct be_adapter *adapter = netdev_priv(netdev);
Sathya Perla11ac75e2011-12-13 00:58:50 +0000964 struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf];
Sarveshwar Bandiba343c72010-03-31 02:56:12 +0000965 int status;
966
Sathya Perla11ac75e2011-12-13 00:58:50 +0000967 if (!sriov_enabled(adapter))
Sarveshwar Bandiba343c72010-03-31 02:56:12 +0000968 return -EPERM;
969
Sathya Perla11ac75e2011-12-13 00:58:50 +0000970 if (!is_valid_ether_addr(mac) || vf >= adapter->num_vfs)
Sarveshwar Bandiba343c72010-03-31 02:56:12 +0000971 return -EINVAL;
972
Padmanabh Ratnakar590c3912011-11-25 05:47:26 +0000973 if (lancer_chip(adapter)) {
974 status = be_cmd_set_mac_list(adapter, mac, 1, vf + 1);
975 } else {
Sathya Perla11ac75e2011-12-13 00:58:50 +0000976 status = be_cmd_pmac_del(adapter, vf_cfg->if_handle,
977 vf_cfg->pmac_id, vf + 1);
Sarveshwar Bandiba343c72010-03-31 02:56:12 +0000978
Sathya Perla11ac75e2011-12-13 00:58:50 +0000979 status = be_cmd_pmac_add(adapter, mac, vf_cfg->if_handle,
980 &vf_cfg->pmac_id, vf + 1);
Padmanabh Ratnakar590c3912011-11-25 05:47:26 +0000981 }
982
Ajit Khaparde64600ea2010-07-23 01:50:34 +0000983 if (status)
Sarveshwar Bandiba343c72010-03-31 02:56:12 +0000984 dev_err(&adapter->pdev->dev, "MAC %pM set on VF %d Failed\n",
985 mac, vf);
Ajit Khaparde64600ea2010-07-23 01:50:34 +0000986 else
Sathya Perla11ac75e2011-12-13 00:58:50 +0000987 memcpy(vf_cfg->mac_addr, mac, ETH_ALEN);
Ajit Khaparde64600ea2010-07-23 01:50:34 +0000988
Sarveshwar Bandiba343c72010-03-31 02:56:12 +0000989 return status;
990}
991
Ajit Khaparde64600ea2010-07-23 01:50:34 +0000992static int be_get_vf_config(struct net_device *netdev, int vf,
993 struct ifla_vf_info *vi)
994{
995 struct be_adapter *adapter = netdev_priv(netdev);
Sathya Perla11ac75e2011-12-13 00:58:50 +0000996 struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf];
Ajit Khaparde64600ea2010-07-23 01:50:34 +0000997
Sathya Perla11ac75e2011-12-13 00:58:50 +0000998 if (!sriov_enabled(adapter))
Ajit Khaparde64600ea2010-07-23 01:50:34 +0000999 return -EPERM;
1000
Sathya Perla11ac75e2011-12-13 00:58:50 +00001001 if (vf >= adapter->num_vfs)
Ajit Khaparde64600ea2010-07-23 01:50:34 +00001002 return -EINVAL;
1003
1004 vi->vf = vf;
Sathya Perla11ac75e2011-12-13 00:58:50 +00001005 vi->tx_rate = vf_cfg->tx_rate;
1006 vi->vlan = vf_cfg->vlan_tag;
Ajit Khaparde64600ea2010-07-23 01:50:34 +00001007 vi->qos = 0;
Sathya Perla11ac75e2011-12-13 00:58:50 +00001008 memcpy(&vi->mac, vf_cfg->mac_addr, ETH_ALEN);
Ajit Khaparde64600ea2010-07-23 01:50:34 +00001009
1010 return 0;
1011}
1012
Ajit Khaparde1da87b72010-07-23 01:51:22 +00001013static int be_set_vf_vlan(struct net_device *netdev,
1014 int vf, u16 vlan, u8 qos)
1015{
1016 struct be_adapter *adapter = netdev_priv(netdev);
1017 int status = 0;
1018
Sathya Perla11ac75e2011-12-13 00:58:50 +00001019 if (!sriov_enabled(adapter))
Ajit Khaparde1da87b72010-07-23 01:51:22 +00001020 return -EPERM;
1021
Sathya Perla11ac75e2011-12-13 00:58:50 +00001022 if (vf >= adapter->num_vfs || vlan > 4095)
Ajit Khaparde1da87b72010-07-23 01:51:22 +00001023 return -EINVAL;
1024
1025 if (vlan) {
Ajit Khapardef1f3ee12012-03-18 06:23:41 +00001026 if (adapter->vf_cfg[vf].vlan_tag != vlan) {
1027 /* If this is new value, program it. Else skip. */
1028 adapter->vf_cfg[vf].vlan_tag = vlan;
1029
1030 status = be_cmd_set_hsw_config(adapter, vlan,
1031 vf + 1, adapter->vf_cfg[vf].if_handle);
1032 }
Ajit Khaparde1da87b72010-07-23 01:51:22 +00001033 } else {
Ajit Khapardef1f3ee12012-03-18 06:23:41 +00001034 /* Reset Transparent Vlan Tagging. */
Sathya Perla11ac75e2011-12-13 00:58:50 +00001035 adapter->vf_cfg[vf].vlan_tag = 0;
Ajit Khapardef1f3ee12012-03-18 06:23:41 +00001036 vlan = adapter->vf_cfg[vf].def_vid;
1037 status = be_cmd_set_hsw_config(adapter, vlan, vf + 1,
1038 adapter->vf_cfg[vf].if_handle);
Ajit Khaparde1da87b72010-07-23 01:51:22 +00001039 }
1040
Ajit Khaparde1da87b72010-07-23 01:51:22 +00001041
1042 if (status)
1043 dev_info(&adapter->pdev->dev,
1044 "VLAN %d config on VF %d failed\n", vlan, vf);
1045 return status;
1046}
1047
Ajit Khapardee1d18732010-07-23 01:52:13 +00001048static int be_set_vf_tx_rate(struct net_device *netdev,
1049 int vf, int rate)
1050{
1051 struct be_adapter *adapter = netdev_priv(netdev);
1052 int status = 0;
1053
Sathya Perla11ac75e2011-12-13 00:58:50 +00001054 if (!sriov_enabled(adapter))
Ajit Khapardee1d18732010-07-23 01:52:13 +00001055 return -EPERM;
1056
Ajit Khaparde94f434c2011-12-30 12:15:30 +00001057 if (vf >= adapter->num_vfs)
Ajit Khapardee1d18732010-07-23 01:52:13 +00001058 return -EINVAL;
1059
Ajit Khaparde94f434c2011-12-30 12:15:30 +00001060 if (rate < 100 || rate > 10000) {
1061 dev_err(&adapter->pdev->dev,
1062 "tx rate must be between 100 and 10000 Mbps\n");
1063 return -EINVAL;
1064 }
Ajit Khapardee1d18732010-07-23 01:52:13 +00001065
Ajit Khaparde856c4012011-02-11 13:32:32 +00001066 status = be_cmd_set_qos(adapter, rate / 10, vf + 1);
Ajit Khapardee1d18732010-07-23 01:52:13 +00001067
1068 if (status)
Ajit Khaparde94f434c2011-12-30 12:15:30 +00001069 dev_err(&adapter->pdev->dev,
Ajit Khapardee1d18732010-07-23 01:52:13 +00001070 "tx rate %d on VF %d failed\n", rate, vf);
Ajit Khaparde94f434c2011-12-30 12:15:30 +00001071 else
1072 adapter->vf_cfg[vf].tx_rate = rate;
Ajit Khapardee1d18732010-07-23 01:52:13 +00001073 return status;
1074}
1075
/* Walk PCI config space to count this adapter's VFs.
 * @vf_state selects what is counted: ASSIGNED returns only VFs currently
 * assigned to a guest (PCI_DEV_FLAGS_ASSIGNED); anything else returns the
 * total number of VFs found. Returns 0 if the device has no SR-IOV cap.
 */
static int be_find_vfs(struct be_adapter *adapter, int vf_state)
{
	struct pci_dev *dev, *pdev = adapter->pdev;
	int vfs = 0, assigned_vfs = 0, pos, vf_fn;
	u16 offset, stride;

	pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_SRIOV);
	if (!pos)
		return 0;
	/* VF routing-ID arithmetic parameters from the SR-IOV capability */
	pci_read_config_word(pdev, pos + PCI_SRIOV_VF_OFFSET, &offset);
	pci_read_config_word(pdev, pos + PCI_SRIOV_VF_STRIDE, &stride);

	/* Scan all same-vendor PCI devices; a device is one of our VFs when
	 * it is a virtfn on our bus whose devfn matches the next expected
	 * VF function number (PF devfn + offset + stride * index).
	 */
	dev = pci_get_device(pdev->vendor, PCI_ANY_ID, NULL);
	while (dev) {
		vf_fn = (pdev->devfn + offset + stride * vfs) & 0xFFFF;
		if (dev->is_virtfn && dev->devfn == vf_fn &&
			dev->bus->number == pdev->bus->number) {
			vfs++;
			if (dev->dev_flags & PCI_DEV_FLAGS_ASSIGNED)
				assigned_vfs++;
		}
		dev = pci_get_device(pdev->vendor, PCI_ANY_ID, dev);
	}
	return (vf_state == ASSIGNED) ? assigned_vfs : vfs;
}
1101
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001102static void be_eqd_update(struct be_adapter *adapter, struct be_eq_obj *eqo)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001103{
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001104 struct be_rx_stats *stats = rx_stats(&adapter->rx_obj[eqo->idx]);
Sathya Perla4097f662009-03-24 16:40:13 -07001105 ulong now = jiffies;
Sathya Perlaac124ff2011-07-25 19:10:14 +00001106 ulong delta = now - stats->rx_jiffies;
Sathya Perlaab1594e2011-07-25 19:10:15 +00001107 u64 pkts;
1108 unsigned int start, eqd;
Sathya Perlaac124ff2011-07-25 19:10:14 +00001109
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001110 if (!eqo->enable_aic) {
1111 eqd = eqo->eqd;
1112 goto modify_eqd;
1113 }
1114
1115 if (eqo->idx >= adapter->num_rx_qs)
Sathya Perlaac124ff2011-07-25 19:10:14 +00001116 return;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001117
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001118 stats = rx_stats(&adapter->rx_obj[eqo->idx]);
1119
Sathya Perla4097f662009-03-24 16:40:13 -07001120 /* Wrapped around */
Sathya Perla3abcded2010-10-03 22:12:27 -07001121 if (time_before(now, stats->rx_jiffies)) {
1122 stats->rx_jiffies = now;
Sathya Perla4097f662009-03-24 16:40:13 -07001123 return;
1124 }
1125
Sathya Perlaac124ff2011-07-25 19:10:14 +00001126 /* Update once a second */
1127 if (delta < HZ)
Sathya Perla4097f662009-03-24 16:40:13 -07001128 return;
1129
Sathya Perlaab1594e2011-07-25 19:10:15 +00001130 do {
1131 start = u64_stats_fetch_begin_bh(&stats->sync);
1132 pkts = stats->rx_pkts;
1133 } while (u64_stats_fetch_retry_bh(&stats->sync, start));
1134
Eric Dumazet68c3e5a2011-08-09 06:23:07 +00001135 stats->rx_pps = (unsigned long)(pkts - stats->rx_pkts_prev) / (delta / HZ);
Sathya Perlaab1594e2011-07-25 19:10:15 +00001136 stats->rx_pkts_prev = pkts;
Sathya Perla3abcded2010-10-03 22:12:27 -07001137 stats->rx_jiffies = now;
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001138 eqd = (stats->rx_pps / 110000) << 3;
1139 eqd = min(eqd, eqo->max_eqd);
1140 eqd = max(eqd, eqo->min_eqd);
Sathya Perlaac124ff2011-07-25 19:10:14 +00001141 if (eqd < 10)
1142 eqd = 0;
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001143
1144modify_eqd:
1145 if (eqd != eqo->cur_eqd) {
1146 be_cmd_modify_eqd(adapter, eqo->q.id, eqd);
1147 eqo->cur_eqd = eqd;
Sathya Perlaac124ff2011-07-25 19:10:14 +00001148 }
Sathya Perla4097f662009-03-24 16:40:13 -07001149}
1150
Sathya Perla3abcded2010-10-03 22:12:27 -07001151static void be_rx_stats_update(struct be_rx_obj *rxo,
Sathya Perla2e588f82011-03-11 02:49:26 +00001152 struct be_rx_compl_info *rxcp)
Sathya Perla4097f662009-03-24 16:40:13 -07001153{
Sathya Perlaac124ff2011-07-25 19:10:14 +00001154 struct be_rx_stats *stats = rx_stats(rxo);
Sathya Perla4097f662009-03-24 16:40:13 -07001155
Sathya Perlaab1594e2011-07-25 19:10:15 +00001156 u64_stats_update_begin(&stats->sync);
Sathya Perla3abcded2010-10-03 22:12:27 -07001157 stats->rx_compl++;
Sathya Perla2e588f82011-03-11 02:49:26 +00001158 stats->rx_bytes += rxcp->pkt_size;
Sathya Perla3abcded2010-10-03 22:12:27 -07001159 stats->rx_pkts++;
Sathya Perla2e588f82011-03-11 02:49:26 +00001160 if (rxcp->pkt_type == BE_MULTICAST_PACKET)
Sathya Perla3abcded2010-10-03 22:12:27 -07001161 stats->rx_mcast_pkts++;
Sathya Perla2e588f82011-03-11 02:49:26 +00001162 if (rxcp->err)
Sathya Perlaac124ff2011-07-25 19:10:14 +00001163 stats->rx_compl_err++;
Sathya Perlaab1594e2011-07-25 19:10:15 +00001164 u64_stats_update_end(&stats->sync);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001165}
1166
Sathya Perla2e588f82011-03-11 02:49:26 +00001167static inline bool csum_passed(struct be_rx_compl_info *rxcp)
Ajit Khaparde728a9972009-04-13 15:41:22 -07001168{
Padmanabh Ratnakar19fad862011-03-07 03:08:16 +00001169 /* L4 checksum is not reliable for non TCP/UDP packets.
1170 * Also ignore ipcksm for ipv6 pkts */
Sathya Perla2e588f82011-03-11 02:49:26 +00001171 return (rxcp->tcpf || rxcp->udpf) && rxcp->l4_csum &&
1172 (rxcp->ip_csum || rxcp->ipv6);
Ajit Khaparde728a9972009-04-13 15:41:22 -07001173}
1174
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001175static struct be_rx_page_info *get_rx_page_info(struct be_rx_obj *rxo,
1176 u16 frag_idx)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001177{
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001178 struct be_adapter *adapter = rxo->adapter;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001179 struct be_rx_page_info *rx_page_info;
Sathya Perla3abcded2010-10-03 22:12:27 -07001180 struct be_queue_info *rxq = &rxo->q;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001181
Sathya Perla3abcded2010-10-03 22:12:27 -07001182 rx_page_info = &rxo->page_info_tbl[frag_idx];
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001183 BUG_ON(!rx_page_info->page);
1184
Ajit Khaparde205859a2010-02-09 01:34:21 +00001185 if (rx_page_info->last_page_user) {
Ivan Vecera2b7bceb2011-02-02 08:05:12 +00001186 dma_unmap_page(&adapter->pdev->dev,
1187 dma_unmap_addr(rx_page_info, bus),
1188 adapter->big_page_size, DMA_FROM_DEVICE);
Ajit Khaparde205859a2010-02-09 01:34:21 +00001189 rx_page_info->last_page_user = false;
1190 }
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001191
1192 atomic_dec(&rxq->used);
1193 return rx_page_info;
1194}
1195
1196/* Throwaway the data in the Rx completion */
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001197static void be_rx_compl_discard(struct be_rx_obj *rxo,
1198 struct be_rx_compl_info *rxcp)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001199{
Sathya Perla3abcded2010-10-03 22:12:27 -07001200 struct be_queue_info *rxq = &rxo->q;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001201 struct be_rx_page_info *page_info;
Sathya Perla2e588f82011-03-11 02:49:26 +00001202 u16 i, num_rcvd = rxcp->num_rcvd;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001203
Padmanabh Ratnakare80d9da2011-03-07 03:07:58 +00001204 for (i = 0; i < num_rcvd; i++) {
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001205 page_info = get_rx_page_info(rxo, rxcp->rxq_idx);
Padmanabh Ratnakare80d9da2011-03-07 03:07:58 +00001206 put_page(page_info->page);
1207 memset(page_info, 0, sizeof(*page_info));
Sathya Perla2e588f82011-03-11 02:49:26 +00001208 index_inc(&rxcp->rxq_idx, rxq->len);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001209 }
1210}
1211
/*
 * skb_fill_rx_data forms a complete skb for an ether frame
 * indicated by rxcp: the first BE_HDR_LEN (or fewer) bytes are copied into
 * the skb's linear area, and all remaining fragment pages are attached as
 * skb frags, with consecutive halves of the same physical page coalesced
 * into a single frag slot.
 */
static void skb_fill_rx_data(struct be_rx_obj *rxo, struct sk_buff *skb,
			struct be_rx_compl_info *rxcp)
{
	struct be_queue_info *rxq = &rxo->q;
	struct be_rx_page_info *page_info;
	u16 i, j;
	u16 hdr_len, curr_frag_len, remaining;
	u8 *start;

	page_info = get_rx_page_info(rxo, rxcp->rxq_idx);
	start = page_address(page_info->page) + page_info->page_offset;
	prefetch(start);

	/* Copy data in the first descriptor of this completion */
	curr_frag_len = min(rxcp->pkt_size, rx_frag_size);

	skb->len = curr_frag_len;
	if (curr_frag_len <= BE_HDR_LEN) { /* tiny packet */
		memcpy(skb->data, start, curr_frag_len);
		/* Complete packet has now been moved to data */
		put_page(page_info->page);
		skb->data_len = 0;
		skb->tail += curr_frag_len;
	} else {
		/* copy only the ethernet header; attach the rest as frag 0 */
		hdr_len = ETH_HLEN;
		memcpy(skb->data, start, hdr_len);
		skb_shinfo(skb)->nr_frags = 1;
		skb_frag_set_page(skb, 0, page_info->page);
		skb_shinfo(skb)->frags[0].page_offset =
			page_info->page_offset + hdr_len;
		skb_frag_size_set(&skb_shinfo(skb)->frags[0], curr_frag_len - hdr_len);
		skb->data_len = curr_frag_len - hdr_len;
		skb->truesize += rx_frag_size;
		skb->tail += hdr_len;
	}
	page_info->page = NULL;	/* ownership passed to the skb (or released) */

	if (rxcp->pkt_size <= rx_frag_size) {
		BUG_ON(rxcp->num_rcvd != 1);
		return;
	}

	/* More frags present for this completion */
	index_inc(&rxcp->rxq_idx, rxq->len);
	remaining = rxcp->pkt_size - curr_frag_len;
	for (i = 1, j = 0; i < rxcp->num_rcvd; i++) {
		page_info = get_rx_page_info(rxo, rxcp->rxq_idx);
		curr_frag_len = min(remaining, rx_frag_size);

		/* Coalesce all frags from the same physical page in one slot */
		if (page_info->page_offset == 0) {
			/* Fresh page */
			j++;
			skb_frag_set_page(skb, j, page_info->page);
			skb_shinfo(skb)->frags[j].page_offset =
				page_info->page_offset;
			skb_frag_size_set(&skb_shinfo(skb)->frags[j], 0);
			skb_shinfo(skb)->nr_frags++;
		} else {
			/* second half of the page already held by slot j;
			 * drop the extra reference
			 */
			put_page(page_info->page);
		}

		skb_frag_size_add(&skb_shinfo(skb)->frags[j], curr_frag_len);
		skb->len += curr_frag_len;
		skb->data_len += curr_frag_len;
		skb->truesize += rx_frag_size;
		remaining -= curr_frag_len;
		index_inc(&rxcp->rxq_idx, rxq->len);
		page_info->page = NULL;
	}
	BUG_ON(j > MAX_SKB_FRAGS);
}
1288
Ajit Khaparde5be93b92009-07-21 12:36:19 -07001289/* Process the RX completion indicated by rxcp when GRO is disabled */
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001290static void be_rx_compl_process(struct be_rx_obj *rxo,
1291 struct be_rx_compl_info *rxcp)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001292{
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001293 struct be_adapter *adapter = rxo->adapter;
Michał Mirosław6332c8d2011-04-07 02:43:48 +00001294 struct net_device *netdev = adapter->netdev;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001295 struct sk_buff *skb;
Sathya Perla89420422010-02-17 01:35:26 +00001296
Eric Dumazetbb349bb2012-01-25 03:56:30 +00001297 skb = netdev_alloc_skb_ip_align(netdev, BE_RX_SKB_ALLOC_SIZE);
Sathya Perlaa058a632010-02-17 01:34:22 +00001298 if (unlikely(!skb)) {
Sathya Perlaac124ff2011-07-25 19:10:14 +00001299 rx_stats(rxo)->rx_drops_no_skbs++;
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001300 be_rx_compl_discard(rxo, rxcp);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001301 return;
1302 }
1303
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001304 skb_fill_rx_data(rxo, skb, rxcp);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001305
Michał Mirosław6332c8d2011-04-07 02:43:48 +00001306 if (likely((netdev->features & NETIF_F_RXCSUM) && csum_passed(rxcp)))
Ajit Khaparde728a9972009-04-13 15:41:22 -07001307 skb->ip_summed = CHECKSUM_UNNECESSARY;
Somnath Koturc6ce2f42010-10-25 01:11:58 +00001308 else
1309 skb_checksum_none_assert(skb);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001310
Michał Mirosław6332c8d2011-04-07 02:43:48 +00001311 skb->protocol = eth_type_trans(skb, netdev);
Somnath Koturaaa6dae2012-05-02 03:40:49 +00001312 skb_record_rx_queue(skb, rxo - &adapter->rx_obj[0]);
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001313 if (netdev->features & NETIF_F_RXHASH)
Ajit Khaparde4b972912011-04-06 18:07:43 +00001314 skb->rxhash = rxcp->rss_hash;
1315
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001316
Jiri Pirko343e43c2011-08-25 02:50:51 +00001317 if (rxcp->vlanf)
Ajit Khaparde4c5102f2011-07-12 22:10:01 -07001318 __vlan_hwaccel_put_tag(skb, rxcp->vlan_tag);
1319
1320 netif_receive_skb(skb);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001321}
1322
Ajit Khaparde5be93b92009-07-21 12:36:19 -07001323/* Process the RX completion indicated by rxcp when GRO is enabled */
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001324void be_rx_compl_process_gro(struct be_rx_obj *rxo, struct napi_struct *napi,
1325 struct be_rx_compl_info *rxcp)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001326{
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001327 struct be_adapter *adapter = rxo->adapter;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001328 struct be_rx_page_info *page_info;
Ajit Khaparde5be93b92009-07-21 12:36:19 -07001329 struct sk_buff *skb = NULL;
Sathya Perla3abcded2010-10-03 22:12:27 -07001330 struct be_queue_info *rxq = &rxo->q;
Sathya Perla2e588f82011-03-11 02:49:26 +00001331 u16 remaining, curr_frag_len;
1332 u16 i, j;
Ajit Khaparde3968fa12011-02-20 11:41:53 +00001333
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001334 skb = napi_get_frags(napi);
Ajit Khaparde5be93b92009-07-21 12:36:19 -07001335 if (!skb) {
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001336 be_rx_compl_discard(rxo, rxcp);
Ajit Khaparde5be93b92009-07-21 12:36:19 -07001337 return;
1338 }
1339
Sathya Perla2e588f82011-03-11 02:49:26 +00001340 remaining = rxcp->pkt_size;
1341 for (i = 0, j = -1; i < rxcp->num_rcvd; i++) {
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001342 page_info = get_rx_page_info(rxo, rxcp->rxq_idx);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001343
1344 curr_frag_len = min(remaining, rx_frag_size);
1345
Ajit Khapardebd46cb62009-06-26 02:51:07 +00001346 /* Coalesce all frags from the same physical page in one slot */
1347 if (i == 0 || page_info->page_offset == 0) {
1348 /* First frag or Fresh page */
1349 j++;
Ian Campbellb061b392011-08-29 23:18:23 +00001350 skb_frag_set_page(skb, j, page_info->page);
Ajit Khaparde5be93b92009-07-21 12:36:19 -07001351 skb_shinfo(skb)->frags[j].page_offset =
1352 page_info->page_offset;
Eric Dumazet9e903e02011-10-18 21:00:24 +00001353 skb_frag_size_set(&skb_shinfo(skb)->frags[j], 0);
Ajit Khapardebd46cb62009-06-26 02:51:07 +00001354 } else {
1355 put_page(page_info->page);
1356 }
Eric Dumazet9e903e02011-10-18 21:00:24 +00001357 skb_frag_size_add(&skb_shinfo(skb)->frags[j], curr_frag_len);
Eric Dumazetbdb28a92011-10-13 06:31:02 +00001358 skb->truesize += rx_frag_size;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001359 remaining -= curr_frag_len;
Sathya Perla2e588f82011-03-11 02:49:26 +00001360 index_inc(&rxcp->rxq_idx, rxq->len);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001361 memset(page_info, 0, sizeof(*page_info));
1362 }
Ajit Khapardebd46cb62009-06-26 02:51:07 +00001363 BUG_ON(j > MAX_SKB_FRAGS);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001364
Ajit Khaparde5be93b92009-07-21 12:36:19 -07001365 skb_shinfo(skb)->nr_frags = j + 1;
Sathya Perla2e588f82011-03-11 02:49:26 +00001366 skb->len = rxcp->pkt_size;
1367 skb->data_len = rxcp->pkt_size;
Ajit Khaparde5be93b92009-07-21 12:36:19 -07001368 skb->ip_summed = CHECKSUM_UNNECESSARY;
Somnath Koturaaa6dae2012-05-02 03:40:49 +00001369 skb_record_rx_queue(skb, rxo - &adapter->rx_obj[0]);
Ajit Khaparde4b972912011-04-06 18:07:43 +00001370 if (adapter->netdev->features & NETIF_F_RXHASH)
1371 skb->rxhash = rxcp->rss_hash;
Ajit Khaparde5be93b92009-07-21 12:36:19 -07001372
Jiri Pirko343e43c2011-08-25 02:50:51 +00001373 if (rxcp->vlanf)
Ajit Khaparde4c5102f2011-07-12 22:10:01 -07001374 __vlan_hwaccel_put_tag(skb, rxcp->vlan_tag);
1375
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001376 napi_gro_frags(napi);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001377}
1378
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001379static void be_parse_rx_compl_v1(struct be_eth_rx_compl *compl,
1380 struct be_rx_compl_info *rxcp)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001381{
Sathya Perla2e588f82011-03-11 02:49:26 +00001382 rxcp->pkt_size =
1383 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, pktsize, compl);
1384 rxcp->vlanf = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, vtp, compl);
1385 rxcp->err = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, err, compl);
1386 rxcp->tcpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, tcpf, compl);
Padmanabh Ratnakar9ecb42f2011-03-15 14:57:09 -07001387 rxcp->udpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, udpf, compl);
Sathya Perla2e588f82011-03-11 02:49:26 +00001388 rxcp->ip_csum =
1389 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, ipcksm, compl);
1390 rxcp->l4_csum =
1391 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, l4_cksm, compl);
1392 rxcp->ipv6 =
1393 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, ip_version, compl);
1394 rxcp->rxq_idx =
1395 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, fragndx, compl);
1396 rxcp->num_rcvd =
1397 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, numfrags, compl);
1398 rxcp->pkt_type =
1399 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, cast_enc, compl);
Ajit Khaparde4b972912011-04-06 18:07:43 +00001400 rxcp->rss_hash =
Sarveshwar Bandic2979772012-07-25 21:29:50 +00001401 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, rsshash, compl);
Sathya Perla15d72182011-03-21 20:49:26 +00001402 if (rxcp->vlanf) {
1403 rxcp->vtm = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, vtm,
David S. Miller3c709f82011-05-11 14:26:15 -04001404 compl);
1405 rxcp->vlan_tag = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, vlan_tag,
1406 compl);
Sathya Perla15d72182011-03-21 20:49:26 +00001407 }
Sathya Perla12004ae2011-08-02 19:57:46 +00001408 rxcp->port = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, port, compl);
Sathya Perla2e588f82011-03-11 02:49:26 +00001409}
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001410
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001411static void be_parse_rx_compl_v0(struct be_eth_rx_compl *compl,
1412 struct be_rx_compl_info *rxcp)
Sathya Perla2e588f82011-03-11 02:49:26 +00001413{
1414 rxcp->pkt_size =
1415 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, pktsize, compl);
1416 rxcp->vlanf = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, vtp, compl);
1417 rxcp->err = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, err, compl);
1418 rxcp->tcpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, tcpf, compl);
Padmanabh Ratnakar9ecb42f2011-03-15 14:57:09 -07001419 rxcp->udpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, udpf, compl);
Sathya Perla2e588f82011-03-11 02:49:26 +00001420 rxcp->ip_csum =
1421 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, ipcksm, compl);
1422 rxcp->l4_csum =
1423 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, l4_cksm, compl);
1424 rxcp->ipv6 =
1425 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, ip_version, compl);
1426 rxcp->rxq_idx =
1427 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, fragndx, compl);
1428 rxcp->num_rcvd =
1429 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, numfrags, compl);
1430 rxcp->pkt_type =
1431 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, cast_enc, compl);
Ajit Khaparde4b972912011-04-06 18:07:43 +00001432 rxcp->rss_hash =
Sarveshwar Bandic2979772012-07-25 21:29:50 +00001433 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, rsshash, compl);
Sathya Perla15d72182011-03-21 20:49:26 +00001434 if (rxcp->vlanf) {
1435 rxcp->vtm = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, vtm,
David S. Miller3c709f82011-05-11 14:26:15 -04001436 compl);
1437 rxcp->vlan_tag = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, vlan_tag,
1438 compl);
Sathya Perla15d72182011-03-21 20:49:26 +00001439 }
Sathya Perla12004ae2011-08-02 19:57:46 +00001440 rxcp->port = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, port, compl);
Sathya Perla2e588f82011-03-11 02:49:26 +00001441}
1442
/* Consume the next valid RX completion from rxo's CQ, or return NULL if
 * none is pending. The returned descriptor is rxo->rxcp (a single per-ring
 * scratch struct), so it is only valid until the next call on this rxo.
 */
static struct be_rx_compl_info *be_rx_compl_get(struct be_rx_obj *rxo)
{
	struct be_eth_rx_compl *compl = queue_tail_node(&rxo->cq);
	struct be_rx_compl_info *rxcp = &rxo->rxcp;
	struct be_adapter *adapter = rxo->adapter;

	/* For checking the valid bit it is Ok to use either definition as the
	 * valid bit is at the same position in both v0 and v1 Rx compl */
	if (compl->dw[offsetof(struct amap_eth_rx_compl_v1, valid) / 32] == 0)
		return NULL;

	/* Read barrier: make sure the rest of the completion is not read
	 * before the valid bit has been observed set.
	 */
	rmb();
	be_dws_le_to_cpu(compl, sizeof(*compl));

	/* BE3-native chips emit the v1 completion layout; others use v0 */
	if (adapter->be3_native)
		be_parse_rx_compl_v1(compl, rxcp);
	else
		be_parse_rx_compl_v0(compl, rxcp);

	if (rxcp->vlanf) {
		/* vlanf could be wrongly set in some cards.
		 * ignore if vtm is not set */
		if ((adapter->function_mode & FLEX10_MODE) && !rxcp->vtm)
			rxcp->vlanf = 0;

		/* Lancer delivers the tag in CPU order already */
		if (!lancer_chip(adapter))
			rxcp->vlan_tag = swab16(rxcp->vlan_tag);

		/* Hide the PVID tag from the stack unless the vlan is also
		 * configured on the interface.
		 */
		if (adapter->pvid == (rxcp->vlan_tag & VLAN_VID_MASK) &&
		    !adapter->vlan_tag[rxcp->vlan_tag])
			rxcp->vlanf = 0;
	}

	/* As the compl has been parsed, reset it; we wont touch it again */
	compl->dw[offsetof(struct amap_eth_rx_compl_v1, valid) / 32] = 0;

	queue_tail_inc(&rxo->cq);
	return rxcp;
}
1482
Eric Dumazet1829b082011-03-01 05:48:12 +00001483static inline struct page *be_alloc_pages(u32 size, gfp_t gfp)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001484{
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001485 u32 order = get_order(size);
Eric Dumazet1829b082011-03-01 05:48:12 +00001486
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001487 if (order > 0)
Eric Dumazet1829b082011-03-01 05:48:12 +00001488 gfp |= __GFP_COMP;
1489 return alloc_pages(gfp, order);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001490}
1491
1492/*
1493 * Allocate a page, split it to fragments of size rx_frag_size and post as
1494 * receive buffers to BE
1495 */
Eric Dumazet1829b082011-03-01 05:48:12 +00001496static void be_post_rx_frags(struct be_rx_obj *rxo, gfp_t gfp)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001497{
Sathya Perla3abcded2010-10-03 22:12:27 -07001498 struct be_adapter *adapter = rxo->adapter;
Sathya Perla26d92f92010-01-21 22:52:08 -08001499 struct be_rx_page_info *page_info = NULL, *prev_page_info = NULL;
Sathya Perla3abcded2010-10-03 22:12:27 -07001500 struct be_queue_info *rxq = &rxo->q;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001501 struct page *pagep = NULL;
1502 struct be_eth_rx_d *rxd;
1503 u64 page_dmaaddr = 0, frag_dmaaddr;
1504 u32 posted, page_offset = 0;
1505
Sathya Perla3abcded2010-10-03 22:12:27 -07001506 page_info = &rxo->page_info_tbl[rxq->head];
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001507 for (posted = 0; posted < MAX_RX_POST && !page_info->page; posted++) {
1508 if (!pagep) {
Eric Dumazet1829b082011-03-01 05:48:12 +00001509 pagep = be_alloc_pages(adapter->big_page_size, gfp);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001510 if (unlikely(!pagep)) {
Sathya Perlaac124ff2011-07-25 19:10:14 +00001511 rx_stats(rxo)->rx_post_fail++;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001512 break;
1513 }
Ivan Vecera2b7bceb2011-02-02 08:05:12 +00001514 page_dmaaddr = dma_map_page(&adapter->pdev->dev, pagep,
1515 0, adapter->big_page_size,
1516 DMA_FROM_DEVICE);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001517 page_info->page_offset = 0;
1518 } else {
1519 get_page(pagep);
1520 page_info->page_offset = page_offset + rx_frag_size;
1521 }
1522 page_offset = page_info->page_offset;
1523 page_info->page = pagep;
FUJITA Tomonorifac6da52010-04-01 16:53:22 +00001524 dma_unmap_addr_set(page_info, bus, page_dmaaddr);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001525 frag_dmaaddr = page_dmaaddr + page_info->page_offset;
1526
1527 rxd = queue_head_node(rxq);
1528 rxd->fragpa_lo = cpu_to_le32(frag_dmaaddr & 0xFFFFFFFF);
1529 rxd->fragpa_hi = cpu_to_le32(upper_32_bits(frag_dmaaddr));
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001530
1531 /* Any space left in the current big page for another frag? */
1532 if ((page_offset + rx_frag_size + rx_frag_size) >
1533 adapter->big_page_size) {
1534 pagep = NULL;
1535 page_info->last_page_user = true;
1536 }
Sathya Perla26d92f92010-01-21 22:52:08 -08001537
1538 prev_page_info = page_info;
1539 queue_head_inc(rxq);
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001540 page_info = &rxo->page_info_tbl[rxq->head];
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001541 }
1542 if (pagep)
Sathya Perla26d92f92010-01-21 22:52:08 -08001543 prev_page_info->last_page_user = true;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001544
1545 if (posted) {
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001546 atomic_add(posted, &rxq->used);
Sathya Perla8788fdc2009-07-27 22:52:03 +00001547 be_rxq_notify(adapter, rxq->id, posted);
Sathya Perlaea1dae12009-03-19 23:56:20 -07001548 } else if (atomic_read(&rxq->used) == 0) {
1549 /* Let be_worker replenish when memory is available */
Sathya Perla3abcded2010-10-03 22:12:27 -07001550 rxo->rx_post_starved = true;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001551 }
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001552}
1553
/* Consume the next valid TX completion from @tx_cq, or return NULL if none
 * is pending. The entry's valid dword is zeroed after the read so it is not
 * processed twice.
 */
static struct be_eth_tx_compl *be_tx_compl_get(struct be_queue_info *tx_cq)
{
	struct be_eth_tx_compl *txcp = queue_tail_node(tx_cq);

	if (txcp->dw[offsetof(struct amap_eth_tx_compl, valid) / 32] == 0)
		return NULL;

	/* Read barrier: don't read the rest of the completion before the
	 * valid bit has been observed set.
	 */
	rmb();
	be_dws_le_to_cpu(txcp, sizeof(*txcp));

	txcp->dw[offsetof(struct amap_eth_tx_compl, valid) / 32] = 0;

	queue_tail_inc(tx_cq);
	return txcp;
}
1569
Sathya Perla3c8def92011-06-12 20:01:58 +00001570static u16 be_tx_compl_process(struct be_adapter *adapter,
1571 struct be_tx_obj *txo, u16 last_index)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001572{
Sathya Perla3c8def92011-06-12 20:01:58 +00001573 struct be_queue_info *txq = &txo->q;
Alexander Duycka73b7962009-12-02 16:48:18 +00001574 struct be_eth_wrb *wrb;
Sathya Perla3c8def92011-06-12 20:01:58 +00001575 struct sk_buff **sent_skbs = txo->sent_skb_list;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001576 struct sk_buff *sent_skb;
Sathya Perlaec43b1a2010-03-22 20:41:34 +00001577 u16 cur_index, num_wrbs = 1; /* account for hdr wrb */
1578 bool unmap_skb_hdr = true;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001579
Sathya Perlaec43b1a2010-03-22 20:41:34 +00001580 sent_skb = sent_skbs[txq->tail];
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001581 BUG_ON(!sent_skb);
Sathya Perlaec43b1a2010-03-22 20:41:34 +00001582 sent_skbs[txq->tail] = NULL;
1583
1584 /* skip header wrb */
Alexander Duycka73b7962009-12-02 16:48:18 +00001585 queue_tail_inc(txq);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001586
Sathya Perlaec43b1a2010-03-22 20:41:34 +00001587 do {
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001588 cur_index = txq->tail;
Alexander Duycka73b7962009-12-02 16:48:18 +00001589 wrb = queue_tail_node(txq);
Ivan Vecera2b7bceb2011-02-02 08:05:12 +00001590 unmap_tx_frag(&adapter->pdev->dev, wrb,
1591 (unmap_skb_hdr && skb_headlen(sent_skb)));
Sathya Perlaec43b1a2010-03-22 20:41:34 +00001592 unmap_skb_hdr = false;
1593
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001594 num_wrbs++;
1595 queue_tail_inc(txq);
Sathya Perlaec43b1a2010-03-22 20:41:34 +00001596 } while (cur_index != last_index);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001597
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001598 kfree_skb(sent_skb);
Padmanabh Ratnakar4d586b82011-05-10 05:13:57 +00001599 return num_wrbs;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001600}
1601
/* Return the number of events in the event queue */
static inline int events_get(struct be_eq_obj *eqo)
{
	struct be_eq_entry *eqe;
	int num = 0;

	do {
		eqe = queue_tail_node(&eqo->q);
		/* A zero entry means no more pending events */
		if (eqe->evt == 0)
			break;

		/* Read barrier before consuming/clearing the entry */
		rmb();
		/* Clear the entry so it is not counted again */
		eqe->evt = 0;
		num++;
		queue_tail_inc(&eqo->q);
	} while (true);

	return num;
}
1621
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001622static int event_handle(struct be_eq_obj *eqo)
Sathya Perla859b1e42009-08-10 03:43:51 +00001623{
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001624 bool rearm = false;
1625 int num = events_get(eqo);
Sathya Perla859b1e42009-08-10 03:43:51 +00001626
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001627 /* Deal with any spurious interrupts that come without events */
1628 if (!num)
1629 rearm = true;
Sathya Perla859b1e42009-08-10 03:43:51 +00001630
Padmanabh Ratnakaraf311fe2012-04-25 01:46:39 +00001631 if (num || msix_enabled(eqo->adapter))
1632 be_eq_notify(eqo->adapter, eqo->q.id, rearm, true, num);
1633
Sathya Perla859b1e42009-08-10 03:43:51 +00001634 if (num)
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001635 napi_schedule(&eqo->napi);
1636
1637 return num;
Sathya Perla859b1e42009-08-10 03:43:51 +00001638}
1639
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001640/* Leaves the EQ is disarmed state */
1641static void be_eq_clean(struct be_eq_obj *eqo)
1642{
1643 int num = events_get(eqo);
1644
1645 be_eq_notify(eqo->adapter, eqo->q.id, false, true, num);
1646}
1647
1648static void be_rx_cq_clean(struct be_rx_obj *rxo)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001649{
1650 struct be_rx_page_info *page_info;
Sathya Perla3abcded2010-10-03 22:12:27 -07001651 struct be_queue_info *rxq = &rxo->q;
1652 struct be_queue_info *rx_cq = &rxo->cq;
Sathya Perla2e588f82011-03-11 02:49:26 +00001653 struct be_rx_compl_info *rxcp;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001654 u16 tail;
1655
1656 /* First cleanup pending rx completions */
Sathya Perla3abcded2010-10-03 22:12:27 -07001657 while ((rxcp = be_rx_compl_get(rxo)) != NULL) {
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001658 be_rx_compl_discard(rxo, rxcp);
1659 be_cq_notify(rxo->adapter, rx_cq->id, false, 1);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001660 }
1661
1662 /* Then free posted rx buffer that were not used */
1663 tail = (rxq->head + rxq->len - atomic_read(&rxq->used)) % rxq->len;
Sathya Perlacdab23b2009-08-10 03:43:23 +00001664 for (; atomic_read(&rxq->used) > 0; index_inc(&tail, rxq->len)) {
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001665 page_info = get_rx_page_info(rxo, tail);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001666 put_page(page_info->page);
1667 memset(page_info, 0, sizeof(*page_info));
1668 }
1669 BUG_ON(atomic_read(&rxq->used));
Sathya Perla482c9e72011-06-29 23:33:17 +00001670 rxq->tail = rxq->head = 0;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001671}
1672
/* Drain TX completions on all tx queues, waiting up to ~200ms; any tx
 * requests still outstanding after that (their completions will never
 * arrive) are force-freed.
 */
static void be_tx_compl_clean(struct be_adapter *adapter)
{
	struct be_tx_obj *txo;
	struct be_queue_info *txq;
	struct be_eth_tx_compl *txcp;
	u16 end_idx, cmpl = 0, timeo = 0, num_wrbs = 0;
	struct sk_buff *sent_skb;
	bool dummy_wrb;
	int i, pending_txqs;

	/* Wait for a max of 200ms for all the tx-completions to arrive. */
	do {
		pending_txqs = adapter->num_tx_qs;

		for_all_tx_queues(adapter, txo, i) {
			txq = &txo->q;
			/* Reclaim every completion currently in this CQ */
			while ((txcp = be_tx_compl_get(&txo->cq))) {
				end_idx =
					AMAP_GET_BITS(struct amap_eth_tx_compl,
						      wrb_index, txcp);
				num_wrbs += be_tx_compl_process(adapter, txo,
								end_idx);
				cmpl++;
			}
			if (cmpl) {
				/* Ack the completions and release the wrbs */
				be_cq_notify(adapter, txo->cq.id, false, cmpl);
				atomic_sub(num_wrbs, &txq->used);
				cmpl = 0;
				num_wrbs = 0;
			}
			/* A fully drained txq no longer blocks the wait */
			if (atomic_read(&txq->used) == 0)
				pending_txqs--;
		}

		if (pending_txqs == 0 || ++timeo > 200)
			break;

		mdelay(1);
	} while (true);

	for_all_tx_queues(adapter, txo, i) {
		txq = &txo->q;
		if (atomic_read(&txq->used))
			dev_err(&adapter->pdev->dev, "%d pending tx-compls\n",
				atomic_read(&txq->used));

		/* free posted tx for which compls will never arrive */
		while (atomic_read(&txq->used)) {
			sent_skb = txo->sent_skb_list[txq->tail];
			end_idx = txq->tail;
			/* Compute the last wrb index of this request so
			 * be_tx_compl_process() can unmap/free the whole
			 * request as if a completion had arrived.
			 */
			num_wrbs = wrb_cnt_for_skb(adapter, sent_skb,
						   &dummy_wrb);
			index_adv(&end_idx, num_wrbs - 1, txq->len);
			num_wrbs = be_tx_compl_process(adapter, txo, end_idx);
			atomic_sub(num_wrbs, &txq->used);
		}
	}
}
1731
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001732static void be_evt_queues_destroy(struct be_adapter *adapter)
1733{
1734 struct be_eq_obj *eqo;
1735 int i;
1736
1737 for_all_evt_queues(adapter, eqo, i) {
Padmanabh Ratnakar19d59aa2012-07-12 03:57:21 +00001738 if (eqo->q.created) {
1739 be_eq_clean(eqo);
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001740 be_cmd_q_destroy(adapter, &eqo->q, QTYPE_EQ);
Padmanabh Ratnakar19d59aa2012-07-12 03:57:21 +00001741 }
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001742 be_queue_free(adapter, &eqo->q);
1743 }
1744}
1745
1746static int be_evt_queues_create(struct be_adapter *adapter)
1747{
1748 struct be_queue_info *eq;
1749 struct be_eq_obj *eqo;
1750 int i, rc;
1751
1752 adapter->num_evt_qs = num_irqs(adapter);
1753
1754 for_all_evt_queues(adapter, eqo, i) {
1755 eqo->adapter = adapter;
1756 eqo->tx_budget = BE_TX_BUDGET;
1757 eqo->idx = i;
1758 eqo->max_eqd = BE_MAX_EQD;
1759 eqo->enable_aic = true;
1760
1761 eq = &eqo->q;
1762 rc = be_queue_alloc(adapter, eq, EVNT_Q_LEN,
1763 sizeof(struct be_eq_entry));
1764 if (rc)
1765 return rc;
1766
1767 rc = be_cmd_eq_create(adapter, eq, eqo->cur_eqd);
1768 if (rc)
1769 return rc;
1770 }
Sathya Perla1cfafab2012-02-23 18:50:15 +00001771 return 0;
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001772}
1773
Sathya Perla5fb379e2009-06-18 00:02:59 +00001774static void be_mcc_queues_destroy(struct be_adapter *adapter)
1775{
1776 struct be_queue_info *q;
Sathya Perla5fb379e2009-06-18 00:02:59 +00001777
Sathya Perla8788fdc2009-07-27 22:52:03 +00001778 q = &adapter->mcc_obj.q;
Sathya Perla5fb379e2009-06-18 00:02:59 +00001779 if (q->created)
Sathya Perla8788fdc2009-07-27 22:52:03 +00001780 be_cmd_q_destroy(adapter, q, QTYPE_MCCQ);
Sathya Perla5fb379e2009-06-18 00:02:59 +00001781 be_queue_free(adapter, q);
1782
Sathya Perla8788fdc2009-07-27 22:52:03 +00001783 q = &adapter->mcc_obj.cq;
Sathya Perla5fb379e2009-06-18 00:02:59 +00001784 if (q->created)
Sathya Perla8788fdc2009-07-27 22:52:03 +00001785 be_cmd_q_destroy(adapter, q, QTYPE_CQ);
Sathya Perla5fb379e2009-06-18 00:02:59 +00001786 be_queue_free(adapter, q);
1787}
1788
1789/* Must be called only after TX qs are created as MCC shares TX EQ */
1790static int be_mcc_queues_create(struct be_adapter *adapter)
1791{
1792 struct be_queue_info *q, *cq;
Sathya Perla5fb379e2009-06-18 00:02:59 +00001793
Sathya Perla8788fdc2009-07-27 22:52:03 +00001794 cq = &adapter->mcc_obj.cq;
Sathya Perla5fb379e2009-06-18 00:02:59 +00001795 if (be_queue_alloc(adapter, cq, MCC_CQ_LEN,
Sathya Perlaefd2e402009-07-27 22:53:10 +00001796 sizeof(struct be_mcc_compl)))
Sathya Perla5fb379e2009-06-18 00:02:59 +00001797 goto err;
1798
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001799 /* Use the default EQ for MCC completions */
1800 if (be_cmd_cq_create(adapter, cq, &mcc_eqo(adapter)->q, true, 0))
Sathya Perla5fb379e2009-06-18 00:02:59 +00001801 goto mcc_cq_free;
1802
Sathya Perla8788fdc2009-07-27 22:52:03 +00001803 q = &adapter->mcc_obj.q;
Sathya Perla5fb379e2009-06-18 00:02:59 +00001804 if (be_queue_alloc(adapter, q, MCC_Q_LEN, sizeof(struct be_mcc_wrb)))
1805 goto mcc_cq_destroy;
1806
Sathya Perla8788fdc2009-07-27 22:52:03 +00001807 if (be_cmd_mccq_create(adapter, q, cq))
Sathya Perla5fb379e2009-06-18 00:02:59 +00001808 goto mcc_q_free;
1809
1810 return 0;
1811
1812mcc_q_free:
1813 be_queue_free(adapter, q);
1814mcc_cq_destroy:
Sathya Perla8788fdc2009-07-27 22:52:03 +00001815 be_cmd_q_destroy(adapter, cq, QTYPE_CQ);
Sathya Perla5fb379e2009-06-18 00:02:59 +00001816mcc_cq_free:
1817 be_queue_free(adapter, cq);
1818err:
1819 return -1;
1820}
1821
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001822static void be_tx_queues_destroy(struct be_adapter *adapter)
1823{
1824 struct be_queue_info *q;
Sathya Perla3c8def92011-06-12 20:01:58 +00001825 struct be_tx_obj *txo;
1826 u8 i;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001827
Sathya Perla3c8def92011-06-12 20:01:58 +00001828 for_all_tx_queues(adapter, txo, i) {
1829 q = &txo->q;
1830 if (q->created)
1831 be_cmd_q_destroy(adapter, q, QTYPE_TXQ);
1832 be_queue_free(adapter, q);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001833
Sathya Perla3c8def92011-06-12 20:01:58 +00001834 q = &txo->cq;
1835 if (q->created)
1836 be_cmd_q_destroy(adapter, q, QTYPE_CQ);
1837 be_queue_free(adapter, q);
1838 }
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001839}
1840
Sathya Perladafc0fe2011-10-24 02:45:02 +00001841static int be_num_txqs_want(struct be_adapter *adapter)
1842{
Sathya Perla39f1d942012-05-08 19:41:24 +00001843 if (sriov_want(adapter) || be_is_mc(adapter) ||
1844 lancer_chip(adapter) || !be_physfn(adapter) ||
1845 adapter->generation == BE_GEN2)
Sathya Perladafc0fe2011-10-24 02:45:02 +00001846 return 1;
1847 else
1848 return MAX_TX_QS;
1849}
1850
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001851static int be_tx_cqs_create(struct be_adapter *adapter)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001852{
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001853 struct be_queue_info *cq, *eq;
1854 int status;
Sathya Perla3c8def92011-06-12 20:01:58 +00001855 struct be_tx_obj *txo;
1856 u8 i;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001857
Sathya Perladafc0fe2011-10-24 02:45:02 +00001858 adapter->num_tx_qs = be_num_txqs_want(adapter);
Padmanabh Ratnakar3bb62f42011-11-25 05:48:06 +00001859 if (adapter->num_tx_qs != MAX_TX_QS) {
1860 rtnl_lock();
Sathya Perladafc0fe2011-10-24 02:45:02 +00001861 netif_set_real_num_tx_queues(adapter->netdev,
1862 adapter->num_tx_qs);
Padmanabh Ratnakar3bb62f42011-11-25 05:48:06 +00001863 rtnl_unlock();
1864 }
Sathya Perladafc0fe2011-10-24 02:45:02 +00001865
Sathya Perla3c8def92011-06-12 20:01:58 +00001866 for_all_tx_queues(adapter, txo, i) {
1867 cq = &txo->cq;
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001868 status = be_queue_alloc(adapter, cq, TX_CQ_LEN,
1869 sizeof(struct be_eth_tx_compl));
1870 if (status)
1871 return status;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001872
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001873 /* If num_evt_qs is less than num_tx_qs, then more than
1874 * one txq share an eq
1875 */
1876 eq = &adapter->eq_obj[i % adapter->num_evt_qs].q;
1877 status = be_cmd_cq_create(adapter, cq, eq, false, 3);
1878 if (status)
1879 return status;
Sathya Perla3c8def92011-06-12 20:01:58 +00001880 }
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001881 return 0;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001882}
1883
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001884static int be_tx_qs_create(struct be_adapter *adapter)
1885{
1886 struct be_tx_obj *txo;
1887 int i, status;
1888
1889 for_all_tx_queues(adapter, txo, i) {
1890 status = be_queue_alloc(adapter, &txo->q, TX_Q_LEN,
1891 sizeof(struct be_eth_wrb));
1892 if (status)
1893 return status;
1894
1895 status = be_cmd_txq_create(adapter, &txo->q, &txo->cq);
1896 if (status)
1897 return status;
1898 }
1899
1900 return 0;
1901}
1902
1903static void be_rx_cqs_destroy(struct be_adapter *adapter)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001904{
1905 struct be_queue_info *q;
Sathya Perla3abcded2010-10-03 22:12:27 -07001906 struct be_rx_obj *rxo;
1907 int i;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001908
Sathya Perla3abcded2010-10-03 22:12:27 -07001909 for_all_rx_queues(adapter, rxo, i) {
Sathya Perla3abcded2010-10-03 22:12:27 -07001910 q = &rxo->cq;
1911 if (q->created)
1912 be_cmd_q_destroy(adapter, q, QTYPE_CQ);
1913 be_queue_free(adapter, q);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001914 }
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001915}
1916
/* Decides how many RX queues to use, then allocates and creates one RX
 * completion queue per RX ring, distributing CQs round-robin across the
 * event queues (ring i -> EQ i % num_evt_qs).
 * Returns 0 on success or a non-zero status from queue alloc/create.
 */
static int be_rx_cqs_create(struct be_adapter *adapter)
{
	struct be_queue_info *eq, *cq;
	struct be_rx_obj *rxo;
	int rc, i;

	/* We'll create as many RSS rings as there are irqs.
	 * But when there's only one irq there's no use creating RSS rings
	 */
	adapter->num_rx_qs = (num_irqs(adapter) > 1) ?
				num_irqs(adapter) + 1 : 1;
	if (adapter->num_rx_qs != MAX_RX_QS) {
		/* Tell the stack how many RX queues are actually in use */
		rtnl_lock();
		netif_set_real_num_rx_queues(adapter->netdev,
					     adapter->num_rx_qs);
		rtnl_unlock();
	}

	adapter->big_page_size = (1 << get_order(rx_frag_size)) * PAGE_SIZE;
	for_all_rx_queues(adapter, rxo, i) {
		rxo->adapter = adapter;
		cq = &rxo->cq;
		rc = be_queue_alloc(adapter, cq, RX_CQ_LEN,
				sizeof(struct be_eth_rx_compl));
		if (rc)
			return rc;

		/* Spread the CQs over the available EQs round-robin */
		eq = &adapter->eq_obj[i % adapter->num_evt_qs].q;
		rc = be_cmd_cq_create(adapter, cq, eq, false, 3);
		if (rc)
			return rc;
	}

	if (adapter->num_rx_qs != MAX_RX_QS)
		dev_info(&adapter->pdev->dev,
			"Created only %d receive queues\n", adapter->num_rx_qs);

	return 0;
}
1956
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001957static irqreturn_t be_intx(int irq, void *dev)
1958{
1959 struct be_adapter *adapter = dev;
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001960 int num_evts;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001961
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001962 /* With INTx only one EQ is used */
1963 num_evts = event_handle(&adapter->eq_obj[0]);
1964 if (num_evts)
1965 return IRQ_HANDLED;
1966 else
1967 return IRQ_NONE;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001968}
1969
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001970static irqreturn_t be_msix(int irq, void *dev)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001971{
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001972 struct be_eq_obj *eqo = dev;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001973
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001974 event_handle(eqo);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001975 return IRQ_HANDLED;
1976}
1977
Sathya Perla2e588f82011-03-11 02:49:26 +00001978static inline bool do_gro(struct be_rx_compl_info *rxcp)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001979{
Sathya Perla2e588f82011-03-11 02:49:26 +00001980 return (rxcp->tcpf && !rxcp->err) ? true : false;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001981}
1982
/* Reaps up to @budget RX completions from rxo's CQ, discarding flush
 * and bogus completions and feeding real packets to the stack (via GRO
 * when do_gro() allows it). Notifies the CQ and replenishes RX buffers
 * when needed. Returns the number of completions processed.
 */
static int be_process_rx(struct be_rx_obj *rxo, struct napi_struct *napi,
			int budget)
{
	struct be_adapter *adapter = rxo->adapter;
	struct be_queue_info *rx_cq = &rxo->cq;
	struct be_rx_compl_info *rxcp;
	u32 work_done;

	for (work_done = 0; work_done < budget; work_done++) {
		rxcp = be_rx_compl_get(rxo);
		if (!rxcp)
			break;

		/* Is it a flush compl that has no data */
		if (unlikely(rxcp->num_rcvd == 0))
			goto loop_continue;

		/* Discard compl with partial DMA Lancer B0 */
		if (unlikely(!rxcp->pkt_size)) {
			be_rx_compl_discard(rxo, rxcp);
			goto loop_continue;
		}

		/* On BE drop pkts that arrive due to imperfect filtering in
		 * promiscuous mode on some skews
		 */
		if (unlikely(rxcp->port != adapter->port_num &&
				!lancer_chip(adapter))) {
			be_rx_compl_discard(rxo, rxcp);
			goto loop_continue;
		}

		if (do_gro(rxcp))
			be_rx_compl_process_gro(rxo, napi, rxcp);
		else
			be_rx_compl_process(rxo, rxcp);
loop_continue:
		/* Stats are updated even for discarded completions */
		be_rx_stats_update(rxo, rxcp);
	}

	if (work_done) {
		be_cq_notify(adapter, rx_cq->id, true, work_done);

		/* Repost RX buffers when the queue drops below the
		 * refill watermark; GFP_ATOMIC as we're in softirq.
		 */
		if (atomic_read(&rxo->q.used) < RX_FRAGS_REFILL_WM)
			be_post_rx_frags(rxo, GFP_ATOMIC);
	}

	return work_done;
}
2032
/* Reaps up to @budget TX completions from txo's CQ, frees the wrbs
 * they cover and wakes netdev subqueue @idx if it was stopped and at
 * least half the TX ring is free again.
 * Returns true when the CQ was drained (fewer than @budget
 * completions found), false when there may be more work.
 */
static bool be_process_tx(struct be_adapter *adapter, struct be_tx_obj *txo,
			int budget, int idx)
{
	struct be_eth_tx_compl *txcp;
	int num_wrbs = 0, work_done;

	for (work_done = 0; work_done < budget; work_done++) {
		txcp = be_tx_compl_get(&txo->cq);
		if (!txcp)
			break;
		num_wrbs += be_tx_compl_process(adapter, txo,
				AMAP_GET_BITS(struct amap_eth_tx_compl,
					wrb_index, txcp));
	}

	if (work_done) {
		be_cq_notify(adapter, txo->cq.id, true, work_done);
		atomic_sub(num_wrbs, &txo->q.used);

		/* As Tx wrbs have been freed up, wake up netdev queue
		 * if it was stopped due to lack of tx wrbs. */
		if (__netif_subqueue_stopped(adapter->netdev, idx) &&
			atomic_read(&txo->q.used) < txo->q.len / 2) {
			netif_wake_subqueue(adapter->netdev, idx);
		}

		/* u64 stats require the seqcount-style begin/end bracket */
		u64_stats_update_begin(&tx_stats(txo)->sync_compl);
		tx_stats(txo)->tx_compl += work_done;
		u64_stats_update_end(&tx_stats(txo)->sync_compl);
	}
	return (work_done < budget); /* Done */
}
Sathya Perla3c8def92011-06-12 20:01:58 +00002065
/* NAPI poll handler. Services every TX and RX queue attached to this
 * EQ, plus MCC completions when this is the MCC EQ. Completes NAPI and
 * re-arms the EQ only when all work fit in @budget; otherwise returns
 * @budget so the core keeps polling, after counting and clearing the
 * pending events without re-arming.
 */
int be_poll(struct napi_struct *napi, int budget)
{
	struct be_eq_obj *eqo = container_of(napi, struct be_eq_obj, napi);
	struct be_adapter *adapter = eqo->adapter;
	int max_work = 0, work, i;
	bool tx_done;

	/* Process all TXQs serviced by this EQ */
	for (i = eqo->idx; i < adapter->num_tx_qs; i += adapter->num_evt_qs) {
		tx_done = be_process_tx(adapter, &adapter->tx_obj[i],
					eqo->tx_budget, i);
		if (!tx_done)
			max_work = budget;	/* force another poll round */
	}

	/* This loop will iterate twice for EQ0 in which
	 * completions of the last RXQ (default one) are also processed
	 * For other EQs the loop iterates only once
	 */
	for (i = eqo->idx; i < adapter->num_rx_qs; i += adapter->num_evt_qs) {
		work = be_process_rx(&adapter->rx_obj[i], napi, budget);
		max_work = max(work, max_work);
	}

	if (is_mcc_eqo(eqo))
		be_process_mcc(adapter);

	if (max_work < budget) {
		napi_complete(napi);
		be_eq_notify(adapter, eqo->q.id, true, false, 0);
	} else {
		/* As we'll continue in polling mode, count and clear events */
		be_eq_notify(adapter, eqo->q.id, false, false, events_get(eqo));
	}
	return max_work;
}
2102
/* Reads the per-chip error registers (SLIPORT registers on Lancer, UE
 * status words via PCI config space on BE) and, if an unmasked error
 * is present, latches adapter->hw_error and logs the details.
 * Returns immediately when a critical error was already recorded.
 */
void be_detect_error(struct be_adapter *adapter)
{
	u32 ue_lo = 0, ue_hi = 0, ue_lo_mask = 0, ue_hi_mask = 0;
	u32 sliport_status = 0, sliport_err1 = 0, sliport_err2 = 0;
	u32 i;

	if (be_crit_error(adapter))
		return;

	if (lancer_chip(adapter)) {
		sliport_status = ioread32(adapter->db + SLIPORT_STATUS_OFFSET);
		if (sliport_status & SLIPORT_STATUS_ERR_MASK) {
			sliport_err1 = ioread32(adapter->db +
					SLIPORT_ERROR1_OFFSET);
			sliport_err2 = ioread32(adapter->db +
					SLIPORT_ERROR2_OFFSET);
		}
	} else {
		pci_read_config_dword(adapter->pdev,
				PCICFG_UE_STATUS_LOW, &ue_lo);
		pci_read_config_dword(adapter->pdev,
				PCICFG_UE_STATUS_HIGH, &ue_hi);
		pci_read_config_dword(adapter->pdev,
				PCICFG_UE_STATUS_LOW_MASK, &ue_lo_mask);
		pci_read_config_dword(adapter->pdev,
				PCICFG_UE_STATUS_HI_MASK, &ue_hi_mask);

		/* Only unmasked bits count as real errors */
		ue_lo = (ue_lo & ~ue_lo_mask);
		ue_hi = (ue_hi & ~ue_hi_mask);
	}

	if (ue_lo || ue_hi ||
		sliport_status & SLIPORT_STATUS_ERR_MASK) {
		adapter->hw_error = true;
		dev_err(&adapter->pdev->dev,
			"Error detected in the card\n");
	}

	if (sliport_status & SLIPORT_STATUS_ERR_MASK) {
		dev_err(&adapter->pdev->dev,
			"ERR: sliport status 0x%x\n", sliport_status);
		dev_err(&adapter->pdev->dev,
			"ERR: sliport error1 0x%x\n", sliport_err1);
		dev_err(&adapter->pdev->dev,
			"ERR: sliport error2 0x%x\n", sliport_err2);
	}

	/* Decode each set UE bit to its human-readable description */
	if (ue_lo) {
		for (i = 0; ue_lo; ue_lo >>= 1, i++) {
			if (ue_lo & 1)
				dev_err(&adapter->pdev->dev,
				"UE: %s bit set\n", ue_status_low_desc[i]);
		}
	}

	if (ue_hi) {
		for (i = 0; ue_hi; ue_hi >>= 1, i++) {
			if (ue_hi & 1)
				dev_err(&adapter->pdev->dev,
				"UE: %s bit set\n", ue_status_hi_desc[i]);
		}
	}

}
2167
Sathya Perla8d56ff12009-11-22 22:02:26 +00002168static void be_msix_disable(struct be_adapter *adapter)
2169{
Sathya Perlaac6a0c42011-03-21 20:49:25 +00002170 if (msix_enabled(adapter)) {
Sathya Perla8d56ff12009-11-22 22:02:26 +00002171 pci_disable_msix(adapter->pdev);
Sathya Perlaac6a0c42011-03-21 20:49:25 +00002172 adapter->num_msix_vec = 0;
Sathya Perla3abcded2010-10-03 22:12:27 -07002173 }
2174}
2175
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002176static uint be_num_rss_want(struct be_adapter *adapter)
2177{
Yuval Mintz30e80b52012-07-01 03:19:00 +00002178 u32 num = 0;
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002179 if ((adapter->function_caps & BE_FUNCTION_CAPS_RSS) &&
Sathya Perla4cbdaf62012-08-28 20:37:40 +00002180 !sriov_want(adapter) && be_physfn(adapter)) {
Yuval Mintz30e80b52012-07-01 03:19:00 +00002181 num = (adapter->be3_native) ? BE3_MAX_RSS_QS : BE2_MAX_RSS_QS;
2182 num = min_t(u32, num, (u32)netif_get_num_default_rss_queues());
2183 }
2184 return num;
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002185}
2186
/* Computes the desired number of MSI-X vectors (NIC + optional RoCE
 * share) and tries to enable them. If the kernel offers fewer vectors,
 * retries with the offered count; on total failure simply returns with
 * num_msix_vec left at 0 so the caller falls back to INTx. On success
 * splits the vectors between NIC and RoCE.
 */
static void be_msix_enable(struct be_adapter *adapter)
{
#define BE_MIN_MSIX_VECTORS 1
	int i, status, num_vec, num_roce_vec = 0;

	/* If RSS queues are not used, need a vec for default RX Q */
	num_vec = min(be_num_rss_want(adapter), num_online_cpus());
	if (be_roce_supported(adapter)) {
		num_roce_vec = min_t(u32, MAX_ROCE_MSIX_VECTORS,
					(num_online_cpus() + 1));
		num_roce_vec = min(num_roce_vec, MAX_ROCE_EQS);
		num_vec += num_roce_vec;
		num_vec = min(num_vec, MAX_MSIX_VECTORS);
	}
	num_vec = max(num_vec, BE_MIN_MSIX_VECTORS);

	for (i = 0; i < num_vec; i++)
		adapter->msix_entries[i].entry = i;

	/* pci_enable_msix() returns 0 on success or, when positive,
	 * the number of vectors that could be allocated instead.
	 */
	status = pci_enable_msix(adapter->pdev, adapter->msix_entries, num_vec);
	if (status == 0) {
		goto done;
	} else if (status >= BE_MIN_MSIX_VECTORS) {
		num_vec = status;
		if (pci_enable_msix(adapter->pdev, adapter->msix_entries,
				num_vec) == 0)
			goto done;
	}
	return;
done:
	/* Carve out the RoCE share of the granted vectors, if any */
	if (be_roce_supported(adapter)) {
		if (num_vec > num_roce_vec) {
			adapter->num_msix_vec = num_vec - num_roce_vec;
			adapter->num_msix_roce_vec =
				num_vec - adapter->num_msix_vec;
		} else {
			adapter->num_msix_vec = num_vec;
			adapter->num_msix_roce_vec = 0;
		}
	} else
		adapter->num_msix_vec = num_vec;
	return;
}
2230
Sathya Perlafe6d2a32010-11-21 23:25:50 +00002231static inline int be_msix_vec_get(struct be_adapter *adapter,
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002232 struct be_eq_obj *eqo)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002233{
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002234 return adapter->msix_entries[eqo->idx].vector;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002235}
2236
/* Requests one MSI-X IRQ per event queue, naming each "<netdev>-q<i>".
 * On failure, frees the vectors acquired so far (in reverse order) and
 * disables MSI-X so the caller can fall back to INTx.
 * Returns 0 on success or the request_irq() error.
 */
static int be_msix_register(struct be_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	struct be_eq_obj *eqo;
	int status, i, vec;

	for_all_evt_queues(adapter, eqo, i) {
		sprintf(eqo->desc, "%s-q%d", netdev->name, i);
		vec = be_msix_vec_get(adapter, eqo);
		status = request_irq(vec, be_msix, 0, eqo->desc, eqo);
		if (status)
			goto err_msix;
	}

	return 0;
err_msix:
	/* Unwind only the IRQs that were successfully requested */
	for (i--, eqo = &adapter->eq_obj[i]; i >= 0; i--, eqo--)
		free_irq(be_msix_vec_get(adapter, eqo), eqo);
	dev_warn(&adapter->pdev->dev, "MSIX Request IRQ failed - err %d\n",
		status);
	be_msix_disable(adapter);
	return status;
}
2260
/* Registers interrupts: prefers MSI-X when enabled, otherwise (on PFs
 * only) falls back to shared INTx. Sets isr_registered on success.
 * Returns 0 on success or the underlying request error.
 */
static int be_irq_register(struct be_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	int status;

	if (msix_enabled(adapter)) {
		status = be_msix_register(adapter);
		if (status == 0)
			goto done;
		/* INTx is not supported for VF */
		if (!be_physfn(adapter))
			return status;
	}

	/* INTx */
	netdev->irq = adapter->pdev->irq;
	status = request_irq(netdev->irq, be_intx, IRQF_SHARED, netdev->name,
			adapter);
	if (status) {
		dev_err(&adapter->pdev->dev,
			"INTx request IRQ failed - err %d\n", status);
		return status;
	}
done:
	adapter->isr_registered = true;
	return 0;
}
2288
2289static void be_irq_unregister(struct be_adapter *adapter)
2290{
2291 struct net_device *netdev = adapter->netdev;
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002292 struct be_eq_obj *eqo;
Sathya Perla3abcded2010-10-03 22:12:27 -07002293 int i;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002294
2295 if (!adapter->isr_registered)
2296 return;
2297
2298 /* INTx */
Sathya Perlaac6a0c42011-03-21 20:49:25 +00002299 if (!msix_enabled(adapter)) {
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002300 free_irq(netdev->irq, adapter);
2301 goto done;
2302 }
2303
2304 /* MSIx */
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002305 for_all_evt_queues(adapter, eqo, i)
2306 free_irq(be_msix_vec_get(adapter, eqo), eqo);
Sathya Perla3abcded2010-10-03 22:12:27 -07002307
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002308done:
2309 adapter->isr_registered = false;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002310}
2311
/* Destroys every RX queue that was created in the FW, drains its CQ
 * after a short DMA grace period, and frees the host-side queue memory.
 */
static void be_rx_qs_destroy(struct be_adapter *adapter)
{
	struct be_queue_info *q;
	struct be_rx_obj *rxo;
	int i;

	for_all_rx_queues(adapter, rxo, i) {
		q = &rxo->q;
		if (q->created) {
			be_cmd_rxq_destroy(adapter, q);
			/* After the rxq is invalidated, wait for a grace time
			 * of 1ms for all dma to end and the flush compl to
			 * arrive
			 */
			mdelay(1);
			be_rx_cq_clean(rxo);
		}
		be_queue_free(adapter, q);
	}
}
2332
/* ndo_stop handler. Teardown order: stop RoCE and async MCC first,
 * mask interrupts, quiesce NAPI and each EQ, release IRQs, drain TX
 * completions so all skbs are freed, then destroy the RX queues.
 */
static int be_close(struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_eq_obj *eqo;
	int i;

	be_roce_dev_close(adapter);

	be_async_mcc_disable(adapter);

	/* On Lancer the host-side interrupt mask is not used */
	if (!lancer_chip(adapter))
		be_intr_set(adapter, false);

	for_all_evt_queues(adapter, eqo, i) {
		napi_disable(&eqo->napi);
		/* Ensure no handler is still running for this EQ's IRQ */
		if (msix_enabled(adapter))
			synchronize_irq(be_msix_vec_get(adapter, eqo));
		else
			synchronize_irq(netdev->irq);
		be_eq_clean(eqo);
	}

	be_irq_unregister(adapter);

	/* Wait for all pending tx completions to arrive so that
	 * all tx skbs are freed.
	 */
	be_tx_compl_clean(adapter);

	be_rx_qs_destroy(adapter);
	return 0;
}
2365
/* Allocates the RX rings, creates them in the FW (default RXQ first,
 * then the RSS rings), programs the 128-entry RSS indirection table
 * when multiple RX queues exist, and posts the initial RX buffers.
 * Returns 0 on success or the first non-zero status.
 */
static int be_rx_qs_create(struct be_adapter *adapter)
{
	struct be_rx_obj *rxo;
	int rc, i, j;
	u8 rsstable[128];

	for_all_rx_queues(adapter, rxo, i) {
		rc = be_queue_alloc(adapter, &rxo->q, RX_Q_LEN,
				sizeof(struct be_eth_rx_d));
		if (rc)
			return rc;
	}

	/* The FW would like the default RXQ to be created first */
	rxo = default_rxo(adapter);
	rc = be_cmd_rxq_create(adapter, &rxo->q, rxo->cq.id, rx_frag_size,
			adapter->if_handle, false, &rxo->rss_id);
	if (rc)
		return rc;

	for_all_rss_queues(adapter, rxo, i) {
		rc = be_cmd_rxq_create(adapter, &rxo->q, rxo->cq.id,
				rx_frag_size, adapter->if_handle,
				true, &rxo->rss_id);
		if (rc)
			return rc;
	}

	if (be_multi_rxq(adapter)) {
		/* Fill the 128-entry table by cycling through the RSS
		 * ring ids (num_rx_qs - 1 rings, excluding the default)
		 */
		for (j = 0; j < 128; j += adapter->num_rx_qs - 1) {
			for_all_rss_queues(adapter, rxo, i) {
				if ((j + i) >= 128)
					break;
				rsstable[j + i] = rxo->rss_id;
			}
		}
		rc = be_cmd_rss_config(adapter, rsstable, 128);
		if (rc)
			return rc;
	}

	/* First time posting */
	for_all_rx_queues(adapter, rxo, i)
		be_post_rx_frags(rxo, GFP_KERNEL);
	return 0;
}
2412
/* ndo_open handler. Bring-up order: create RX queues, register IRQs,
 * unmask interrupts, arm all CQs, enable async MCC, enable NAPI and
 * arm the EQs, then query/report link state and start RoCE.
 * On any RX-queue failure, tears everything down via be_close() and
 * returns -EIO.
 */
static int be_open(struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_eq_obj *eqo;
	struct be_rx_obj *rxo;
	struct be_tx_obj *txo;
	u8 link_status;
	int status, i;

	status = be_rx_qs_create(adapter);
	if (status)
		goto err;

	be_irq_register(adapter);

	/* On Lancer the host-side interrupt mask is not used */
	if (!lancer_chip(adapter))
		be_intr_set(adapter, true);

	for_all_rx_queues(adapter, rxo, i)
		be_cq_notify(adapter, rxo->cq.id, true, 0);

	for_all_tx_queues(adapter, txo, i)
		be_cq_notify(adapter, txo->cq.id, true, 0);

	be_async_mcc_enable(adapter);

	for_all_evt_queues(adapter, eqo, i) {
		napi_enable(&eqo->napi);
		be_eq_notify(adapter, eqo->q.id, true, false, 0);
	}

	/* Link state is reported only when the query succeeds */
	status = be_cmd_link_status_query(adapter, NULL, NULL,
					&link_status, 0);
	if (!status)
		be_link_status_update(adapter, link_status);

	be_roce_dev_open(adapter);
	return 0;
err:
	be_close(adapter->netdev);
	return -EIO;
}
2455
Ajit Khaparde71d8d1b2009-12-03 06:16:59 +00002456static int be_setup_wol(struct be_adapter *adapter, bool enable)
2457{
2458 struct be_dma_mem cmd;
2459 int status = 0;
2460 u8 mac[ETH_ALEN];
2461
2462 memset(mac, 0, ETH_ALEN);
2463
2464 cmd.size = sizeof(struct be_cmd_req_acpi_wol_magic_config);
Ivan Vecera2b7bceb2011-02-02 08:05:12 +00002465 cmd.va = dma_alloc_coherent(&adapter->pdev->dev, cmd.size, &cmd.dma,
2466 GFP_KERNEL);
Ajit Khaparde71d8d1b2009-12-03 06:16:59 +00002467 if (cmd.va == NULL)
2468 return -1;
2469 memset(cmd.va, 0, cmd.size);
2470
2471 if (enable) {
2472 status = pci_write_config_dword(adapter->pdev,
2473 PCICFG_PM_CONTROL_OFFSET, PCICFG_PM_CONTROL_MASK);
2474 if (status) {
2475 dev_err(&adapter->pdev->dev,
Frans Pop2381a552010-03-24 07:57:36 +00002476 "Could not enable Wake-on-lan\n");
Ivan Vecera2b7bceb2011-02-02 08:05:12 +00002477 dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va,
2478 cmd.dma);
Ajit Khaparde71d8d1b2009-12-03 06:16:59 +00002479 return status;
2480 }
2481 status = be_cmd_enable_magic_wol(adapter,
2482 adapter->netdev->dev_addr, &cmd);
2483 pci_enable_wake(adapter->pdev, PCI_D3hot, 1);
2484 pci_enable_wake(adapter->pdev, PCI_D3cold, 1);
2485 } else {
2486 status = be_cmd_enable_magic_wol(adapter, mac, &cmd);
2487 pci_enable_wake(adapter->pdev, PCI_D3hot, 0);
2488 pci_enable_wake(adapter->pdev, PCI_D3cold, 0);
2489 }
2490
Ivan Vecera2b7bceb2011-02-02 08:05:12 +00002491 dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va, cmd.dma);
Ajit Khaparde71d8d1b2009-12-03 06:16:59 +00002492 return status;
2493}
2494
Ajit Khaparde6d87f5c2010-08-25 00:32:33 +00002495/*
2496 * Generate a seed MAC address from the PF MAC Address using jhash.
2497 * MAC Address for VFs are assigned incrementally starting from the seed.
2498 * These addresses are programmed in the ASIC by the PF and the VF driver
2499 * queries for the MAC address during its probe.
2500 */
static inline int be_vf_eth_addr_config(struct be_adapter *adapter)
{
	u32 vf;
	int status = 0;
	u8 mac[ETH_ALEN];
	struct be_vf_cfg *vf_cfg;

	be_vf_eth_addr_generate(adapter, mac);

	for_all_vfs(adapter, vf_cfg, vf) {
		/* Lancer uses the mac-list command; BE programs a pmac
		 * on the VF's interface
		 */
		if (lancer_chip(adapter)) {
			status = be_cmd_set_mac_list(adapter, mac, 1, vf + 1);
		} else {
			status = be_cmd_pmac_add(adapter, mac,
					vf_cfg->if_handle,
					&vf_cfg->pmac_id, vf + 1);
		}

		/* A failure is logged but does not stop the loop; the
		 * function returns only the status of the last VF
		 */
		if (status)
			dev_err(&adapter->pdev->dev,
			"Mac address assignment failed for VF %d\n", vf);
		else
			memcpy(vf_cfg->mac_addr, mac, ETH_ALEN);

		/* Addresses are assigned incrementally from the seed */
		mac[5] += 1;
	}
	return status;
}
2529
/* Releases all VF resources: per-VF MACs and interfaces, then SR-IOV
 * itself. If any VF is still assigned to a VM, only logs a warning and
 * skips the FW/PCI teardown (freeing just the host-side config array).
 */
static void be_vf_clear(struct be_adapter *adapter)
{
	struct be_vf_cfg *vf_cfg;
	u32 vf;

	if (be_find_vfs(adapter, ASSIGNED)) {
		dev_warn(&adapter->pdev->dev, "VFs are assigned to VMs\n");
		goto done;
	}

	for_all_vfs(adapter, vf_cfg, vf) {
		if (lancer_chip(adapter))
			be_cmd_set_mac_list(adapter, NULL, 0, vf + 1);
		else
			be_cmd_pmac_del(adapter, vf_cfg->if_handle,
					vf_cfg->pmac_id, vf + 1);

		be_cmd_if_destroy(adapter, vf_cfg->if_handle, vf + 1);
	}
	pci_disable_sriov(adapter->pdev);
done:
	kfree(adapter->vf_cfg);
	adapter->num_vfs = 0;
}
2554
/* Full teardown of adapter resources, the inverse of setup: stop the
 * worker, clear VFs, remove extra unicast MACs, destroy the interface,
 * then the MCC/RX-CQ/TX/EQ queues, and finally disable MSI-X.
 * Always returns 0.
 */
static int be_clear(struct be_adapter *adapter)
{
	int i = 1;	/* NOTE(review): starts at 1 — presumably pmac_id[0]
			 * is the primary MAC; confirm against setup code */

	if (adapter->flags & BE_FLAGS_WORKER_SCHEDULED) {
		cancel_delayed_work_sync(&adapter->work);
		adapter->flags &= ~BE_FLAGS_WORKER_SCHEDULED;
	}

	if (sriov_enabled(adapter))
		be_vf_clear(adapter);

	/* Remove every additional (secondary) unicast MAC */
	for (; adapter->uc_macs > 0; adapter->uc_macs--, i++)
		be_cmd_pmac_del(adapter, adapter->if_handle,
			adapter->pmac_id[i], 0);

	be_cmd_if_destroy(adapter, adapter->if_handle, 0);

	be_mcc_queues_destroy(adapter);
	be_rx_cqs_destroy(adapter);
	be_tx_queues_destroy(adapter);
	be_evt_queues_destroy(adapter);

	be_msix_disable(adapter);
	return 0;
}
2581
Sathya Perla39f1d942012-05-08 19:41:24 +00002582static int be_vf_setup_init(struct be_adapter *adapter)
Sathya Perla30128032011-11-10 19:17:57 +00002583{
Sathya Perla11ac75e2011-12-13 00:58:50 +00002584 struct be_vf_cfg *vf_cfg;
Sathya Perla30128032011-11-10 19:17:57 +00002585 int vf;
2586
Sathya Perla39f1d942012-05-08 19:41:24 +00002587 adapter->vf_cfg = kcalloc(adapter->num_vfs, sizeof(*vf_cfg),
2588 GFP_KERNEL);
2589 if (!adapter->vf_cfg)
2590 return -ENOMEM;
2591
Sathya Perla11ac75e2011-12-13 00:58:50 +00002592 for_all_vfs(adapter, vf_cfg, vf) {
2593 vf_cfg->if_handle = -1;
2594 vf_cfg->pmac_id = -1;
Sathya Perla30128032011-11-10 19:17:57 +00002595 }
Sathya Perla39f1d942012-05-08 19:41:24 +00002596 return 0;
Sathya Perla30128032011-11-10 19:17:57 +00002597}
2598
/* Enables SR-IOV and provisions each VF: creates its interface, seeds
 * its MAC (unless VFs were already enabled, e.g. by a previous probe),
 * caps its TX rate to the 1Gbps default, and caches its default VLAN.
 * Returns 0 when SR-IOV can't/needn't be enabled (treated as success),
 * or a non-zero status on provisioning failure.
 * NOTE(review): the err path returns without unwinding VF interfaces
 * or pci_enable_sriov(); presumably the caller's be_clear() →
 * be_vf_clear() handles that — confirm.
 */
static int be_vf_setup(struct be_adapter *adapter)
{
	struct be_vf_cfg *vf_cfg;
	struct device *dev = &adapter->pdev->dev;
	u32 cap_flags, en_flags, vf;
	u16 def_vlan, lnk_speed;
	int status, enabled_vfs;

	enabled_vfs = be_find_vfs(adapter, ENABLED);
	if (enabled_vfs) {
		dev_warn(dev, "%d VFs are already enabled\n", enabled_vfs);
		dev_warn(dev, "Ignoring num_vfs=%d setting\n", num_vfs);
		return 0;
	}

	/* Clamp the module-parameter request to what the device offers */
	if (num_vfs > adapter->dev_num_vfs) {
		dev_warn(dev, "Device supports %d VFs and not %d\n",
			adapter->dev_num_vfs, num_vfs);
		num_vfs = adapter->dev_num_vfs;
	}

	status = pci_enable_sriov(adapter->pdev, num_vfs);
	if (!status) {
		adapter->num_vfs = num_vfs;
	} else {
		/* Platform doesn't support SRIOV though device supports it */
		dev_warn(dev, "SRIOV enable failed\n");
		return 0;
	}

	status = be_vf_setup_init(adapter);
	if (status)
		goto err;

	cap_flags = en_flags = BE_IF_FLAGS_UNTAGGED | BE_IF_FLAGS_BROADCAST |
				BE_IF_FLAGS_MULTICAST;
	for_all_vfs(adapter, vf_cfg, vf) {
		status = be_cmd_if_create(adapter, cap_flags, en_flags,
					  &vf_cfg->if_handle, vf + 1);
		if (status)
			goto err;
	}

	/* Seed MACs only on a fresh enable; otherwise they persist */
	if (!enabled_vfs) {
		status = be_vf_eth_addr_config(adapter);
		if (status)
			goto err;
	}

	for_all_vfs(adapter, vf_cfg, vf) {
		lnk_speed = 1000;
		status = be_cmd_set_qos(adapter, lnk_speed, vf + 1);
		if (status)
			goto err;
		vf_cfg->tx_rate = lnk_speed * 10;

		status = be_cmd_get_hsw_config(adapter, &def_vlan,
				vf + 1, vf_cfg->if_handle);
		if (status)
			goto err;
		vf_cfg->def_vid = def_vlan;
	}
	return 0;
err:
	return status;
}
2665
Sathya Perla30128032011-11-10 19:17:57 +00002666static void be_setup_init(struct be_adapter *adapter)
2667{
2668 adapter->vlan_prio_bmap = 0xff;
Ajit Khaparde42f11cf2012-04-21 18:53:22 +00002669 adapter->phy.link_speed = -1;
Sathya Perla30128032011-11-10 19:17:57 +00002670 adapter->if_handle = -1;
2671 adapter->be3_native = false;
2672 adapter->promiscuous = false;
2673 adapter->eq_next_idx = 0;
Ajit Khaparde42f11cf2012-04-21 18:53:22 +00002674 adapter->phy.forced_port_speed = -1;
Sathya Perla30128032011-11-10 19:17:57 +00002675}
2676
Padmanabh Ratnakar1578e772012-06-07 04:37:08 +00002677static int be_get_mac_addr(struct be_adapter *adapter, u8 *mac, u32 if_handle,
2678 bool *active_mac, u32 *pmac_id)
Padmanabh Ratnakar590c3912011-11-25 05:47:26 +00002679{
Padmanabh Ratnakar1578e772012-06-07 04:37:08 +00002680 int status = 0;
Padmanabh Ratnakare5e1ee82012-02-03 09:50:17 +00002681
Padmanabh Ratnakar1578e772012-06-07 04:37:08 +00002682 if (!is_zero_ether_addr(adapter->netdev->perm_addr)) {
2683 memcpy(mac, adapter->netdev->dev_addr, ETH_ALEN);
2684 if (!lancer_chip(adapter) && !be_physfn(adapter))
2685 *active_mac = true;
2686 else
2687 *active_mac = false;
Padmanabh Ratnakare5e1ee82012-02-03 09:50:17 +00002688
Padmanabh Ratnakar1578e772012-06-07 04:37:08 +00002689 return status;
Padmanabh Ratnakare5e1ee82012-02-03 09:50:17 +00002690 }
Padmanabh Ratnakar1578e772012-06-07 04:37:08 +00002691
2692 if (lancer_chip(adapter)) {
2693 status = be_cmd_get_mac_from_list(adapter, mac,
2694 active_mac, pmac_id, 0);
2695 if (*active_mac) {
2696 status = be_cmd_mac_addr_query(adapter, mac,
2697 MAC_ADDRESS_TYPE_NETWORK,
2698 false, if_handle,
2699 *pmac_id);
2700 }
2701 } else if (be_physfn(adapter)) {
2702 /* For BE3, for PF get permanent MAC */
2703 status = be_cmd_mac_addr_query(adapter, mac,
2704 MAC_ADDRESS_TYPE_NETWORK, true,
2705 0, 0);
2706 *active_mac = false;
2707 } else {
2708 /* For BE3, for VF get soft MAC assigned by PF*/
2709 status = be_cmd_mac_addr_query(adapter, mac,
2710 MAC_ADDRESS_TYPE_NETWORK, false,
2711 if_handle, 0);
2712 *active_mac = true;
2713 }
Padmanabh Ratnakar590c3912011-11-25 05:47:26 +00002714 return status;
2715}
2716
Sathya Perla39f1d942012-05-08 19:41:24 +00002717/* Routine to query per function resource limits */
2718static int be_get_config(struct be_adapter *adapter)
2719{
2720 int pos;
2721 u16 dev_num_vfs;
2722
2723 pos = pci_find_ext_capability(adapter->pdev, PCI_EXT_CAP_ID_SRIOV);
2724 if (pos) {
2725 pci_read_config_word(adapter->pdev, pos + PCI_SRIOV_TOTAL_VF,
2726 &dev_num_vfs);
Vasundhara Volam7c5a5242012-08-28 20:37:41 +00002727 if (!lancer_chip(adapter))
2728 dev_num_vfs = min_t(u16, dev_num_vfs, MAX_VFS);
Sathya Perla39f1d942012-05-08 19:41:24 +00002729 adapter->dev_num_vfs = dev_num_vfs;
2730 }
2731 return 0;
2732}
2733
Sathya Perla5fb379e2009-06-18 00:02:59 +00002734static int be_setup(struct be_adapter *adapter)
2735{
Sathya Perla39f1d942012-05-08 19:41:24 +00002736 struct device *dev = &adapter->pdev->dev;
Sathya Perlaf9449ab2011-10-24 02:45:01 +00002737 u32 cap_flags, en_flags;
Sathya Perlaa54769f2011-10-24 02:45:00 +00002738 u32 tx_fc, rx_fc;
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002739 int status;
Sarveshwar Bandiba343c72010-03-31 02:56:12 +00002740 u8 mac[ETH_ALEN];
Padmanabh Ratnakar1578e772012-06-07 04:37:08 +00002741 bool active_mac;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002742
Sathya Perla30128032011-11-10 19:17:57 +00002743 be_setup_init(adapter);
Sathya Perlaf9449ab2011-10-24 02:45:01 +00002744
Sathya Perla39f1d942012-05-08 19:41:24 +00002745 be_get_config(adapter);
2746
Sathya Perla2dc1deb2011-07-19 19:52:33 +00002747 be_cmd_req_native_mode(adapter);
2748
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002749 be_msix_enable(adapter);
2750
2751 status = be_evt_queues_create(adapter);
2752 if (status)
Sathya Perlaa54769f2011-10-24 02:45:00 +00002753 goto err;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002754
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002755 status = be_tx_cqs_create(adapter);
2756 if (status)
2757 goto err;
2758
2759 status = be_rx_cqs_create(adapter);
2760 if (status)
Sathya Perlaa54769f2011-10-24 02:45:00 +00002761 goto err;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002762
Sathya Perla5fb379e2009-06-18 00:02:59 +00002763 status = be_mcc_queues_create(adapter);
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002764 if (status)
Sathya Perlaa54769f2011-10-24 02:45:00 +00002765 goto err;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002766
Sathya Perlaf9449ab2011-10-24 02:45:01 +00002767 en_flags = BE_IF_FLAGS_UNTAGGED | BE_IF_FLAGS_BROADCAST |
2768 BE_IF_FLAGS_MULTICAST | BE_IF_FLAGS_PASS_L3L4_ERRORS;
2769 cap_flags = en_flags | BE_IF_FLAGS_MCAST_PROMISCUOUS |
Padmanabh Ratnakar5d5adb92011-11-16 02:03:32 +00002770 BE_IF_FLAGS_VLAN_PROMISCUOUS | BE_IF_FLAGS_PROMISCUOUS;
2771
Sathya Perlaf9449ab2011-10-24 02:45:01 +00002772 if (adapter->function_caps & BE_FUNCTION_CAPS_RSS) {
2773 cap_flags |= BE_IF_FLAGS_RSS;
2774 en_flags |= BE_IF_FLAGS_RSS;
2775 }
Padmanabh Ratnakar1578e772012-06-07 04:37:08 +00002776
Padmanabh Ratnakar0b13fb42012-07-18 02:51:58 +00002777 if (lancer_chip(adapter) && !be_physfn(adapter)) {
2778 en_flags = BE_IF_FLAGS_UNTAGGED |
2779 BE_IF_FLAGS_BROADCAST |
2780 BE_IF_FLAGS_MULTICAST;
2781 cap_flags = en_flags;
2782 }
2783
Sathya Perlaf9449ab2011-10-24 02:45:01 +00002784 status = be_cmd_if_create(adapter, cap_flags, en_flags,
Padmanabh Ratnakar1578e772012-06-07 04:37:08 +00002785 &adapter->if_handle, 0);
Sathya Perlaf9449ab2011-10-24 02:45:01 +00002786 if (status != 0)
2787 goto err;
2788
Padmanabh Ratnakar1578e772012-06-07 04:37:08 +00002789 memset(mac, 0, ETH_ALEN);
2790 active_mac = false;
2791 status = be_get_mac_addr(adapter, mac, adapter->if_handle,
2792 &active_mac, &adapter->pmac_id[0]);
2793 if (status != 0)
2794 goto err;
2795
2796 if (!active_mac) {
2797 status = be_cmd_pmac_add(adapter, mac, adapter->if_handle,
2798 &adapter->pmac_id[0], 0);
2799 if (status != 0)
2800 goto err;
2801 }
2802
2803 if (is_zero_ether_addr(adapter->netdev->dev_addr)) {
2804 memcpy(adapter->netdev->dev_addr, mac, ETH_ALEN);
2805 memcpy(adapter->netdev->perm_addr, mac, ETH_ALEN);
Sathya Perlaf9449ab2011-10-24 02:45:01 +00002806 }
Ajit Khaparde0dffc832009-11-29 17:57:46 +00002807
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002808 status = be_tx_qs_create(adapter);
2809 if (status)
2810 goto err;
2811
Sathya Perla04b71172011-09-27 13:30:27 -04002812 be_cmd_get_fw_ver(adapter, adapter->fw_ver, NULL);
Somnath Kotur5a56eb12011-09-30 07:24:28 +00002813
Sathya Perla1d1e9a42012-06-05 19:37:17 +00002814 if (adapter->vlans_added)
Sathya Perla10329df2012-06-05 19:37:18 +00002815 be_vid_config(adapter);
Sathya Perlaa54769f2011-10-24 02:45:00 +00002816
2817 be_set_rx_mode(adapter->netdev);
2818
Ajit Khapardeddc3f5c2012-04-26 15:42:31 +00002819 be_cmd_get_flow_control(adapter, &tx_fc, &rx_fc);
Padmanabh Ratnakar590c3912011-11-25 05:47:26 +00002820
Ajit Khapardeddc3f5c2012-04-26 15:42:31 +00002821 if (rx_fc != adapter->rx_fc || tx_fc != adapter->tx_fc)
2822 be_cmd_set_flow_control(adapter, adapter->tx_fc,
Sathya Perlaa54769f2011-10-24 02:45:00 +00002823 adapter->rx_fc);
Sathya Perlaa54769f2011-10-24 02:45:00 +00002824
Sathya Perla39f1d942012-05-08 19:41:24 +00002825 if (be_physfn(adapter) && num_vfs) {
2826 if (adapter->dev_num_vfs)
2827 be_vf_setup(adapter);
2828 else
2829 dev_warn(dev, "device doesn't support SRIOV\n");
Sathya Perlaf9449ab2011-10-24 02:45:01 +00002830 }
2831
Ajit Khaparde42f11cf2012-04-21 18:53:22 +00002832 be_cmd_get_phy_info(adapter);
2833 if (be_pause_supported(adapter))
2834 adapter->phy.fc_autoneg = 1;
2835
Sathya Perla191eb752012-02-23 18:50:13 +00002836 schedule_delayed_work(&adapter->work, msecs_to_jiffies(1000));
2837 adapter->flags |= BE_FLAGS_WORKER_SCHEDULED;
Sathya Perlaf9449ab2011-10-24 02:45:01 +00002838 return 0;
Sathya Perlaa54769f2011-10-24 02:45:00 +00002839err:
2840 be_clear(adapter);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002841 return status;
2842}
2843
Ivan Vecera66268732011-12-08 01:31:21 +00002844#ifdef CONFIG_NET_POLL_CONTROLLER
2845static void be_netpoll(struct net_device *netdev)
2846{
2847 struct be_adapter *adapter = netdev_priv(netdev);
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002848 struct be_eq_obj *eqo;
Ivan Vecera66268732011-12-08 01:31:21 +00002849 int i;
2850
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002851 for_all_evt_queues(adapter, eqo, i)
2852 event_handle(eqo);
2853
2854 return;
Ivan Vecera66268732011-12-08 01:31:21 +00002855}
2856#endif
2857
Ajit Khaparde84517482009-09-04 03:12:16 +00002858#define FW_FILE_HDR_SIGN "ServerEngines Corp. "
Padmanabh Ratnakarc165541e2012-04-25 01:47:15 +00002859char flash_cookie[2][16] = {"*** SE FLAS", "H DIRECTORY *** "};
2860
Sarveshwar Bandifa9a6fe2009-11-20 14:23:47 -08002861static bool be_flash_redboot(struct be_adapter *adapter,
Ajit Khaparde3f0d4562010-02-09 01:30:35 +00002862 const u8 *p, u32 img_start, int image_size,
2863 int hdr_size)
Sarveshwar Bandifa9a6fe2009-11-20 14:23:47 -08002864{
2865 u32 crc_offset;
2866 u8 flashed_crc[4];
2867 int status;
Ajit Khaparde3f0d4562010-02-09 01:30:35 +00002868
2869 crc_offset = hdr_size + img_start + image_size - 4;
2870
Sarveshwar Bandifa9a6fe2009-11-20 14:23:47 -08002871 p += crc_offset;
Ajit Khaparde3f0d4562010-02-09 01:30:35 +00002872
2873 status = be_cmd_get_flash_crc(adapter, flashed_crc,
Ajit Khapardef510fc62010-03-31 01:47:45 +00002874 (image_size - 4));
Sarveshwar Bandifa9a6fe2009-11-20 14:23:47 -08002875 if (status) {
2876 dev_err(&adapter->pdev->dev,
2877 "could not get crc from flash, not flashing redboot\n");
2878 return false;
2879 }
2880
2881 /*update redboot only if crc does not match*/
2882 if (!memcmp(flashed_crc, p, 4))
2883 return false;
2884 else
2885 return true;
Sarveshwar Bandifa9a6fe2009-11-20 14:23:47 -08002886}
2887
Sathya Perla306f1342011-08-02 19:57:45 +00002888static bool phy_flashing_required(struct be_adapter *adapter)
2889{
Ajit Khaparde42f11cf2012-04-21 18:53:22 +00002890 return (adapter->phy.phy_type == TN_8022 &&
2891 adapter->phy.interface_type == PHY_TYPE_BASET_10GB);
Sathya Perla306f1342011-08-02 19:57:45 +00002892}
2893
Padmanabh Ratnakarc165541e2012-04-25 01:47:15 +00002894static bool is_comp_in_ufi(struct be_adapter *adapter,
2895 struct flash_section_info *fsec, int type)
2896{
2897 int i = 0, img_type = 0;
2898 struct flash_section_info_g2 *fsec_g2 = NULL;
2899
2900 if (adapter->generation != BE_GEN3)
2901 fsec_g2 = (struct flash_section_info_g2 *)fsec;
2902
2903 for (i = 0; i < MAX_FLASH_COMP; i++) {
2904 if (fsec_g2)
2905 img_type = le32_to_cpu(fsec_g2->fsec_entry[i].type);
2906 else
2907 img_type = le32_to_cpu(fsec->fsec_entry[i].type);
2908
2909 if (img_type == type)
2910 return true;
2911 }
2912 return false;
2913
2914}
2915
2916struct flash_section_info *get_fsec_info(struct be_adapter *adapter,
2917 int header_size,
2918 const struct firmware *fw)
2919{
2920 struct flash_section_info *fsec = NULL;
2921 const u8 *p = fw->data;
2922
2923 p += header_size;
2924 while (p < (fw->data + fw->size)) {
2925 fsec = (struct flash_section_info *)p;
2926 if (!memcmp(flash_cookie, fsec->cookie, sizeof(flash_cookie)))
2927 return fsec;
2928 p += 32;
2929 }
2930 return NULL;
2931}
2932
Ajit Khaparde3f0d4562010-02-09 01:30:35 +00002933static int be_flash_data(struct be_adapter *adapter,
Padmanabh Ratnakarc165541e2012-04-25 01:47:15 +00002934 const struct firmware *fw,
2935 struct be_dma_mem *flash_cmd,
2936 int num_of_images)
Ajit Khaparde3f0d4562010-02-09 01:30:35 +00002937
Ajit Khaparde84517482009-09-04 03:12:16 +00002938{
Ajit Khaparde3f0d4562010-02-09 01:30:35 +00002939 int status = 0, i, filehdr_size = 0;
Padmanabh Ratnakarc165541e2012-04-25 01:47:15 +00002940 int img_hdrs_size = (num_of_images * sizeof(struct image_hdr));
Ajit Khaparde3f0d4562010-02-09 01:30:35 +00002941 u32 total_bytes = 0, flash_op;
Ajit Khaparde84517482009-09-04 03:12:16 +00002942 int num_bytes;
2943 const u8 *p = fw->data;
2944 struct be_cmd_write_flashrom *req = flash_cmd->va;
Joe Perches215faf92010-12-21 02:16:10 -08002945 const struct flash_comp *pflashcomp;
Padmanabh Ratnakarc165541e2012-04-25 01:47:15 +00002946 int num_comp, hdr_size;
2947 struct flash_section_info *fsec = NULL;
Ajit Khaparde84517482009-09-04 03:12:16 +00002948
Padmanabh Ratnakarc165541e2012-04-25 01:47:15 +00002949 struct flash_comp gen3_flash_types[] = {
2950 { FLASH_iSCSI_PRIMARY_IMAGE_START_g3, OPTYPE_ISCSI_ACTIVE,
2951 FLASH_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_iSCSI},
2952 { FLASH_REDBOOT_START_g3, OPTYPE_REDBOOT,
2953 FLASH_REDBOOT_IMAGE_MAX_SIZE_g3, IMAGE_BOOT_CODE},
2954 { FLASH_iSCSI_BIOS_START_g3, OPTYPE_BIOS,
2955 FLASH_BIOS_IMAGE_MAX_SIZE_g3, IMAGE_OPTION_ROM_ISCSI},
2956 { FLASH_PXE_BIOS_START_g3, OPTYPE_PXE_BIOS,
2957 FLASH_BIOS_IMAGE_MAX_SIZE_g3, IMAGE_OPTION_ROM_PXE},
2958 { FLASH_FCoE_BIOS_START_g3, OPTYPE_FCOE_BIOS,
2959 FLASH_BIOS_IMAGE_MAX_SIZE_g3, IMAGE_OPTION_ROM_FCoE},
2960 { FLASH_iSCSI_BACKUP_IMAGE_START_g3, OPTYPE_ISCSI_BACKUP,
2961 FLASH_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_BACKUP_iSCSI},
2962 { FLASH_FCoE_PRIMARY_IMAGE_START_g3, OPTYPE_FCOE_FW_ACTIVE,
2963 FLASH_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_FCoE},
2964 { FLASH_FCoE_BACKUP_IMAGE_START_g3, OPTYPE_FCOE_FW_BACKUP,
2965 FLASH_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_BACKUP_FCoE},
2966 { FLASH_NCSI_START_g3, OPTYPE_NCSI_FW,
2967 FLASH_NCSI_IMAGE_MAX_SIZE_g3, IMAGE_NCSI},
2968 { FLASH_PHY_FW_START_g3, OPTYPE_PHY_FW,
2969 FLASH_PHY_FW_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_PHY}
Ajit Khaparde3f0d4562010-02-09 01:30:35 +00002970 };
Padmanabh Ratnakarc165541e2012-04-25 01:47:15 +00002971
2972 struct flash_comp gen2_flash_types[] = {
2973 { FLASH_iSCSI_PRIMARY_IMAGE_START_g2, OPTYPE_ISCSI_ACTIVE,
2974 FLASH_IMAGE_MAX_SIZE_g2, IMAGE_FIRMWARE_iSCSI},
2975 { FLASH_REDBOOT_START_g2, OPTYPE_REDBOOT,
2976 FLASH_REDBOOT_IMAGE_MAX_SIZE_g2, IMAGE_BOOT_CODE},
2977 { FLASH_iSCSI_BIOS_START_g2, OPTYPE_BIOS,
2978 FLASH_BIOS_IMAGE_MAX_SIZE_g2, IMAGE_OPTION_ROM_ISCSI},
2979 { FLASH_PXE_BIOS_START_g2, OPTYPE_PXE_BIOS,
2980 FLASH_BIOS_IMAGE_MAX_SIZE_g2, IMAGE_OPTION_ROM_PXE},
2981 { FLASH_FCoE_BIOS_START_g2, OPTYPE_FCOE_BIOS,
2982 FLASH_BIOS_IMAGE_MAX_SIZE_g2, IMAGE_OPTION_ROM_FCoE},
2983 { FLASH_iSCSI_BACKUP_IMAGE_START_g2, OPTYPE_ISCSI_BACKUP,
2984 FLASH_IMAGE_MAX_SIZE_g2, IMAGE_FIRMWARE_BACKUP_iSCSI},
2985 { FLASH_FCoE_PRIMARY_IMAGE_START_g2, OPTYPE_FCOE_FW_ACTIVE,
2986 FLASH_IMAGE_MAX_SIZE_g2, IMAGE_FIRMWARE_FCoE},
2987 { FLASH_FCoE_BACKUP_IMAGE_START_g2, OPTYPE_FCOE_FW_BACKUP,
2988 FLASH_IMAGE_MAX_SIZE_g2, IMAGE_FIRMWARE_BACKUP_FCoE}
Ajit Khaparde3f0d4562010-02-09 01:30:35 +00002989 };
2990
2991 if (adapter->generation == BE_GEN3) {
2992 pflashcomp = gen3_flash_types;
2993 filehdr_size = sizeof(struct flash_file_hdr_g3);
Joe Perches215faf92010-12-21 02:16:10 -08002994 num_comp = ARRAY_SIZE(gen3_flash_types);
Ajit Khaparde3f0d4562010-02-09 01:30:35 +00002995 } else {
2996 pflashcomp = gen2_flash_types;
2997 filehdr_size = sizeof(struct flash_file_hdr_g2);
Joe Perches215faf92010-12-21 02:16:10 -08002998 num_comp = ARRAY_SIZE(gen2_flash_types);
Ajit Khaparde84517482009-09-04 03:12:16 +00002999 }
Padmanabh Ratnakarc165541e2012-04-25 01:47:15 +00003000 /* Get flash section info*/
3001 fsec = get_fsec_info(adapter, filehdr_size + img_hdrs_size, fw);
3002 if (!fsec) {
3003 dev_err(&adapter->pdev->dev,
3004 "Invalid Cookie. UFI corrupted ?\n");
3005 return -1;
3006 }
Sarveshwar Bandi9fe96932010-03-02 22:37:28 +00003007 for (i = 0; i < num_comp; i++) {
Padmanabh Ratnakarc165541e2012-04-25 01:47:15 +00003008 if (!is_comp_in_ufi(adapter, fsec, pflashcomp[i].img_type))
Sarveshwar Bandi9fe96932010-03-02 22:37:28 +00003009 continue;
Padmanabh Ratnakarc165541e2012-04-25 01:47:15 +00003010
3011 if ((pflashcomp[i].optype == OPTYPE_NCSI_FW) &&
3012 memcmp(adapter->fw_ver, "3.102.148.0", 11) < 0)
3013 continue;
3014
3015 if (pflashcomp[i].optype == OPTYPE_PHY_FW) {
Sathya Perla306f1342011-08-02 19:57:45 +00003016 if (!phy_flashing_required(adapter))
3017 continue;
3018 }
Padmanabh Ratnakarc165541e2012-04-25 01:47:15 +00003019
3020 hdr_size = filehdr_size +
3021 (num_of_images * sizeof(struct image_hdr));
3022
3023 if ((pflashcomp[i].optype == OPTYPE_REDBOOT) &&
3024 (!be_flash_redboot(adapter, fw->data, pflashcomp[i].offset,
3025 pflashcomp[i].size, hdr_size)))
Ajit Khaparde3f0d4562010-02-09 01:30:35 +00003026 continue;
Padmanabh Ratnakarc165541e2012-04-25 01:47:15 +00003027
3028 /* Flash the component */
Ajit Khaparde3f0d4562010-02-09 01:30:35 +00003029 p = fw->data;
Padmanabh Ratnakarc165541e2012-04-25 01:47:15 +00003030 p += filehdr_size + pflashcomp[i].offset + img_hdrs_size;
Sathya Perla306f1342011-08-02 19:57:45 +00003031 if (p + pflashcomp[i].size > fw->data + fw->size)
3032 return -1;
3033 total_bytes = pflashcomp[i].size;
Ajit Khaparde3f0d4562010-02-09 01:30:35 +00003034 while (total_bytes) {
3035 if (total_bytes > 32*1024)
3036 num_bytes = 32*1024;
3037 else
3038 num_bytes = total_bytes;
3039 total_bytes -= num_bytes;
Sathya Perla306f1342011-08-02 19:57:45 +00003040 if (!total_bytes) {
Padmanabh Ratnakarc165541e2012-04-25 01:47:15 +00003041 if (pflashcomp[i].optype == OPTYPE_PHY_FW)
Sathya Perla306f1342011-08-02 19:57:45 +00003042 flash_op = FLASHROM_OPER_PHY_FLASH;
3043 else
3044 flash_op = FLASHROM_OPER_FLASH;
3045 } else {
Padmanabh Ratnakarc165541e2012-04-25 01:47:15 +00003046 if (pflashcomp[i].optype == OPTYPE_PHY_FW)
Sathya Perla306f1342011-08-02 19:57:45 +00003047 flash_op = FLASHROM_OPER_PHY_SAVE;
3048 else
3049 flash_op = FLASHROM_OPER_SAVE;
3050 }
Ajit Khaparde3f0d4562010-02-09 01:30:35 +00003051 memcpy(req->params.data_buf, p, num_bytes);
3052 p += num_bytes;
3053 status = be_cmd_write_flashrom(adapter, flash_cmd,
3054 pflashcomp[i].optype, flash_op, num_bytes);
3055 if (status) {
Sathya Perla306f1342011-08-02 19:57:45 +00003056 if ((status == ILLEGAL_IOCTL_REQ) &&
3057 (pflashcomp[i].optype ==
Padmanabh Ratnakarc165541e2012-04-25 01:47:15 +00003058 OPTYPE_PHY_FW))
Sathya Perla306f1342011-08-02 19:57:45 +00003059 break;
Ajit Khaparde3f0d4562010-02-09 01:30:35 +00003060 dev_err(&adapter->pdev->dev,
3061 "cmd to write to flash rom failed.\n");
3062 return -1;
3063 }
Ajit Khaparde84517482009-09-04 03:12:16 +00003064 }
Ajit Khaparde84517482009-09-04 03:12:16 +00003065 }
Ajit Khaparde84517482009-09-04 03:12:16 +00003066 return 0;
3067}
3068
Ajit Khaparde3f0d4562010-02-09 01:30:35 +00003069static int get_ufigen_type(struct flash_file_hdr_g2 *fhdr)
3070{
3071 if (fhdr == NULL)
3072 return 0;
3073 if (fhdr->build[0] == '3')
3074 return BE_GEN3;
3075 else if (fhdr->build[0] == '2')
3076 return BE_GEN2;
3077 else
3078 return 0;
3079}
3080
Padmanabh Ratnakarf67ef7b2012-07-12 03:57:09 +00003081static int lancer_wait_idle(struct be_adapter *adapter)
3082{
3083#define SLIPORT_IDLE_TIMEOUT 30
3084 u32 reg_val;
3085 int status = 0, i;
3086
3087 for (i = 0; i < SLIPORT_IDLE_TIMEOUT; i++) {
3088 reg_val = ioread32(adapter->db + PHYSDEV_CONTROL_OFFSET);
3089 if ((reg_val & PHYSDEV_CONTROL_INP_MASK) == 0)
3090 break;
3091
3092 ssleep(1);
3093 }
3094
3095 if (i == SLIPORT_IDLE_TIMEOUT)
3096 status = -1;
3097
3098 return status;
3099}
3100
3101static int lancer_fw_reset(struct be_adapter *adapter)
3102{
3103 int status = 0;
3104
3105 status = lancer_wait_idle(adapter);
3106 if (status)
3107 return status;
3108
3109 iowrite32(PHYSDEV_CONTROL_FW_RESET_MASK, adapter->db +
3110 PHYSDEV_CONTROL_OFFSET);
3111
3112 return status;
3113}
3114
Shripad Nunjundarao485bf562011-05-16 07:36:59 +00003115static int lancer_fw_download(struct be_adapter *adapter,
3116 const struct firmware *fw)
Ajit Khaparde84517482009-09-04 03:12:16 +00003117{
Shripad Nunjundarao485bf562011-05-16 07:36:59 +00003118#define LANCER_FW_DOWNLOAD_CHUNK (32 * 1024)
3119#define LANCER_FW_DOWNLOAD_LOCATION "/prg"
3120 struct be_dma_mem flash_cmd;
Shripad Nunjundarao485bf562011-05-16 07:36:59 +00003121 const u8 *data_ptr = NULL;
3122 u8 *dest_image_ptr = NULL;
3123 size_t image_size = 0;
3124 u32 chunk_size = 0;
3125 u32 data_written = 0;
3126 u32 offset = 0;
3127 int status = 0;
3128 u8 add_status = 0;
Padmanabh Ratnakarf67ef7b2012-07-12 03:57:09 +00003129 u8 change_status;
Shripad Nunjundarao485bf562011-05-16 07:36:59 +00003130
3131 if (!IS_ALIGNED(fw->size, sizeof(u32))) {
3132 dev_err(&adapter->pdev->dev,
3133 "FW Image not properly aligned. "
3134 "Length must be 4 byte aligned.\n");
3135 status = -EINVAL;
3136 goto lancer_fw_exit;
3137 }
3138
3139 flash_cmd.size = sizeof(struct lancer_cmd_req_write_object)
3140 + LANCER_FW_DOWNLOAD_CHUNK;
3141 flash_cmd.va = dma_alloc_coherent(&adapter->pdev->dev, flash_cmd.size,
3142 &flash_cmd.dma, GFP_KERNEL);
3143 if (!flash_cmd.va) {
3144 status = -ENOMEM;
3145 dev_err(&adapter->pdev->dev,
3146 "Memory allocation failure while flashing\n");
3147 goto lancer_fw_exit;
3148 }
3149
Shripad Nunjundarao485bf562011-05-16 07:36:59 +00003150 dest_image_ptr = flash_cmd.va +
3151 sizeof(struct lancer_cmd_req_write_object);
3152 image_size = fw->size;
3153 data_ptr = fw->data;
3154
3155 while (image_size) {
3156 chunk_size = min_t(u32, image_size, LANCER_FW_DOWNLOAD_CHUNK);
3157
3158 /* Copy the image chunk content. */
3159 memcpy(dest_image_ptr, data_ptr, chunk_size);
3160
3161 status = lancer_cmd_write_object(adapter, &flash_cmd,
Padmanabh Ratnakarf67ef7b2012-07-12 03:57:09 +00003162 chunk_size, offset,
3163 LANCER_FW_DOWNLOAD_LOCATION,
3164 &data_written, &change_status,
3165 &add_status);
Shripad Nunjundarao485bf562011-05-16 07:36:59 +00003166 if (status)
3167 break;
3168
3169 offset += data_written;
3170 data_ptr += data_written;
3171 image_size -= data_written;
3172 }
3173
3174 if (!status) {
3175 /* Commit the FW written */
3176 status = lancer_cmd_write_object(adapter, &flash_cmd,
Padmanabh Ratnakarf67ef7b2012-07-12 03:57:09 +00003177 0, offset,
3178 LANCER_FW_DOWNLOAD_LOCATION,
3179 &data_written, &change_status,
3180 &add_status);
Shripad Nunjundarao485bf562011-05-16 07:36:59 +00003181 }
3182
3183 dma_free_coherent(&adapter->pdev->dev, flash_cmd.size, flash_cmd.va,
3184 flash_cmd.dma);
3185 if (status) {
3186 dev_err(&adapter->pdev->dev,
3187 "Firmware load error. "
3188 "Status code: 0x%x Additional Status: 0x%x\n",
3189 status, add_status);
3190 goto lancer_fw_exit;
3191 }
3192
Padmanabh Ratnakarf67ef7b2012-07-12 03:57:09 +00003193 if (change_status == LANCER_FW_RESET_NEEDED) {
3194 status = lancer_fw_reset(adapter);
3195 if (status) {
3196 dev_err(&adapter->pdev->dev,
3197 "Adapter busy for FW reset.\n"
3198 "New FW will not be active.\n");
3199 goto lancer_fw_exit;
3200 }
3201 } else if (change_status != LANCER_NO_RESET_NEEDED) {
3202 dev_err(&adapter->pdev->dev,
3203 "System reboot required for new FW"
3204 " to be active\n");
3205 }
3206
Shripad Nunjundarao485bf562011-05-16 07:36:59 +00003207 dev_info(&adapter->pdev->dev, "Firmware flashed successfully\n");
3208lancer_fw_exit:
3209 return status;
3210}
3211
/* Download a firmware image to a BE2/BE3 adapter. The UFI header's
 * generation must match the chip generation; for GEN3 UFIs each contained
 * image header is walked and only imageid==1 entries are flashed.
 * Returns 0 on success or a negative/-1 status.
 */
static int be_fw_download(struct be_adapter *adapter, const struct firmware* fw)
{
	struct flash_file_hdr_g2 *fhdr;
	struct flash_file_hdr_g3 *fhdr3;
	struct image_hdr *img_hdr_ptr = NULL;
	struct be_dma_mem flash_cmd;
	const u8 *p;
	int status = 0, i = 0, num_imgs = 0;

	p = fw->data;
	/* The g2 header prefix is common to both generations, so it is
	 * safe to use it for the generation check below */
	fhdr = (struct flash_file_hdr_g2 *) p;

	/* DMA buffer for the flashrom request plus one 32KB data chunk */
	flash_cmd.size = sizeof(struct be_cmd_write_flashrom) + 32*1024;
	flash_cmd.va = dma_alloc_coherent(&adapter->pdev->dev, flash_cmd.size,
					  &flash_cmd.dma, GFP_KERNEL);
	if (!flash_cmd.va) {
		status = -ENOMEM;
		dev_err(&adapter->pdev->dev,
			"Memory allocation failure while flashing\n");
		goto be_fw_exit;
	}

	if ((adapter->generation == BE_GEN3) &&
			(get_ufigen_type(fhdr) == BE_GEN3)) {
		fhdr3 = (struct flash_file_hdr_g3 *) fw->data;
		num_imgs = le32_to_cpu(fhdr3->num_imgs);
		for (i = 0; i < num_imgs; i++) {
			img_hdr_ptr = (struct image_hdr *) (fw->data +
					(sizeof(struct flash_file_hdr_g3) +
					 i * sizeof(struct image_hdr)));
			if (le32_to_cpu(img_hdr_ptr->imageid) == 1)
				status = be_flash_data(adapter, fw, &flash_cmd,
							num_imgs);
		}
	} else if ((adapter->generation == BE_GEN2) &&
			(get_ufigen_type(fhdr) == BE_GEN2)) {
		status = be_flash_data(adapter, fw, &flash_cmd, 0);
	} else {
		dev_err(&adapter->pdev->dev,
			"UFI and Interface are not compatible for flashing\n");
		status = -1;
	}

	dma_free_coherent(&adapter->pdev->dev, flash_cmd.size, flash_cmd.va,
			  flash_cmd.dma);
	if (status) {
		dev_err(&adapter->pdev->dev, "Firmware load error\n");
		goto be_fw_exit;
	}

	dev_info(&adapter->pdev->dev, "Firmware flashed successfully\n");

be_fw_exit:
	return status;
}
3267
3268int be_load_fw(struct be_adapter *adapter, u8 *fw_file)
3269{
3270 const struct firmware *fw;
3271 int status;
3272
3273 if (!netif_running(adapter->netdev)) {
3274 dev_err(&adapter->pdev->dev,
3275 "Firmware load not allowed (interface is down)\n");
3276 return -1;
3277 }
3278
3279 status = request_firmware(&fw, fw_file, &adapter->pdev->dev);
3280 if (status)
3281 goto fw_exit;
3282
3283 dev_info(&adapter->pdev->dev, "Flashing firmware file %s\n", fw_file);
3284
3285 if (lancer_chip(adapter))
3286 status = lancer_fw_download(adapter, fw);
3287 else
3288 status = be_fw_download(adapter, fw);
3289
Ajit Khaparde84517482009-09-04 03:12:16 +00003290fw_exit:
3291 release_firmware(fw);
3292 return status;
3293}
3294
stephen hemmingere5686ad2012-01-05 19:10:25 +00003295static const struct net_device_ops be_netdev_ops = {
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003296 .ndo_open = be_open,
3297 .ndo_stop = be_close,
3298 .ndo_start_xmit = be_xmit,
Sathya Perlaa54769f2011-10-24 02:45:00 +00003299 .ndo_set_rx_mode = be_set_rx_mode,
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003300 .ndo_set_mac_address = be_mac_addr_set,
3301 .ndo_change_mtu = be_change_mtu,
Sathya Perlaab1594e2011-07-25 19:10:15 +00003302 .ndo_get_stats64 = be_get_stats64,
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003303 .ndo_validate_addr = eth_validate_addr,
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003304 .ndo_vlan_rx_add_vid = be_vlan_add_vid,
3305 .ndo_vlan_rx_kill_vid = be_vlan_rem_vid,
Ajit Khaparde64600ea2010-07-23 01:50:34 +00003306 .ndo_set_vf_mac = be_set_vf_mac,
Ajit Khaparde1da87b72010-07-23 01:51:22 +00003307 .ndo_set_vf_vlan = be_set_vf_vlan,
Ajit Khapardee1d18732010-07-23 01:52:13 +00003308 .ndo_set_vf_tx_rate = be_set_vf_tx_rate,
Ivan Vecera66268732011-12-08 01:31:21 +00003309 .ndo_get_vf_config = be_get_vf_config,
3310#ifdef CONFIG_NET_POLL_CONTROLLER
3311 .ndo_poll_controller = be_netpoll,
3312#endif
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003313};
3314
/* One-time netdev initialization: advertise offload features, set flags,
 * install the ops/ethtool tables and register one NAPI context per EQ.
 * Note: the features assignment depends on hw_features being set first.
 */
static void be_netdev_init(struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_eq_obj *eqo;
	int i;

	/* User-toggleable offloads (ethtool -K) */
	netdev->hw_features |= NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6 |
		NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_RXCSUM |
		NETIF_F_HW_VLAN_TX;
	if (be_multi_rxq(adapter))
		netdev->hw_features |= NETIF_F_RXHASH;

	/* Enabled by default; VLAN RX/filter are added outside hw_features
	 * and thus stay always-on */
	netdev->features |= netdev->hw_features |
		NETIF_F_HW_VLAN_RX | NETIF_F_HW_VLAN_FILTER;

	netdev->vlan_features |= NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6 |
		NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;

	netdev->priv_flags |= IFF_UNICAST_FLT;

	netdev->flags |= IFF_MULTICAST;

	netif_set_gso_max_size(netdev, 65535 - ETH_HLEN);

	netdev->netdev_ops = &be_netdev_ops;

	SET_ETHTOOL_OPS(netdev, &be_ethtool_ops);

	for_all_evt_queues(adapter, eqo, i)
		netif_napi_add(netdev, &eqo->napi, be_poll, BE_NAPI_WEIGHT);
}
3346
/* Undo be_map_pci_bars(): release whichever BAR mappings were created.
 * Any of these may be NULL when the corresponding BAR was never mapped
 * (e.g. csr is PF-only, roce_db only on the Lancer SLI_INTF_TYPE_3 path).
 */
static void be_unmap_pci_bars(struct be_adapter *adapter)
{
	if (adapter->csr)
		iounmap(adapter->csr);
	if (adapter->db)
		iounmap(adapter->db);
	if (adapter->roce_db.base)
		pci_iounmap(adapter->pdev, adapter->roce_db.base);
}
3356
3357static int lancer_roce_map_pci_bars(struct be_adapter *adapter)
3358{
3359 struct pci_dev *pdev = adapter->pdev;
3360 u8 __iomem *addr;
3361
3362 addr = pci_iomap(pdev, 2, 0);
3363 if (addr == NULL)
3364 return -ENOMEM;
3365
3366 adapter->roce_db.base = addr;
3367 adapter->roce_db.io_addr = pci_resource_start(pdev, 2);
3368 adapter->roce_db.size = 8192;
3369 adapter->roce_db.total_size = pci_resource_len(pdev, 2);
3370 return 0;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003371}
3372
3373static int be_map_pci_bars(struct be_adapter *adapter)
3374{
3375 u8 __iomem *addr;
Sathya Perladb3ea782011-08-22 19:41:52 +00003376 int db_reg;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003377
Sathya Perlafe6d2a32010-11-21 23:25:50 +00003378 if (lancer_chip(adapter)) {
Parav Pandit045508a2012-03-26 14:27:13 +00003379 if (be_type_2_3(adapter)) {
3380 addr = ioremap_nocache(
3381 pci_resource_start(adapter->pdev, 0),
3382 pci_resource_len(adapter->pdev, 0));
3383 if (addr == NULL)
3384 return -ENOMEM;
3385 adapter->db = addr;
3386 }
3387 if (adapter->if_type == SLI_INTF_TYPE_3) {
3388 if (lancer_roce_map_pci_bars(adapter))
3389 goto pci_map_err;
3390 }
Sathya Perlafe6d2a32010-11-21 23:25:50 +00003391 return 0;
3392 }
3393
Sarveshwar Bandiba343c72010-03-31 02:56:12 +00003394 if (be_physfn(adapter)) {
3395 addr = ioremap_nocache(pci_resource_start(adapter->pdev, 2),
3396 pci_resource_len(adapter->pdev, 2));
3397 if (addr == NULL)
3398 return -ENOMEM;
3399 adapter->csr = addr;
3400 }
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003401
Sarveshwar Bandiba343c72010-03-31 02:56:12 +00003402 if (adapter->generation == BE_GEN2) {
Sarveshwar Bandiba343c72010-03-31 02:56:12 +00003403 db_reg = 4;
3404 } else {
Sarveshwar Bandiba343c72010-03-31 02:56:12 +00003405 if (be_physfn(adapter))
3406 db_reg = 4;
3407 else
3408 db_reg = 0;
3409 }
3410 addr = ioremap_nocache(pci_resource_start(adapter->pdev, db_reg),
3411 pci_resource_len(adapter->pdev, db_reg));
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003412 if (addr == NULL)
3413 goto pci_map_err;
Sathya Perla8788fdc2009-07-27 22:52:03 +00003414 adapter->db = addr;
Parav Pandit045508a2012-03-26 14:27:13 +00003415 if (adapter->sli_family == SKYHAWK_SLI_FAMILY) {
3416 adapter->roce_db.size = 4096;
3417 adapter->roce_db.io_addr =
3418 pci_resource_start(adapter->pdev, db_reg);
3419 adapter->roce_db.total_size =
3420 pci_resource_len(adapter->pdev, db_reg);
3421 }
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003422 return 0;
3423pci_map_err:
3424 be_unmap_pci_bars(adapter);
3425 return -ENOMEM;
3426}
3427
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003428static void be_ctrl_cleanup(struct be_adapter *adapter)
3429{
Sathya Perla8788fdc2009-07-27 22:52:03 +00003430 struct be_dma_mem *mem = &adapter->mbox_mem_alloced;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003431
3432 be_unmap_pci_bars(adapter);
3433
3434 if (mem->va)
Ivan Vecera2b7bceb2011-02-02 08:05:12 +00003435 dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
3436 mem->dma);
Sathya Perlae7b909a2009-11-22 22:01:10 +00003437
Sathya Perla5b8821b2011-08-02 19:57:44 +00003438 mem = &adapter->rx_filter;
Sathya Perlae7b909a2009-11-22 22:01:10 +00003439 if (mem->va)
Ivan Vecera2b7bceb2011-02-02 08:05:12 +00003440 dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
3441 mem->dma);
Sathya Perlacc7d7232012-08-28 20:37:43 +00003442 kfree(adapter->pmac_id);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003443}
3444
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003445static int be_ctrl_init(struct be_adapter *adapter)
3446{
Sathya Perla8788fdc2009-07-27 22:52:03 +00003447 struct be_dma_mem *mbox_mem_alloc = &adapter->mbox_mem_alloced;
3448 struct be_dma_mem *mbox_mem_align = &adapter->mbox_mem;
Sathya Perla5b8821b2011-08-02 19:57:44 +00003449 struct be_dma_mem *rx_filter = &adapter->rx_filter;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003450 int status;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003451
3452 status = be_map_pci_bars(adapter);
3453 if (status)
Sathya Perlae7b909a2009-11-22 22:01:10 +00003454 goto done;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003455
3456 mbox_mem_alloc->size = sizeof(struct be_mcc_mailbox) + 16;
Ivan Vecera2b7bceb2011-02-02 08:05:12 +00003457 mbox_mem_alloc->va = dma_alloc_coherent(&adapter->pdev->dev,
3458 mbox_mem_alloc->size,
3459 &mbox_mem_alloc->dma,
3460 GFP_KERNEL);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003461 if (!mbox_mem_alloc->va) {
Sathya Perlae7b909a2009-11-22 22:01:10 +00003462 status = -ENOMEM;
3463 goto unmap_pci_bars;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003464 }
3465 mbox_mem_align->size = sizeof(struct be_mcc_mailbox);
3466 mbox_mem_align->va = PTR_ALIGN(mbox_mem_alloc->va, 16);
3467 mbox_mem_align->dma = PTR_ALIGN(mbox_mem_alloc->dma, 16);
3468 memset(mbox_mem_align->va, 0, sizeof(struct be_mcc_mailbox));
Sathya Perlae7b909a2009-11-22 22:01:10 +00003469
Sathya Perla5b8821b2011-08-02 19:57:44 +00003470 rx_filter->size = sizeof(struct be_cmd_req_rx_filter);
3471 rx_filter->va = dma_alloc_coherent(&adapter->pdev->dev, rx_filter->size,
3472 &rx_filter->dma, GFP_KERNEL);
3473 if (rx_filter->va == NULL) {
Sathya Perlae7b909a2009-11-22 22:01:10 +00003474 status = -ENOMEM;
3475 goto free_mbox;
3476 }
Sathya Perla5b8821b2011-08-02 19:57:44 +00003477 memset(rx_filter->va, 0, rx_filter->size);
Sathya Perlae7b909a2009-11-22 22:01:10 +00003478
Sathya Perlacc7d7232012-08-28 20:37:43 +00003479 /* primary mac needs 1 pmac entry */
3480 adapter->pmac_id = kcalloc(adapter->max_pmac_cnt + 1,
3481 sizeof(*adapter->pmac_id), GFP_KERNEL);
3482 if (!adapter->pmac_id)
3483 return -ENOMEM;
3484
Ivan Vecera29849612010-12-14 05:43:19 +00003485 mutex_init(&adapter->mbox_lock);
Sathya Perla8788fdc2009-07-27 22:52:03 +00003486 spin_lock_init(&adapter->mcc_lock);
3487 spin_lock_init(&adapter->mcc_cq_lock);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003488
Sarveshwar Bandidd131e72010-05-25 16:16:32 -07003489 init_completion(&adapter->flash_compl);
Sathya Perlacf588472010-02-14 21:22:01 +00003490 pci_save_state(adapter->pdev);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003491 return 0;
Sathya Perlae7b909a2009-11-22 22:01:10 +00003492
3493free_mbox:
Ivan Vecera2b7bceb2011-02-02 08:05:12 +00003494 dma_free_coherent(&adapter->pdev->dev, mbox_mem_alloc->size,
3495 mbox_mem_alloc->va, mbox_mem_alloc->dma);
Sathya Perlae7b909a2009-11-22 22:01:10 +00003496
3497unmap_pci_bars:
3498 be_unmap_pci_bars(adapter);
3499
3500done:
3501 return status;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003502}
3503
3504static void be_stats_cleanup(struct be_adapter *adapter)
3505{
Sathya Perla3abcded2010-10-03 22:12:27 -07003506 struct be_dma_mem *cmd = &adapter->stats_cmd;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003507
3508 if (cmd->va)
Ivan Vecera2b7bceb2011-02-02 08:05:12 +00003509 dma_free_coherent(&adapter->pdev->dev, cmd->size,
3510 cmd->va, cmd->dma);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003511}
3512
3513static int be_stats_init(struct be_adapter *adapter)
3514{
Sathya Perla3abcded2010-10-03 22:12:27 -07003515 struct be_dma_mem *cmd = &adapter->stats_cmd;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003516
Selvin Xavier005d5692011-05-16 07:36:35 +00003517 if (adapter->generation == BE_GEN2) {
Ajit Khaparde89a88ab2011-05-16 07:36:18 +00003518 cmd->size = sizeof(struct be_cmd_req_get_stats_v0);
Selvin Xavier005d5692011-05-16 07:36:35 +00003519 } else {
3520 if (lancer_chip(adapter))
3521 cmd->size = sizeof(struct lancer_cmd_req_pport_stats);
3522 else
3523 cmd->size = sizeof(struct be_cmd_req_get_stats_v1);
3524 }
Ivan Vecera2b7bceb2011-02-02 08:05:12 +00003525 cmd->va = dma_alloc_coherent(&adapter->pdev->dev, cmd->size, &cmd->dma,
3526 GFP_KERNEL);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003527 if (cmd->va == NULL)
3528 return -1;
David S. Millerd291b9a2010-01-28 21:36:21 -08003529 memset(cmd->va, 0, cmd->size);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003530 return 0;
3531}
3532
/* PCI remove callback: tear the adapter down in the reverse order of
 * probe. The recovery poller is stopped before the netdev goes away,
 * and the FW is told we are done only after be_clear().
 */
static void __devexit be_remove(struct pci_dev *pdev)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);

	if (!adapter)
		return;

	be_roce_dev_remove(adapter);

	/* stop the error-recovery poller before tearing anything down */
	cancel_delayed_work_sync(&adapter->func_recovery_work);

	unregister_netdev(adapter->netdev);

	be_clear(adapter);

	/* tell fw we're done with firing cmds */
	be_cmd_fw_clean(adapter);

	be_stats_cleanup(adapter);

	be_ctrl_cleanup(adapter);

	pci_disable_pcie_error_reporting(pdev);

	pci_set_drvdata(pdev, NULL);
	pci_release_regions(pdev);
	pci_disable_device(pdev);

	free_netdev(adapter->netdev);
}
3563
Ajit Khaparde4762f6c2012-03-18 06:23:11 +00003564bool be_is_wol_supported(struct be_adapter *adapter)
3565{
3566 return ((adapter->wol_cap & BE_WOL_CAP) &&
3567 !be_is_wol_excluded(adapter)) ? true : false;
3568}
3569
/* Query the FW's extended-FAT capabilities and return the UART trace
 * level configured for module 0. Returns 0 if the allocation or the
 * FW query fails (treated as "no logging").
 */
u32 be_get_fw_log_level(struct be_adapter *adapter)
{
	struct be_dma_mem extfat_cmd;
	struct be_fat_conf_params *cfgs;
	int status;
	u32 level = 0;
	int j;

	memset(&extfat_cmd, 0, sizeof(struct be_dma_mem));
	extfat_cmd.size = sizeof(struct be_cmd_resp_get_ext_fat_caps);
	extfat_cmd.va = pci_alloc_consistent(adapter->pdev, extfat_cmd.size,
					     &extfat_cmd.dma);

	if (!extfat_cmd.va) {
		dev_err(&adapter->pdev->dev, "%s: Memory allocation failure\n",
			__func__);
		goto err;
	}

	status = be_cmd_get_ext_fat_capabilites(adapter, &extfat_cmd);
	if (!status) {
		/* the config params follow the response header in the
		 * DMA buffer */
		cfgs = (struct be_fat_conf_params *)(extfat_cmd.va +
						sizeof(struct be_cmd_resp_hdr));
		/* pick the dbg level of the (last) UART-mode trace entry */
		for (j = 0; j < le32_to_cpu(cfgs->module[0].num_modes); j++) {
			if (cfgs->module[0].trace_lvl[j].mode == MODE_UART)
				level = cfgs->module[0].trace_lvl[j].dbg_lvl;
		}
	}
	pci_free_consistent(adapter->pdev, extfat_cmd.size, extfat_cmd.va,
			    extfat_cmd.dma);
err:
	return level;
}
/* Query the FW for the adapter's initial configuration: port/function
 * info, VLAN and pmac limits, controller attributes, WoL capability and
 * the FW log level that seeds msg_enable. Returns 0 or a negative errno.
 */
static int be_get_initial_config(struct be_adapter *adapter)
{
	int status;
	u32 level;

	status = be_cmd_query_fw_cfg(adapter, &adapter->port_num,
			&adapter->function_mode, &adapter->function_caps);
	if (status)
		return status;

	/* in Flex10 mode the VLAN table is split 8 ways across functions */
	if (adapter->function_mode & FLEX10_MODE)
		adapter->max_vlans = BE_NUM_VLANS_SUPPORTED/8;
	else
		adapter->max_vlans = BE_NUM_VLANS_SUPPORTED;

	/* PFs get more unicast pmac entries than VFs */
	if (be_physfn(adapter))
		adapter->max_pmac_cnt = BE_UC_PMAC_COUNT;
	else
		adapter->max_pmac_cnt = BE_VF_UC_PMAC_COUNT;

	status = be_cmd_get_cntl_attributes(adapter);
	if (status)
		return status;

	status = be_cmd_get_acpi_wol_cap(adapter);
	if (status) {
		/* in case of a failure to get wol capabillities
		 * check the exclusion list to determine WOL capability */
		if (!be_is_wol_excluded(adapter))
			adapter->wol_cap |= BE_WOL_CAP;
	}

	if (be_is_wol_supported(adapter))
		adapter->wol = true;

	/* Must be a power of 2 or else MODULO will BUG_ON */
	adapter->be_get_temp_freq = 64;

	level = be_get_fw_log_level(adapter);
	adapter->msg_enable = level <= FW_LOG_LEVEL_DEFAULT ? NETIF_MSG_HW : 0;

	return 0;
}
3646
Sathya Perla39f1d942012-05-08 19:41:24 +00003647static int be_dev_type_check(struct be_adapter *adapter)
Sathya Perlafe6d2a32010-11-21 23:25:50 +00003648{
3649 struct pci_dev *pdev = adapter->pdev;
3650 u32 sli_intf = 0, if_type;
3651
3652 switch (pdev->device) {
3653 case BE_DEVICE_ID1:
3654 case OC_DEVICE_ID1:
3655 adapter->generation = BE_GEN2;
3656 break;
3657 case BE_DEVICE_ID2:
3658 case OC_DEVICE_ID2:
3659 adapter->generation = BE_GEN3;
3660 break;
3661 case OC_DEVICE_ID3:
Mammatha Edhala12f4d0a2011-05-18 03:26:22 +00003662 case OC_DEVICE_ID4:
Sathya Perlafe6d2a32010-11-21 23:25:50 +00003663 pci_read_config_dword(pdev, SLI_INTF_REG_OFFSET, &sli_intf);
Parav Pandit045508a2012-03-26 14:27:13 +00003664 adapter->if_type = (sli_intf & SLI_INTF_IF_TYPE_MASK) >>
3665 SLI_INTF_IF_TYPE_SHIFT;
Sathya Perlafe6d2a32010-11-21 23:25:50 +00003666 if_type = (sli_intf & SLI_INTF_IF_TYPE_MASK) >>
3667 SLI_INTF_IF_TYPE_SHIFT;
Sathya Perlafe6d2a32010-11-21 23:25:50 +00003668 if (((sli_intf & SLI_INTF_VALID_MASK) != SLI_INTF_VALID) ||
Parav Pandit045508a2012-03-26 14:27:13 +00003669 !be_type_2_3(adapter)) {
3670 dev_err(&pdev->dev, "SLI_INTF reg val is not valid\n");
3671 return -EINVAL;
3672 }
3673 adapter->sli_family = ((sli_intf & SLI_INTF_FAMILY_MASK) >>
3674 SLI_INTF_FAMILY_SHIFT);
3675 adapter->generation = BE_GEN3;
3676 break;
3677 case OC_DEVICE_ID5:
3678 pci_read_config_dword(pdev, SLI_INTF_REG_OFFSET, &sli_intf);
3679 if ((sli_intf & SLI_INTF_VALID_MASK) != SLI_INTF_VALID) {
Sathya Perlafe6d2a32010-11-21 23:25:50 +00003680 dev_err(&pdev->dev, "SLI_INTF reg val is not valid\n");
3681 return -EINVAL;
3682 }
Sathya Perlafe6d2a32010-11-21 23:25:50 +00003683 adapter->sli_family = ((sli_intf & SLI_INTF_FAMILY_MASK) >>
3684 SLI_INTF_FAMILY_SHIFT);
3685 adapter->generation = BE_GEN3;
3686 break;
3687 default:
3688 adapter->generation = 0;
3689 }
Sathya Perla39f1d942012-05-08 19:41:24 +00003690
3691 pci_read_config_dword(adapter->pdev, SLI_INTF_REG_OFFSET, &sli_intf);
3692 adapter->virtfn = (sli_intf & SLI_INTF_FT_MASK) ? 1 : 0;
Sathya Perlafe6d2a32010-11-21 23:25:50 +00003693 return 0;
3694}
3695
/* Attempt to recover a Lancer function after a SLIPORT error: wait for
 * the chip to report ready, close and clear the interface, reset the
 * error flags and rebuild via be_setup()/be_open(). Returns 0 on
 * success, a negative errno on failure.
 */
static int lancer_recover_func(struct be_adapter *adapter)
{
	int status;

	status = lancer_test_and_set_rdy_state(adapter);
	if (status)
		goto err;

	if (netif_running(adapter->netdev))
		be_close(adapter->netdev);

	be_clear(adapter);

	/* error flags are cleared only once the chip is ready again */
	adapter->hw_error = false;
	adapter->fw_timeout = false;

	status = be_setup(adapter);
	if (status)
		goto err;

	if (netif_running(adapter->netdev)) {
		status = be_open(adapter->netdev);
		if (status)
			goto err;
	}

	dev_err(&adapter->pdev->dev,
		"Adapter SLIPORT recovery succeeded\n");
	return 0;
err:
	dev_err(&adapter->pdev->dev,
		"Adapter SLIPORT recovery failed\n");

	return status;
}
3731
/* Periodic (1s) error-recovery poller: check for HW errors and, on
 * Lancer chips, attempt a SLIPORT function recovery with the netdev
 * detached. Always re-arms itself.
 */
static void be_func_recovery_task(struct work_struct *work)
{
	struct be_adapter *adapter =
		container_of(work, struct be_adapter, func_recovery_work.work);
	int status;

	be_detect_error(adapter);

	if (adapter->hw_error && lancer_chip(adapter)) {

		/* an EEH recovery is in charge of the device; stay out */
		if (adapter->eeh_error)
			goto out;

		rtnl_lock();
		netif_device_detach(adapter->netdev);
		rtnl_unlock();

		status = lancer_recover_func(adapter);

		/* re-attach only when recovery succeeded */
		if (!status)
			netif_device_attach(adapter->netdev);
	}

out:
	schedule_delayed_work(&adapter->func_recovery_work,
			      msecs_to_jiffies(1000));
}
3759
/* Periodic (1s) housekeeping: reap MCC completions while the interface
 * is down, fire async stats queries, periodically read die temperature,
 * replenish RX rings that ran dry and adapt EQ delays. Re-arms itself.
 */
static void be_worker(struct work_struct *work)
{
	struct be_adapter *adapter =
		container_of(work, struct be_adapter, work.work);
	struct be_rx_obj *rxo;
	struct be_eq_obj *eqo;
	int i;

	/* when interrupts are not yet enabled, just reap any pending
	 * mcc completions */
	if (!netif_running(adapter->netdev)) {
		local_bh_disable();
		be_process_mcc(adapter);
		local_bh_enable();
		goto reschedule;
	}

	/* don't queue another stats cmd while one is still in flight */
	if (!adapter->stats_cmd_sent) {
		if (lancer_chip(adapter))
			lancer_cmd_get_pport_stats(adapter,
						&adapter->stats_cmd);
		else
			be_cmd_get_stats(adapter, &adapter->stats_cmd);
	}

	/* read die temperature every be_get_temp_freq ticks */
	if (MODULO(adapter->work_counter, adapter->be_get_temp_freq) == 0)
		be_cmd_get_die_temperature(adapter);

	for_all_rx_queues(adapter, rxo, i) {
		/* refill rings that were starved since the last pass */
		if (rxo->rx_post_starved) {
			rxo->rx_post_starved = false;
			be_post_rx_frags(rxo, GFP_KERNEL);
		}
	}

	for_all_evt_queues(adapter, eqo, i)
		be_eqd_update(adapter, eqo);

reschedule:
	adapter->work_counter++;
	schedule_delayed_work(&adapter->work, msecs_to_jiffies(1000));
}
3802
Sathya Perla39f1d942012-05-08 19:41:24 +00003803static bool be_reset_required(struct be_adapter *adapter)
3804{
Sathya Perlad79c0a22012-06-05 19:37:22 +00003805 return be_find_vfs(adapter, ENABLED) > 0 ? false : true;
Sathya Perla39f1d942012-05-08 19:41:24 +00003806}
3807
/* PCI probe: bring up a newly discovered adapter — enable PCI/DMA, map
 * BARs and init the control path, handshake with FW, set up queues and
 * register the netdev. Error paths unwind in reverse order through the
 * labels at the bottom.
 */
static int __devinit be_probe(struct pci_dev *pdev,
			const struct pci_device_id *pdev_id)
{
	int status = 0;
	struct be_adapter *adapter;
	struct net_device *netdev;
	char port_name;

	status = pci_enable_device(pdev);
	if (status)
		goto do_none;

	status = pci_request_regions(pdev, DRV_NAME);
	if (status)
		goto disable_dev;
	pci_set_master(pdev);

	netdev = alloc_etherdev_mqs(sizeof(*adapter), MAX_TX_QS, MAX_RX_QS);
	if (netdev == NULL) {
		status = -ENOMEM;
		goto rel_reg;
	}
	adapter = netdev_priv(netdev);
	adapter->pdev = pdev;
	pci_set_drvdata(pdev, adapter);

	status = be_dev_type_check(adapter);
	if (status)
		goto free_netdev;

	adapter->netdev = netdev;
	SET_NETDEV_DEV(netdev, &pdev->dev);

	/* prefer 64-bit DMA; fall back to 32-bit */
	status = dma_set_mask(&pdev->dev, DMA_BIT_MASK(64));
	if (!status) {
		netdev->features |= NETIF_F_HIGHDMA;
	} else {
		status = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
		if (status) {
			dev_err(&pdev->dev, "Could not set PCI DMA Mask\n");
			goto free_netdev;
		}
	}

	/* AER is best-effort; probe continues without it */
	status = pci_enable_pcie_error_reporting(pdev);
	if (status)
		dev_err(&pdev->dev, "Could not use PCIe error reporting\n");

	status = be_ctrl_init(adapter);
	if (status)
		goto free_netdev;

	/* sync up with fw's ready state */
	if (be_physfn(adapter)) {
		status = be_fw_wait_ready(adapter);
		if (status)
			goto ctrl_clean;
	}

	/* tell fw we're ready to fire cmds */
	status = be_cmd_fw_init(adapter);
	if (status)
		goto ctrl_clean;

	/* skip the reset when VFs are already enabled (see
	 * be_reset_required()) */
	if (be_reset_required(adapter)) {
		status = be_cmd_reset_function(adapter);
		if (status)
			goto ctrl_clean;
	}

	/* The INTR bit may be set in the card when probed by a kdump kernel
	 * after a crash.
	 */
	if (!lancer_chip(adapter))
		be_intr_set(adapter, false);

	status = be_stats_init(adapter);
	if (status)
		goto ctrl_clean;

	status = be_get_initial_config(adapter);
	if (status)
		goto stats_clean;

	INIT_DELAYED_WORK(&adapter->work, be_worker);
	INIT_DELAYED_WORK(&adapter->func_recovery_work, be_func_recovery_task);
	adapter->rx_fc = adapter->tx_fc = true;

	status = be_setup(adapter);
	if (status)
		goto msix_disable;

	be_netdev_init(netdev);
	status = register_netdev(netdev);
	if (status != 0)
		goto unsetup;

	be_roce_dev_add(adapter);

	schedule_delayed_work(&adapter->func_recovery_work,
			      msecs_to_jiffies(1000));

	be_cmd_query_port_name(adapter, &port_name);

	dev_info(&pdev->dev, "%s: %s port %c\n", netdev->name, nic_name(pdev),
		 port_name);

	return 0;

unsetup:
	be_clear(adapter);
msix_disable:
	be_msix_disable(adapter);
stats_clean:
	be_stats_cleanup(adapter);
ctrl_clean:
	be_ctrl_cleanup(adapter);
free_netdev:
	free_netdev(netdev);
	pci_set_drvdata(pdev, NULL);
rel_reg:
	pci_release_regions(pdev);
disable_dev:
	pci_disable_device(pdev);
do_none:
	dev_err(&pdev->dev, "%s initialization failed\n", nic_name(pdev));
	return status;
}
3936
/* PM suspend callback: arm WoL if enabled, stop the recovery poller,
 * close and clear the interface, then power the device down.
 */
static int be_suspend(struct pci_dev *pdev, pm_message_t state)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);
	struct net_device *netdev = adapter->netdev;

	if (adapter->wol)
		be_setup_wol(adapter, true);

	cancel_delayed_work_sync(&adapter->func_recovery_work);

	netif_device_detach(netdev);
	if (netif_running(netdev)) {
		rtnl_lock();
		be_close(netdev);
		rtnl_unlock();
	}
	be_clear(adapter);

	pci_save_state(pdev);
	pci_disable_device(pdev);
	pci_set_power_state(pdev, pci_choose_state(pdev, state));
	return 0;
}
3960
/* PM resume callback: re-enable the device, re-init FW cmds, rebuild via
 * be_setup(), reopen the interface if it was running, restart the
 * recovery poller and disarm WoL.
 */
static int be_resume(struct pci_dev *pdev)
{
	int status = 0;
	struct be_adapter *adapter = pci_get_drvdata(pdev);
	struct net_device *netdev = adapter->netdev;

	netif_device_detach(netdev);

	status = pci_enable_device(pdev);
	if (status)
		return status;

	pci_set_power_state(pdev, 0);
	pci_restore_state(pdev);

	/* tell fw we're ready to fire cmds */
	status = be_cmd_fw_init(adapter);
	if (status)
		return status;

	/* NOTE(review): be_setup()'s status is not checked here — looks
	 * deliberate best-effort on resume, but worth confirming */
	be_setup(adapter);
	if (netif_running(netdev)) {
		rtnl_lock();
		be_open(netdev);
		rtnl_unlock();
	}

	schedule_delayed_work(&adapter->func_recovery_work,
			      msecs_to_jiffies(1000));
	netif_device_attach(netdev);

	if (adapter->wol)
		be_setup_wol(adapter, false);

	return 0;
}
3997
/*
 * An FLR will stop BE from DMAing any data.
 */
static void be_shutdown(struct pci_dev *pdev)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);

	if (!adapter)
		return;

	/* stop both periodic workers before quiescing the device */
	cancel_delayed_work_sync(&adapter->work);
	cancel_delayed_work_sync(&adapter->func_recovery_work);

	netif_device_detach(adapter->netdev);

	if (adapter->wol)
		be_setup_wol(adapter, true);

	/* function reset quiesces DMA (see the FLR note above) */
	be_cmd_reset_function(adapter);

	pci_disable_device(pdev);
}
4020
/* AER/EEH error_detected callback: flag the error, stop the recovery
 * poller, detach and close the netdev, and tell the PCI core whether a
 * slot reset may recover the device.
 */
static pci_ers_result_t be_eeh_err_detected(struct pci_dev *pdev,
				pci_channel_state_t state)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);
	struct net_device *netdev = adapter->netdev;

	dev_err(&adapter->pdev->dev, "EEH error detected\n");

	/* keeps be_func_recovery_task() from interfering */
	adapter->eeh_error = true;

	cancel_delayed_work_sync(&adapter->func_recovery_work);

	rtnl_lock();
	netif_device_detach(netdev);
	rtnl_unlock();

	if (netif_running(netdev)) {
		rtnl_lock();
		be_close(netdev);
		rtnl_unlock();
	}
	be_clear(adapter);

	if (state == pci_channel_io_perm_failure)
		return PCI_ERS_RESULT_DISCONNECT;

	pci_disable_device(pdev);

	/* The error could cause the FW to trigger a flash debug dump.
	 * Resetting the card while flash dump is in progress
	 * can cause it not to recover; wait for it to finish
	 */
	ssleep(30);
	return PCI_ERS_RESULT_NEED_RESET;
}
4056
/* AER/EEH slot_reset callback: clear driver error state, re-enable the
 * device, restore PCI state and wait for FW readiness. Returns
 * RECOVERED so be_eeh_resume() runs, or DISCONNECT on failure.
 */
static pci_ers_result_t be_eeh_reset(struct pci_dev *pdev)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);
	int status;

	dev_info(&adapter->pdev->dev, "EEH reset\n");
	be_clear_all_error(adapter);

	status = pci_enable_device(pdev);
	if (status)
		return PCI_ERS_RESULT_DISCONNECT;

	pci_set_master(pdev);
	pci_set_power_state(pdev, 0);
	pci_restore_state(pdev);

	/* Check if card is ok and fw is ready */
	status = be_fw_wait_ready(adapter);
	if (status)
		return PCI_ERS_RESULT_DISCONNECT;

	pci_cleanup_aer_uncorrect_error_status(pdev);
	return PCI_ERS_RESULT_RECOVERED;
}
4081
/* AER/EEH resume callback: re-init FW cmds, reset the function, rebuild
 * via be_setup(), reopen the interface if it was running, and restart
 * the recovery poller. Logs an error on any failure.
 */
static void be_eeh_resume(struct pci_dev *pdev)
{
	int status = 0;
	struct be_adapter *adapter = pci_get_drvdata(pdev);
	struct net_device *netdev = adapter->netdev;

	dev_info(&adapter->pdev->dev, "EEH resume\n");

	pci_save_state(pdev);

	/* tell fw we're ready to fire cmds */
	status = be_cmd_fw_init(adapter);
	if (status)
		goto err;

	status = be_cmd_reset_function(adapter);
	if (status)
		goto err;

	status = be_setup(adapter);
	if (status)
		goto err;

	if (netif_running(netdev)) {
		status = be_open(netdev);
		if (status)
			goto err;
	}

	schedule_delayed_work(&adapter->func_recovery_work,
			      msecs_to_jiffies(1000));
	netif_device_attach(netdev);
	return;
err:
	dev_err(&adapter->pdev->dev, "EEH resume failed\n");
}
4118
/* PCI error-recovery (AER/EEH) entry points */
static struct pci_error_handlers be_eeh_handlers = {
	.error_detected = be_eeh_err_detected,
	.slot_reset = be_eeh_reset,
	.resume = be_eeh_resume,
};
4124
/* PCI driver glue: probe/remove, legacy PM callbacks and error handlers */
static struct pci_driver be_driver = {
	.name = DRV_NAME,
	.id_table = be_dev_ids,
	.probe = be_probe,
	.remove = be_remove,
	.suspend = be_suspend,
	.resume = be_resume,
	.shutdown = be_shutdown,
	.err_handler = &be_eeh_handlers
};
4135
4136static int __init be_init_module(void)
4137{
Joe Perches8e95a202009-12-03 07:58:21 +00004138 if (rx_frag_size != 8192 && rx_frag_size != 4096 &&
4139 rx_frag_size != 2048) {
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004140 printk(KERN_WARNING DRV_NAME
4141 " : Module param rx_frag_size must be 2048/4096/8192."
4142 " Using 2048\n");
4143 rx_frag_size = 2048;
4144 }
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004145
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004146 return pci_register_driver(&be_driver);
4147}
4148module_init(be_init_module);
4149
/* Module unload: unregister the PCI driver; per-device teardown runs
 * through be_remove().
 */
static void __exit be_exit_module(void)
{
	pci_unregister_driver(&be_driver);
}
module_exit(be_exit_module);