blob: fa1743036a8877f12e202410e8a010782f917115 [file] [log] [blame]
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001/*
Ajit Khaparded2145cd2011-03-16 08:20:46 +00002 * Copyright (C) 2005 - 2011 Emulex
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003 * All rights reserved.
4 *
5 * This program is free software; you can redistribute it and/or
6 * modify it under the terms of the GNU General Public License version 2
7 * as published by the Free Software Foundation. The full GNU General
8 * Public License is included in this distribution in the file called COPYING.
9 *
10 * Contact Information:
Ajit Khaparded2145cd2011-03-16 08:20:46 +000011 * linux-drivers@emulex.com
Sathya Perla6b7c5b92009-03-11 23:32:03 -070012 *
Ajit Khaparded2145cd2011-03-16 08:20:46 +000013 * Emulex
14 * 3333 Susan Street
15 * Costa Mesa, CA 92626
Sathya Perla6b7c5b92009-03-11 23:32:03 -070016 */
17
Paul Gortmaker70c71602011-05-22 16:47:17 -040018#include <linux/prefetch.h>
Paul Gortmaker9d9779e2011-07-03 15:21:01 -040019#include <linux/module.h>
Sathya Perla6b7c5b92009-03-11 23:32:03 -070020#include "be.h"
Sathya Perla8788fdc2009-07-27 22:52:03 +000021#include "be_cmds.h"
Stephen Hemminger65f71b82009-03-27 00:25:24 -070022#include <asm/div64.h>
Sathya Perlad6b6d982012-09-05 01:56:48 +000023#include <linux/aer.h>
Sathya Perla6b7c5b92009-03-11 23:32:03 -070024
25MODULE_VERSION(DRV_VER);
26MODULE_DEVICE_TABLE(pci, be_dev_ids);
27MODULE_DESCRIPTION(DRV_DESC " " DRV_VER);
28MODULE_AUTHOR("ServerEngines Corporation");
29MODULE_LICENSE("GPL");
30
/* Number of PCI virtual functions to enable (0 = SR-IOV disabled).
 * Read-only after load (S_IRUGO, no write permission).
 */
static unsigned int num_vfs;
module_param(num_vfs, uint, S_IRUGO);
MODULE_PARM_DESC(num_vfs, "Number of PCI VFs to initialize");

/* Size of each receive fragment buffer posted to the hardware RX rings.
 * Read-only after load.
 */
static ushort rx_frag_size = 2048;
module_param(rx_frag_size, ushort, S_IRUGO);
MODULE_PARM_DESC(rx_frag_size, "Size of a fragment that holds rcvd data.");
38
/* PCI vendor/device IDs this driver binds to: BE_* IDs under the
 * ServerEngines vendor ID and OC_* (OneConnect) IDs under the Emulex
 * vendor ID. The zero entry terminates the table.
 */
static DEFINE_PCI_DEVICE_TABLE(be_dev_ids) = {
	{ PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID1) },
	{ PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID2) },
	{ PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID1) },
	{ PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID2) },
	{ PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID3)},
	{ PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID4)},
	{ PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID5)},
	{ 0 }
};
MODULE_DEVICE_TABLE(pci, be_dev_ids);
/* UE Status Low CSR */
/* Human-readable names for bits 0..31 of the Unrecoverable Error status
 * low register; the array index is the bit position.
 * NOTE(review): several entries carry trailing spaces — preserved as-is
 * since they are emitted verbatim in error logs.
 */
static const char * const ue_status_low_desc[] = {
	"CEV",
	"CTX",
	"DBUF",
	"ERX",
	"Host",
	"MPU",
	"NDMA",
	"PTC ",
	"RDMA ",
	"RXF ",
	"RXIPS ",
	"RXULP0 ",
	"RXULP1 ",
	"RXULP2 ",
	"TIM ",
	"TPOST ",
	"TPRE ",
	"TXIPS ",
	"TXULP0 ",
	"TXULP1 ",
	"UC ",
	"WDMA ",
	"TXULP2 ",
	"HOST1 ",
	"P0_OB_LINK ",
	"P1_OB_LINK ",
	"HOST_GPIO ",
	"MBOX ",
	"AXGMAC0",
	"AXGMAC1",
	"JTAG",
	"MPU_INTPEND"
};
/* UE Status High CSR */
/* Names for bits 0..31 of the Unrecoverable Error status high register;
 * index equals bit position. "Unknown" marks undocumented bits.
 */
static const char * const ue_status_hi_desc[] = {
	"LPCMEMHOST",
	"MGMT_MAC",
	"PCS0ONLINE",
	"MPU_IRAM",
	"PCS1ONLINE",
	"PCTL0",
	"PCTL1",
	"PMEM",
	"RR",
	"TXPB",
	"RXPP",
	"XAUI",
	"TXP",
	"ARM",
	"IPC",
	"HOST2",
	"HOST3",
	"HOST4",
	"HOST5",
	"HOST6",
	"HOST7",
	"HOST8",
	"HOST9",
	"NETC",
	"Unknown",
	"Unknown",
	"Unknown",
	"Unknown",
	"Unknown",
	"Unknown",
	"Unknown",
	"Unknown"
};
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700120
Sathya Perla752961a2011-10-24 02:45:03 +0000121/* Is BE in a multi-channel mode */
122static inline bool be_is_mc(struct be_adapter *adapter) {
123 return (adapter->function_mode & FLEX10_MODE ||
124 adapter->function_mode & VNIC_MODE ||
125 adapter->function_mode & UMC_ENABLED);
126}
127
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700128static void be_queue_free(struct be_adapter *adapter, struct be_queue_info *q)
129{
130 struct be_dma_mem *mem = &q->dma_mem;
Sathya Perla1cfafab2012-02-23 18:50:15 +0000131 if (mem->va) {
Ivan Vecera2b7bceb2011-02-02 08:05:12 +0000132 dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
133 mem->dma);
Sathya Perla1cfafab2012-02-23 18:50:15 +0000134 mem->va = NULL;
135 }
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700136}
137
138static int be_queue_alloc(struct be_adapter *adapter, struct be_queue_info *q,
139 u16 len, u16 entry_size)
140{
141 struct be_dma_mem *mem = &q->dma_mem;
142
143 memset(q, 0, sizeof(*q));
144 q->len = len;
145 q->entry_size = entry_size;
146 mem->size = len * entry_size;
Ivan Vecera2b7bceb2011-02-02 08:05:12 +0000147 mem->va = dma_alloc_coherent(&adapter->pdev->dev, mem->size, &mem->dma,
148 GFP_KERNEL);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700149 if (!mem->va)
Sathya Perla10ef9ab2012-02-09 18:05:27 +0000150 return -ENOMEM;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700151 memset(mem->va, 0, mem->size);
152 return 0;
153}
154
Sathya Perla8788fdc2009-07-27 22:52:03 +0000155static void be_intr_set(struct be_adapter *adapter, bool enable)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700156{
Sathya Perladb3ea782011-08-22 19:41:52 +0000157 u32 reg, enabled;
Sathya Perla5f0b8492009-07-27 22:52:56 +0000158
Padmanabh Ratnakarf67ef7b2012-07-12 03:57:09 +0000159 if (adapter->eeh_error)
Sathya Perlacf588472010-02-14 21:22:01 +0000160 return;
161
Sathya Perladb3ea782011-08-22 19:41:52 +0000162 pci_read_config_dword(adapter->pdev, PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET,
163 &reg);
164 enabled = reg & MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
165
Sathya Perla5f0b8492009-07-27 22:52:56 +0000166 if (!enabled && enable)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700167 reg |= MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
Sathya Perla5f0b8492009-07-27 22:52:56 +0000168 else if (enabled && !enable)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700169 reg &= ~MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
Sathya Perla5f0b8492009-07-27 22:52:56 +0000170 else
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700171 return;
Sathya Perla5f0b8492009-07-27 22:52:56 +0000172
Sathya Perladb3ea782011-08-22 19:41:52 +0000173 pci_write_config_dword(adapter->pdev,
174 PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET, reg);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700175}
176
Sathya Perla8788fdc2009-07-27 22:52:03 +0000177static void be_rxq_notify(struct be_adapter *adapter, u16 qid, u16 posted)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700178{
179 u32 val = 0;
180 val |= qid & DB_RQ_RING_ID_MASK;
181 val |= posted << DB_RQ_NUM_POSTED_SHIFT;
Sathya Perlaf3eb62d2010-06-29 00:11:17 +0000182
183 wmb();
Sathya Perla8788fdc2009-07-27 22:52:03 +0000184 iowrite32(val, adapter->db + DB_RQ_OFFSET);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700185}
186
Sathya Perla8788fdc2009-07-27 22:52:03 +0000187static void be_txq_notify(struct be_adapter *adapter, u16 qid, u16 posted)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700188{
189 u32 val = 0;
190 val |= qid & DB_TXULP_RING_ID_MASK;
191 val |= (posted & DB_TXULP_NUM_POSTED_MASK) << DB_TXULP_NUM_POSTED_SHIFT;
Sathya Perlaf3eb62d2010-06-29 00:11:17 +0000192
193 wmb();
Sathya Perla8788fdc2009-07-27 22:52:03 +0000194 iowrite32(val, adapter->db + DB_TXULP1_OFFSET);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700195}
196
Sathya Perla8788fdc2009-07-27 22:52:03 +0000197static void be_eq_notify(struct be_adapter *adapter, u16 qid,
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700198 bool arm, bool clear_int, u16 num_popped)
199{
200 u32 val = 0;
201 val |= qid & DB_EQ_RING_ID_MASK;
Sathya Perlafe6d2a32010-11-21 23:25:50 +0000202 val |= ((qid & DB_EQ_RING_ID_EXT_MASK) <<
203 DB_EQ_RING_ID_EXT_MASK_SHIFT);
Sathya Perlacf588472010-02-14 21:22:01 +0000204
Padmanabh Ratnakarf67ef7b2012-07-12 03:57:09 +0000205 if (adapter->eeh_error)
Sathya Perlacf588472010-02-14 21:22:01 +0000206 return;
207
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700208 if (arm)
209 val |= 1 << DB_EQ_REARM_SHIFT;
210 if (clear_int)
211 val |= 1 << DB_EQ_CLR_SHIFT;
212 val |= 1 << DB_EQ_EVNT_SHIFT;
213 val |= num_popped << DB_EQ_NUM_POPPED_SHIFT;
Sathya Perla8788fdc2009-07-27 22:52:03 +0000214 iowrite32(val, adapter->db + DB_EQ_OFFSET);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700215}
216
Sathya Perla8788fdc2009-07-27 22:52:03 +0000217void be_cq_notify(struct be_adapter *adapter, u16 qid, bool arm, u16 num_popped)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700218{
219 u32 val = 0;
220 val |= qid & DB_CQ_RING_ID_MASK;
Sathya Perlafe6d2a32010-11-21 23:25:50 +0000221 val |= ((qid & DB_CQ_RING_ID_EXT_MASK) <<
222 DB_CQ_RING_ID_EXT_MASK_SHIFT);
Sathya Perlacf588472010-02-14 21:22:01 +0000223
Padmanabh Ratnakarf67ef7b2012-07-12 03:57:09 +0000224 if (adapter->eeh_error)
Sathya Perlacf588472010-02-14 21:22:01 +0000225 return;
226
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700227 if (arm)
228 val |= 1 << DB_CQ_REARM_SHIFT;
229 val |= num_popped << DB_CQ_NUM_POPPED_SHIFT;
Sathya Perla8788fdc2009-07-27 22:52:03 +0000230 iowrite32(val, adapter->db + DB_CQ_OFFSET);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700231}
232
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700233static int be_mac_addr_set(struct net_device *netdev, void *p)
234{
235 struct be_adapter *adapter = netdev_priv(netdev);
236 struct sockaddr *addr = p;
237 int status = 0;
Somnath Koture3a7ae22011-10-27 07:14:05 +0000238 u8 current_mac[ETH_ALEN];
Ajit Khapardefbc13f02012-03-18 06:23:21 +0000239 u32 pmac_id = adapter->pmac_id[0];
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700240
Ajit Khapardeca9e4982009-11-29 17:56:26 +0000241 if (!is_valid_ether_addr(addr->sa_data))
242 return -EADDRNOTAVAIL;
243
Sathya Perla5ee49792012-09-28 04:39:41 +0000244 status = be_cmd_mac_addr_query(adapter, current_mac, false,
245 adapter->if_handle, 0);
Sathya Perlaa65027e2009-08-17 00:58:04 +0000246 if (status)
Somnath Koture3a7ae22011-10-27 07:14:05 +0000247 goto err;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700248
Somnath Koture3a7ae22011-10-27 07:14:05 +0000249 if (memcmp(addr->sa_data, current_mac, ETH_ALEN)) {
250 status = be_cmd_pmac_add(adapter, (u8 *)addr->sa_data,
Ajit Khapardefbc13f02012-03-18 06:23:21 +0000251 adapter->if_handle, &adapter->pmac_id[0], 0);
Somnath Koture3a7ae22011-10-27 07:14:05 +0000252 if (status)
253 goto err;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700254
Somnath Koture3a7ae22011-10-27 07:14:05 +0000255 be_cmd_pmac_del(adapter, adapter->if_handle, pmac_id, 0);
256 }
257 memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
258 return 0;
259err:
260 dev_err(&adapter->pdev->dev, "MAC %pM set Failed\n", addr->sa_data);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700261 return status;
262}
263
/* Copy BE2 (v0-format) hardware stats returned by the GET_STATS firmware
 * command into the chip-agnostic adapter->drv_stats fields.
 */
static void populate_be2_stats(struct be_adapter *adapter)
{
	struct be_hw_stats_v0 *hw_stats = hw_stats_from_cmd(adapter);
	struct be_pmem_stats *pmem_sts = &hw_stats->pmem;
	struct be_rxf_stats_v0 *rxf_stats = &hw_stats->rxf;
	struct be_port_rxf_stats_v0 *port_stats =
					&rxf_stats->port[adapter->port_num];
	struct be_drv_stats *drvs = &adapter->drv_stats;

	/* fw returns the stats block little-endian; convert in place first */
	be_dws_le_to_cpu(hw_stats, sizeof(*hw_stats));
	drvs->rx_pause_frames = port_stats->rx_pause_frames;
	drvs->rx_crc_errors = port_stats->rx_crc_errors;
	drvs->rx_control_frames = port_stats->rx_control_frames;
	drvs->rx_in_range_errors = port_stats->rx_in_range_errors;
	drvs->rx_frame_too_long = port_stats->rx_frame_too_long;
	drvs->rx_dropped_runt = port_stats->rx_dropped_runt;
	drvs->rx_ip_checksum_errs = port_stats->rx_ip_checksum_errs;
	drvs->rx_tcp_checksum_errs = port_stats->rx_tcp_checksum_errs;
	drvs->rx_udp_checksum_errs = port_stats->rx_udp_checksum_errs;
	drvs->rxpp_fifo_overflow_drop = port_stats->rx_fifo_overflow;
	drvs->rx_dropped_tcp_length = port_stats->rx_dropped_tcp_length;
	drvs->rx_dropped_too_small = port_stats->rx_dropped_too_small;
	drvs->rx_dropped_too_short = port_stats->rx_dropped_too_short;
	drvs->rx_out_range_errors = port_stats->rx_out_range_errors;
	drvs->rx_input_fifo_overflow_drop = port_stats->rx_input_fifo_overflow;
	drvs->rx_dropped_header_too_small =
		port_stats->rx_dropped_header_too_small;
	/* v0 hw splits address and vlan mismatch drops; driver sums them */
	drvs->rx_address_mismatch_drops =
		port_stats->rx_address_mismatch_drops +
		port_stats->rx_vlan_mismatch_drops;
	drvs->rx_alignment_symbol_errors =
		port_stats->rx_alignment_symbol_errors;

	drvs->tx_pauseframes = port_stats->tx_pauseframes;
	drvs->tx_controlframes = port_stats->tx_controlframes;

	/* v0 keeps jabber counters per physical port at the rxf level */
	if (adapter->port_num)
		drvs->jabber_events = rxf_stats->port1_jabber_events;
	else
		drvs->jabber_events = rxf_stats->port0_jabber_events;
	drvs->rx_drops_no_pbuf = rxf_stats->rx_drops_no_pbuf;
	drvs->rx_drops_no_erx_descr = rxf_stats->rx_drops_no_erx_descr;
	drvs->forwarded_packets = rxf_stats->forwarded_packets;
	drvs->rx_drops_mtu = rxf_stats->rx_drops_mtu;
	drvs->rx_drops_no_tpre_descr = rxf_stats->rx_drops_no_tpre_descr;
	drvs->rx_drops_too_many_frags = rxf_stats->rx_drops_too_many_frags;
	adapter->drv_stats.eth_red_drops = pmem_sts->eth_red_drops;
}
312
/* Copy BE3 (v1-format) hardware stats returned by the GET_STATS firmware
 * command into the chip-agnostic adapter->drv_stats fields.
 */
static void populate_be3_stats(struct be_adapter *adapter)
{
	struct be_hw_stats_v1 *hw_stats = hw_stats_from_cmd(adapter);
	struct be_pmem_stats *pmem_sts = &hw_stats->pmem;
	struct be_rxf_stats_v1 *rxf_stats = &hw_stats->rxf;
	struct be_port_rxf_stats_v1 *port_stats =
					&rxf_stats->port[adapter->port_num];
	struct be_drv_stats *drvs = &adapter->drv_stats;

	/* fw returns the stats block little-endian; convert in place first */
	be_dws_le_to_cpu(hw_stats, sizeof(*hw_stats));
	drvs->pmem_fifo_overflow_drop = port_stats->pmem_fifo_overflow_drop;
	drvs->rx_priority_pause_frames = port_stats->rx_priority_pause_frames;
	drvs->rx_pause_frames = port_stats->rx_pause_frames;
	drvs->rx_crc_errors = port_stats->rx_crc_errors;
	drvs->rx_control_frames = port_stats->rx_control_frames;
	drvs->rx_in_range_errors = port_stats->rx_in_range_errors;
	drvs->rx_frame_too_long = port_stats->rx_frame_too_long;
	drvs->rx_dropped_runt = port_stats->rx_dropped_runt;
	drvs->rx_ip_checksum_errs = port_stats->rx_ip_checksum_errs;
	drvs->rx_tcp_checksum_errs = port_stats->rx_tcp_checksum_errs;
	drvs->rx_udp_checksum_errs = port_stats->rx_udp_checksum_errs;
	drvs->rx_dropped_tcp_length = port_stats->rx_dropped_tcp_length;
	drvs->rx_dropped_too_small = port_stats->rx_dropped_too_small;
	drvs->rx_dropped_too_short = port_stats->rx_dropped_too_short;
	drvs->rx_out_range_errors = port_stats->rx_out_range_errors;
	drvs->rx_dropped_header_too_small =
		port_stats->rx_dropped_header_too_small;
	drvs->rx_input_fifo_overflow_drop =
		port_stats->rx_input_fifo_overflow_drop;
	/* unlike v0, v1 reports a single combined mismatch-drop counter */
	drvs->rx_address_mismatch_drops = port_stats->rx_address_mismatch_drops;
	drvs->rx_alignment_symbol_errors =
		port_stats->rx_alignment_symbol_errors;
	drvs->rxpp_fifo_overflow_drop = port_stats->rxpp_fifo_overflow_drop;
	drvs->tx_pauseframes = port_stats->tx_pauseframes;
	drvs->tx_controlframes = port_stats->tx_controlframes;
	drvs->jabber_events = port_stats->jabber_events;
	drvs->rx_drops_no_pbuf = rxf_stats->rx_drops_no_pbuf;
	drvs->rx_drops_no_erx_descr = rxf_stats->rx_drops_no_erx_descr;
	drvs->forwarded_packets = rxf_stats->forwarded_packets;
	drvs->rx_drops_mtu = rxf_stats->rx_drops_mtu;
	drvs->rx_drops_no_tpre_descr = rxf_stats->rx_drops_no_tpre_descr;
	drvs->rx_drops_too_many_frags = rxf_stats->rx_drops_too_many_frags;
	adapter->drv_stats.eth_red_drops = pmem_sts->eth_red_drops;
}
357
/* Copy Lancer per-physical-port (pport) stats into adapter->drv_stats.
 * Lancer 64-bit counters are exposed as _lo/_hi pairs; only the low
 * 32 bits are consumed here.
 */
static void populate_lancer_stats(struct be_adapter *adapter)
{

	struct be_drv_stats *drvs = &adapter->drv_stats;
	struct lancer_pport_stats *pport_stats =
					pport_stats_from_cmd(adapter);

	/* fw returns the stats block little-endian; convert in place first */
	be_dws_le_to_cpu(pport_stats, sizeof(*pport_stats));
	drvs->rx_pause_frames = pport_stats->rx_pause_frames_lo;
	drvs->rx_crc_errors = pport_stats->rx_crc_errors_lo;
	drvs->rx_control_frames = pport_stats->rx_control_frames_lo;
	drvs->rx_in_range_errors = pport_stats->rx_in_range_errors;
	drvs->rx_frame_too_long = pport_stats->rx_frames_too_long_lo;
	drvs->rx_dropped_runt = pport_stats->rx_dropped_runt;
	drvs->rx_ip_checksum_errs = pport_stats->rx_ip_checksum_errors;
	drvs->rx_tcp_checksum_errs = pport_stats->rx_tcp_checksum_errors;
	drvs->rx_udp_checksum_errs = pport_stats->rx_udp_checksum_errors;
	drvs->rx_dropped_tcp_length =
				pport_stats->rx_dropped_invalid_tcp_length;
	drvs->rx_dropped_too_small = pport_stats->rx_dropped_too_small;
	drvs->rx_dropped_too_short = pport_stats->rx_dropped_too_short;
	drvs->rx_out_range_errors = pport_stats->rx_out_of_range_errors;
	drvs->rx_dropped_header_too_small =
				pport_stats->rx_dropped_header_too_small;
	drvs->rx_input_fifo_overflow_drop = pport_stats->rx_fifo_overflow;
	/* like BE2 v0, Lancer splits address/vlan mismatch; driver sums */
	drvs->rx_address_mismatch_drops =
					pport_stats->rx_address_mismatch_drops +
					pport_stats->rx_vlan_mismatch_drops;
	drvs->rx_alignment_symbol_errors = pport_stats->rx_symbol_errors_lo;
	drvs->rxpp_fifo_overflow_drop = pport_stats->rx_fifo_overflow;
	drvs->tx_pauseframes = pport_stats->tx_pause_frames_lo;
	drvs->tx_controlframes = pport_stats->tx_control_frames_lo;
	drvs->jabber_events = pport_stats->rx_jabbers;
	drvs->forwarded_packets = pport_stats->num_forwards_lo;
	drvs->rx_drops_mtu = pport_stats->rx_drops_mtu_lo;
	drvs->rx_drops_too_many_frags =
				pport_stats->rx_drops_too_many_frags_lo;
}
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000396
Sathya Perla09c1c682011-08-22 19:41:53 +0000397static void accumulate_16bit_val(u32 *acc, u16 val)
398{
399#define lo(x) (x & 0xFFFF)
400#define hi(x) (x & 0xFFFF0000)
401 bool wrapped = val < lo(*acc);
402 u32 newacc = hi(*acc) + val;
403
404 if (wrapped)
405 newacc += 65536;
406 ACCESS_ONCE(*acc) = newacc;
407}
408
/* Dispatch hw-stats parsing to the chip-specific populate routine
 * (BE2 v0, BE3 v1, or Lancer pport), then fold the per-RX-queue erx
 * drop counters into the driver stats. Non-static: used from other
 * driver files.
 */
void be_parse_stats(struct be_adapter *adapter)
{
	struct be_erx_stats_v1 *erx = be_erx_stats_from_cmd(adapter);
	struct be_rx_obj *rxo;
	int i;

	if (adapter->generation == BE_GEN3) {
		if (lancer_chip(adapter))
			populate_lancer_stats(adapter);
		else
			populate_be3_stats(adapter);
	} else {
		populate_be2_stats(adapter);
	}

	/* Lancer has no erx per-queue drop counters to accumulate */
	if (lancer_chip(adapter))
		goto done;

	/* as erx_v1 is longer than v0, ok to use v1 defn for v0 access */
	for_all_rx_queues(adapter, rxo, i) {
		/* below erx HW counter can actually wrap around after
		 * 65535. Driver accumulates a 32-bit value
		 */
		accumulate_16bit_val(&rx_stats(rxo)->rx_drops_no_frags,
				(u16)erx->rx_drops_no_fragments[rxo->q.id]);
	}
done:
	return;
}
438
/* ndo_get_stats64 handler: aggregate per-RX/TX-queue packet and byte
 * counters (read consistently via the u64_stats seqcount) plus the
 * firmware-derived error counters in drv_stats into @stats.
 * Returns @stats per the ndo contract.
 */
static struct rtnl_link_stats64 *be_get_stats64(struct net_device *netdev,
					struct rtnl_link_stats64 *stats)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_drv_stats *drvs = &adapter->drv_stats;
	struct be_rx_obj *rxo;
	struct be_tx_obj *txo;
	u64 pkts, bytes;
	unsigned int start;
	int i;

	for_all_rx_queues(adapter, rxo, i) {
		const struct be_rx_stats *rx_stats = rx_stats(rxo);
		/* retry loop: re-read if the writer updated mid-snapshot */
		do {
			start = u64_stats_fetch_begin_bh(&rx_stats->sync);
			pkts = rx_stats(rxo)->rx_pkts;
			bytes = rx_stats(rxo)->rx_bytes;
		} while (u64_stats_fetch_retry_bh(&rx_stats->sync, start));
		stats->rx_packets += pkts;
		stats->rx_bytes += bytes;
		stats->multicast += rx_stats(rxo)->rx_mcast_pkts;
		stats->rx_dropped += rx_stats(rxo)->rx_drops_no_skbs +
					rx_stats(rxo)->rx_drops_no_frags;
	}

	for_all_tx_queues(adapter, txo, i) {
		const struct be_tx_stats *tx_stats = tx_stats(txo);
		do {
			start = u64_stats_fetch_begin_bh(&tx_stats->sync);
			pkts = tx_stats(txo)->tx_pkts;
			bytes = tx_stats(txo)->tx_bytes;
		} while (u64_stats_fetch_retry_bh(&tx_stats->sync, start));
		stats->tx_packets += pkts;
		stats->tx_bytes += bytes;
	}

	/* bad pkts received */
	stats->rx_errors = drvs->rx_crc_errors +
		drvs->rx_alignment_symbol_errors +
		drvs->rx_in_range_errors +
		drvs->rx_out_range_errors +
		drvs->rx_frame_too_long +
		drvs->rx_dropped_too_small +
		drvs->rx_dropped_too_short +
		drvs->rx_dropped_header_too_small +
		drvs->rx_dropped_tcp_length +
		drvs->rx_dropped_runt;

	/* detailed rx errors */
	stats->rx_length_errors = drvs->rx_in_range_errors +
		drvs->rx_out_range_errors +
		drvs->rx_frame_too_long;

	stats->rx_crc_errors = drvs->rx_crc_errors;

	/* frame alignment errors */
	stats->rx_frame_errors = drvs->rx_alignment_symbol_errors;

	/* receiver fifo overrun */
	/* drops_no_pbuf is no per i/f, it's per BE card */
	stats->rx_fifo_errors = drvs->rxpp_fifo_overflow_drop +
				drvs->rx_input_fifo_overflow_drop +
				drvs->rx_drops_no_pbuf;
	return stats;
}
504
Ajit Khapardeb236916a2011-12-30 12:15:40 +0000505void be_link_status_update(struct be_adapter *adapter, u8 link_status)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700506{
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700507 struct net_device *netdev = adapter->netdev;
508
Ajit Khapardeb236916a2011-12-30 12:15:40 +0000509 if (!(adapter->flags & BE_FLAGS_LINK_STATUS_INIT)) {
Sathya Perlaea172a02011-08-02 19:57:42 +0000510 netif_carrier_off(netdev);
Ajit Khapardeb236916a2011-12-30 12:15:40 +0000511 adapter->flags |= BE_FLAGS_LINK_STATUS_INIT;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700512 }
Ajit Khapardeb236916a2011-12-30 12:15:40 +0000513
514 if ((link_status & LINK_STATUS_MASK) == LINK_UP)
515 netif_carrier_on(netdev);
516 else
517 netif_carrier_off(netdev);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700518}
519
Sathya Perla3c8def92011-06-12 20:01:58 +0000520static void be_tx_stats_update(struct be_tx_obj *txo,
Ajit Khaparde91992e42010-02-19 13:57:12 +0000521 u32 wrb_cnt, u32 copied, u32 gso_segs, bool stopped)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700522{
Sathya Perla3c8def92011-06-12 20:01:58 +0000523 struct be_tx_stats *stats = tx_stats(txo);
524
Sathya Perlaab1594e2011-07-25 19:10:15 +0000525 u64_stats_update_begin(&stats->sync);
Sathya Perlaac124ff2011-07-25 19:10:14 +0000526 stats->tx_reqs++;
527 stats->tx_wrbs += wrb_cnt;
528 stats->tx_bytes += copied;
529 stats->tx_pkts += (gso_segs ? gso_segs : 1);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700530 if (stopped)
Sathya Perlaac124ff2011-07-25 19:10:14 +0000531 stats->tx_stops++;
Sathya Perlaab1594e2011-07-25 19:10:15 +0000532 u64_stats_update_end(&stats->sync);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700533}
534
535/* Determine number of WRB entries needed to xmit data in an skb */
Sathya Perlafe6d2a32010-11-21 23:25:50 +0000536static u32 wrb_cnt_for_skb(struct be_adapter *adapter, struct sk_buff *skb,
537 bool *dummy)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700538{
David S. Millerebc8d2a2009-06-09 01:01:31 -0700539 int cnt = (skb->len > skb->data_len);
540
541 cnt += skb_shinfo(skb)->nr_frags;
542
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700543 /* to account for hdr wrb */
544 cnt++;
Sathya Perlafe6d2a32010-11-21 23:25:50 +0000545 if (lancer_chip(adapter) || !(cnt & 1)) {
546 *dummy = false;
547 } else {
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700548 /* add a dummy to make it an even num */
549 cnt++;
550 *dummy = true;
Sathya Perlafe6d2a32010-11-21 23:25:50 +0000551 }
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700552 BUG_ON(cnt > BE_MAX_TX_FRAG_COUNT);
553 return cnt;
554}
555
556static inline void wrb_fill(struct be_eth_wrb *wrb, u64 addr, int len)
557{
558 wrb->frag_pa_hi = upper_32_bits(addr);
559 wrb->frag_pa_lo = addr & 0xFFFFFFFF;
560 wrb->frag_len = len & ETH_WRB_FRAG_LEN_MASK;
Somnath Kotur89b1f492012-06-24 19:40:55 +0000561 wrb->rsvd0 = 0;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700562}
563
Ajit Khaparde1ded1322011-12-09 13:53:17 +0000564static inline u16 be_get_tx_vlan_tag(struct be_adapter *adapter,
565 struct sk_buff *skb)
566{
567 u8 vlan_prio;
568 u16 vlan_tag;
569
570 vlan_tag = vlan_tx_tag_get(skb);
571 vlan_prio = (vlan_tag & VLAN_PRIO_MASK) >> VLAN_PRIO_SHIFT;
572 /* If vlan priority provided by OS is NOT in available bmap */
573 if (!(adapter->vlan_prio_bmap & (1 << vlan_prio)))
574 vlan_tag = (vlan_tag & ~VLAN_PRIO_MASK) |
575 adapter->recommended_prio;
576
577 return vlan_tag;
578}
579
Somnath Kotur93040ae2012-06-26 22:32:10 +0000580static int be_vlan_tag_chk(struct be_adapter *adapter, struct sk_buff *skb)
581{
582 return vlan_tx_tag_present(skb) || adapter->pvid;
583}
584
/* Build the header WRB that precedes a TX request: sets CRC offload,
 * LSO/checksum-offload bits derived from the skb, VLAN insertion, the
 * total WRB count and payload length. Field writes go through the
 * AMAP_SET_BITS bit-field accessors on a zeroed header.
 */
static void wrb_fill_hdr(struct be_adapter *adapter, struct be_eth_hdr_wrb *hdr,
		struct sk_buff *skb, u32 wrb_cnt, u32 len)
{
	u16 vlan_tag;

	memset(hdr, 0, sizeof(*hdr));

	AMAP_SET_BITS(struct amap_eth_hdr_wrb, crc, hdr, 1);

	if (skb_is_gso(skb)) {
		AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso, hdr, 1);
		AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso_mss,
			hdr, skb_shinfo(skb)->gso_size);
		/* lso6 bit is not set on Lancer */
		if (skb_is_gso_v6(skb) && !lancer_chip(adapter))
			AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso6, hdr, 1);
		/* Lancer A0 additionally wants explicit ip/l4 csum bits
		 * alongside LSO
		 */
		if (lancer_chip(adapter) && adapter->sli_family ==
			LANCER_A0_SLI_FAMILY) {
			AMAP_SET_BITS(struct amap_eth_hdr_wrb, ipcs, hdr, 1);
			if (is_tcp_pkt(skb))
				AMAP_SET_BITS(struct amap_eth_hdr_wrb,
								tcpcs, hdr, 1);
			else if (is_udp_pkt(skb))
				AMAP_SET_BITS(struct amap_eth_hdr_wrb,
								udpcs, hdr, 1);
		}
	} else if (skb->ip_summed == CHECKSUM_PARTIAL) {
		/* non-GSO checksum offload requested by the stack */
		if (is_tcp_pkt(skb))
			AMAP_SET_BITS(struct amap_eth_hdr_wrb, tcpcs, hdr, 1);
		else if (is_udp_pkt(skb))
			AMAP_SET_BITS(struct amap_eth_hdr_wrb, udpcs, hdr, 1);
	}

	if (vlan_tx_tag_present(skb)) {
		AMAP_SET_BITS(struct amap_eth_hdr_wrb, vlan, hdr, 1);
		vlan_tag = be_get_tx_vlan_tag(adapter, skb);
		AMAP_SET_BITS(struct amap_eth_hdr_wrb, vlan_tag, hdr, vlan_tag);
	}

	AMAP_SET_BITS(struct amap_eth_hdr_wrb, event, hdr, 1);
	AMAP_SET_BITS(struct amap_eth_hdr_wrb, complete, hdr, 1);
	AMAP_SET_BITS(struct amap_eth_hdr_wrb, num_wrb, hdr, wrb_cnt);
	AMAP_SET_BITS(struct amap_eth_hdr_wrb, len, hdr, len);
}
628
Ivan Vecera2b7bceb2011-02-02 08:05:12 +0000629static void unmap_tx_frag(struct device *dev, struct be_eth_wrb *wrb,
Sathya Perla7101e112010-03-22 20:41:12 +0000630 bool unmap_single)
631{
632 dma_addr_t dma;
633
634 be_dws_le_to_cpu(wrb, sizeof(*wrb));
635
636 dma = (u64)wrb->frag_pa_hi << 32 | (u64)wrb->frag_pa_lo;
FUJITA Tomonorib681ee72010-04-04 21:40:18 +0000637 if (wrb->frag_len) {
Sathya Perla7101e112010-03-22 20:41:12 +0000638 if (unmap_single)
Ivan Vecera2b7bceb2011-02-02 08:05:12 +0000639 dma_unmap_single(dev, dma, wrb->frag_len,
640 DMA_TO_DEVICE);
Sathya Perla7101e112010-03-22 20:41:12 +0000641 else
Ivan Vecera2b7bceb2011-02-02 08:05:12 +0000642 dma_unmap_page(dev, dma, wrb->frag_len, DMA_TO_DEVICE);
Sathya Perla7101e112010-03-22 20:41:12 +0000643 }
644}
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700645
Sathya Perla3c8def92011-06-12 20:01:58 +0000646static int make_tx_wrbs(struct be_adapter *adapter, struct be_queue_info *txq,
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700647 struct sk_buff *skb, u32 wrb_cnt, bool dummy_wrb)
648{
Sathya Perla7101e112010-03-22 20:41:12 +0000649 dma_addr_t busaddr;
650 int i, copied = 0;
Ivan Vecera2b7bceb2011-02-02 08:05:12 +0000651 struct device *dev = &adapter->pdev->dev;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700652 struct sk_buff *first_skb = skb;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700653 struct be_eth_wrb *wrb;
654 struct be_eth_hdr_wrb *hdr;
Sathya Perla7101e112010-03-22 20:41:12 +0000655 bool map_single = false;
656 u16 map_head;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700657
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700658 hdr = queue_head_node(txq);
659 queue_head_inc(txq);
Sathya Perla7101e112010-03-22 20:41:12 +0000660 map_head = txq->head;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700661
David S. Millerebc8d2a2009-06-09 01:01:31 -0700662 if (skb->len > skb->data_len) {
Eric Dumazete743d312010-04-14 15:59:40 -0700663 int len = skb_headlen(skb);
Ivan Vecera2b7bceb2011-02-02 08:05:12 +0000664 busaddr = dma_map_single(dev, skb->data, len, DMA_TO_DEVICE);
665 if (dma_mapping_error(dev, busaddr))
Sathya Perla7101e112010-03-22 20:41:12 +0000666 goto dma_err;
667 map_single = true;
David S. Millerebc8d2a2009-06-09 01:01:31 -0700668 wrb = queue_head_node(txq);
669 wrb_fill(wrb, busaddr, len);
670 be_dws_cpu_to_le(wrb, sizeof(*wrb));
671 queue_head_inc(txq);
672 copied += len;
673 }
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700674
David S. Millerebc8d2a2009-06-09 01:01:31 -0700675 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
Eric Dumazet9e903e02011-10-18 21:00:24 +0000676 const struct skb_frag_struct *frag =
David S. Millerebc8d2a2009-06-09 01:01:31 -0700677 &skb_shinfo(skb)->frags[i];
Ian Campbellb061b392011-08-29 23:18:23 +0000678 busaddr = skb_frag_dma_map(dev, frag, 0,
Eric Dumazet9e903e02011-10-18 21:00:24 +0000679 skb_frag_size(frag), DMA_TO_DEVICE);
Ivan Vecera2b7bceb2011-02-02 08:05:12 +0000680 if (dma_mapping_error(dev, busaddr))
Sathya Perla7101e112010-03-22 20:41:12 +0000681 goto dma_err;
David S. Millerebc8d2a2009-06-09 01:01:31 -0700682 wrb = queue_head_node(txq);
Eric Dumazet9e903e02011-10-18 21:00:24 +0000683 wrb_fill(wrb, busaddr, skb_frag_size(frag));
David S. Millerebc8d2a2009-06-09 01:01:31 -0700684 be_dws_cpu_to_le(wrb, sizeof(*wrb));
685 queue_head_inc(txq);
Eric Dumazet9e903e02011-10-18 21:00:24 +0000686 copied += skb_frag_size(frag);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700687 }
688
689 if (dummy_wrb) {
690 wrb = queue_head_node(txq);
691 wrb_fill(wrb, 0, 0);
692 be_dws_cpu_to_le(wrb, sizeof(*wrb));
693 queue_head_inc(txq);
694 }
695
Somnath Koturcc4ce022010-10-21 07:11:14 -0700696 wrb_fill_hdr(adapter, hdr, first_skb, wrb_cnt, copied);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700697 be_dws_cpu_to_le(hdr, sizeof(*hdr));
698
699 return copied;
Sathya Perla7101e112010-03-22 20:41:12 +0000700dma_err:
701 txq->head = map_head;
702 while (copied) {
703 wrb = queue_head_node(txq);
Ivan Vecera2b7bceb2011-02-02 08:05:12 +0000704 unmap_tx_frag(dev, wrb, map_single);
Sathya Perla7101e112010-03-22 20:41:12 +0000705 map_single = false;
706 copied -= wrb->frag_len;
707 queue_head_inc(txq);
708 }
709 return 0;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700710}
711
Somnath Kotur93040ae2012-06-26 22:32:10 +0000712static struct sk_buff *be_insert_vlan_in_pkt(struct be_adapter *adapter,
713 struct sk_buff *skb)
714{
715 u16 vlan_tag = 0;
716
717 skb = skb_share_check(skb, GFP_ATOMIC);
718 if (unlikely(!skb))
719 return skb;
720
721 if (vlan_tx_tag_present(skb)) {
722 vlan_tag = be_get_tx_vlan_tag(adapter, skb);
723 __vlan_put_tag(skb, vlan_tag);
724 skb->vlan_tci = 0;
725 }
726
727 return skb;
728}
729
Stephen Hemminger613573252009-08-31 19:50:58 +0000730static netdev_tx_t be_xmit(struct sk_buff *skb,
Sathya Perlab31c50a2009-09-17 10:30:13 -0700731 struct net_device *netdev)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700732{
733 struct be_adapter *adapter = netdev_priv(netdev);
Sathya Perla3c8def92011-06-12 20:01:58 +0000734 struct be_tx_obj *txo = &adapter->tx_obj[skb_get_queue_mapping(skb)];
735 struct be_queue_info *txq = &txo->q;
Somnath Kotur93040ae2012-06-26 22:32:10 +0000736 struct iphdr *ip = NULL;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700737 u32 wrb_cnt = 0, copied = 0;
Somnath Kotur93040ae2012-06-26 22:32:10 +0000738 u32 start = txq->head, eth_hdr_len;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700739 bool dummy_wrb, stopped = false;
740
Somnath Kotur93040ae2012-06-26 22:32:10 +0000741 eth_hdr_len = ntohs(skb->protocol) == ETH_P_8021Q ?
742 VLAN_ETH_HLEN : ETH_HLEN;
743
744 /* HW has a bug which considers padding bytes as legal
745 * and modifies the IPv4 hdr's 'tot_len' field
Ajit Khaparde1ded1322011-12-09 13:53:17 +0000746 */
Somnath Kotur93040ae2012-06-26 22:32:10 +0000747 if (skb->len <= 60 && be_vlan_tag_chk(adapter, skb) &&
748 is_ipv4_pkt(skb)) {
749 ip = (struct iphdr *)ip_hdr(skb);
750 pskb_trim(skb, eth_hdr_len + ntohs(ip->tot_len));
751 }
752
753 /* HW has a bug wherein it will calculate CSUM for VLAN
754 * pkts even though it is disabled.
755 * Manually insert VLAN in pkt.
756 */
757 if (skb->ip_summed != CHECKSUM_PARTIAL &&
758 be_vlan_tag_chk(adapter, skb)) {
759 skb = be_insert_vlan_in_pkt(adapter, skb);
Ajit Khaparde1ded1322011-12-09 13:53:17 +0000760 if (unlikely(!skb))
761 goto tx_drop;
Ajit Khaparde1ded1322011-12-09 13:53:17 +0000762 }
763
Sathya Perlafe6d2a32010-11-21 23:25:50 +0000764 wrb_cnt = wrb_cnt_for_skb(adapter, skb, &dummy_wrb);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700765
Sathya Perla3c8def92011-06-12 20:01:58 +0000766 copied = make_tx_wrbs(adapter, txq, skb, wrb_cnt, dummy_wrb);
Ajit Khapardec190e3c2009-09-04 03:12:29 +0000767 if (copied) {
Eric Dumazetcd8f76c2012-06-07 22:59:59 +0000768 int gso_segs = skb_shinfo(skb)->gso_segs;
769
Ajit Khapardec190e3c2009-09-04 03:12:29 +0000770 /* record the sent skb in the sent_skb table */
Sathya Perla3c8def92011-06-12 20:01:58 +0000771 BUG_ON(txo->sent_skb_list[start]);
772 txo->sent_skb_list[start] = skb;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700773
Ajit Khapardec190e3c2009-09-04 03:12:29 +0000774 /* Ensure txq has space for the next skb; Else stop the queue
775 * *BEFORE* ringing the tx doorbell, so that we serialze the
776 * tx compls of the current transmit which'll wake up the queue
777 */
Sathya Perla7101e112010-03-22 20:41:12 +0000778 atomic_add(wrb_cnt, &txq->used);
Ajit Khapardec190e3c2009-09-04 03:12:29 +0000779 if ((BE_MAX_TX_FRAG_COUNT + atomic_read(&txq->used)) >=
780 txq->len) {
Sathya Perla3c8def92011-06-12 20:01:58 +0000781 netif_stop_subqueue(netdev, skb_get_queue_mapping(skb));
Ajit Khapardec190e3c2009-09-04 03:12:29 +0000782 stopped = true;
783 }
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700784
Ajit Khapardec190e3c2009-09-04 03:12:29 +0000785 be_txq_notify(adapter, txq->id, wrb_cnt);
786
Eric Dumazetcd8f76c2012-06-07 22:59:59 +0000787 be_tx_stats_update(txo, wrb_cnt, copied, gso_segs, stopped);
Ajit Khapardec190e3c2009-09-04 03:12:29 +0000788 } else {
789 txq->head = start;
790 dev_kfree_skb_any(skb);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700791 }
Ajit Khaparde1ded1322011-12-09 13:53:17 +0000792tx_drop:
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700793 return NETDEV_TX_OK;
794}
795
796static int be_change_mtu(struct net_device *netdev, int new_mtu)
797{
798 struct be_adapter *adapter = netdev_priv(netdev);
799 if (new_mtu < BE_MIN_MTU ||
Ajit Khaparde34a89b82010-02-09 01:32:43 +0000800 new_mtu > (BE_MAX_JUMBO_FRAME_SIZE -
801 (ETH_HLEN + ETH_FCS_LEN))) {
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700802 dev_info(&adapter->pdev->dev,
803 "MTU must be between %d and %d bytes\n",
Ajit Khaparde34a89b82010-02-09 01:32:43 +0000804 BE_MIN_MTU,
805 (BE_MAX_JUMBO_FRAME_SIZE - (ETH_HLEN + ETH_FCS_LEN)));
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700806 return -EINVAL;
807 }
808 dev_info(&adapter->pdev->dev, "MTU changed from %d to %d bytes\n",
809 netdev->mtu, new_mtu);
810 netdev->mtu = new_mtu;
811 return 0;
812}
813
814/*
Ajit Khaparde82903e42010-02-09 01:34:57 +0000815 * A max of 64 (BE_NUM_VLANS_SUPPORTED) vlans can be configured in BE.
816 * If the user configures more, place BE in vlan promiscuous mode.
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700817 */
Sathya Perla10329df2012-06-05 19:37:18 +0000818static int be_vid_config(struct be_adapter *adapter)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700819{
Sathya Perla10329df2012-06-05 19:37:18 +0000820 u16 vids[BE_NUM_VLANS_SUPPORTED];
821 u16 num = 0, i;
Ajit Khaparde82903e42010-02-09 01:34:57 +0000822 int status = 0;
Ajit Khaparde1da87b72010-07-23 01:51:22 +0000823
Sathya Perlac0e64ef2011-08-02 19:57:43 +0000824 /* No need to further configure vids if in promiscuous mode */
825 if (adapter->promiscuous)
826 return 0;
827
Padmanabh Ratnakar0fc16eb2012-04-25 01:46:06 +0000828 if (adapter->vlans_added > adapter->max_vlans)
829 goto set_vlan_promisc;
830
831 /* Construct VLAN Table to give to HW */
832 for (i = 0; i < VLAN_N_VID; i++)
833 if (adapter->vlan_tag[i])
Sathya Perla10329df2012-06-05 19:37:18 +0000834 vids[num++] = cpu_to_le16(i);
Padmanabh Ratnakar0fc16eb2012-04-25 01:46:06 +0000835
836 status = be_cmd_vlan_config(adapter, adapter->if_handle,
Sathya Perla10329df2012-06-05 19:37:18 +0000837 vids, num, 1, 0);
Padmanabh Ratnakar0fc16eb2012-04-25 01:46:06 +0000838
839 /* Set to VLAN promisc mode as setting VLAN filter failed */
840 if (status) {
841 dev_info(&adapter->pdev->dev, "Exhausted VLAN HW filters.\n");
842 dev_info(&adapter->pdev->dev, "Disabling HW VLAN filtering.\n");
843 goto set_vlan_promisc;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700844 }
Ajit Khaparde1da87b72010-07-23 01:51:22 +0000845
Sathya Perlab31c50a2009-09-17 10:30:13 -0700846 return status;
Padmanabh Ratnakar0fc16eb2012-04-25 01:46:06 +0000847
848set_vlan_promisc:
849 status = be_cmd_vlan_config(adapter, adapter->if_handle,
850 NULL, 0, 1, 1);
851 return status;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700852}
853
Jiri Pirko8e586132011-12-08 19:52:37 -0500854static int be_vlan_add_vid(struct net_device *netdev, u16 vid)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700855{
856 struct be_adapter *adapter = netdev_priv(netdev);
Ajit Khaparde80817cb2011-12-30 12:15:12 +0000857 int status = 0;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700858
Ajit Khaparde80817cb2011-12-30 12:15:12 +0000859 if (!be_physfn(adapter)) {
860 status = -EINVAL;
861 goto ret;
862 }
Sarveshwar Bandiba343c72010-03-31 02:56:12 +0000863
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700864 adapter->vlan_tag[vid] = 1;
Ajit Khaparde82903e42010-02-09 01:34:57 +0000865 if (adapter->vlans_added <= (adapter->max_vlans + 1))
Sathya Perla10329df2012-06-05 19:37:18 +0000866 status = be_vid_config(adapter);
Jiri Pirko8e586132011-12-08 19:52:37 -0500867
Ajit Khaparde80817cb2011-12-30 12:15:12 +0000868 if (!status)
869 adapter->vlans_added++;
870 else
871 adapter->vlan_tag[vid] = 0;
872ret:
873 return status;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700874}
875
Jiri Pirko8e586132011-12-08 19:52:37 -0500876static int be_vlan_rem_vid(struct net_device *netdev, u16 vid)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700877{
878 struct be_adapter *adapter = netdev_priv(netdev);
Ajit Khaparde80817cb2011-12-30 12:15:12 +0000879 int status = 0;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700880
Ajit Khaparde80817cb2011-12-30 12:15:12 +0000881 if (!be_physfn(adapter)) {
882 status = -EINVAL;
883 goto ret;
884 }
Sarveshwar Bandiba343c72010-03-31 02:56:12 +0000885
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700886 adapter->vlan_tag[vid] = 0;
Ajit Khaparde82903e42010-02-09 01:34:57 +0000887 if (adapter->vlans_added <= adapter->max_vlans)
Sathya Perla10329df2012-06-05 19:37:18 +0000888 status = be_vid_config(adapter);
Jiri Pirko8e586132011-12-08 19:52:37 -0500889
Ajit Khaparde80817cb2011-12-30 12:15:12 +0000890 if (!status)
891 adapter->vlans_added--;
892 else
893 adapter->vlan_tag[vid] = 1;
894ret:
895 return status;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700896}
897
Sathya Perlaa54769f2011-10-24 02:45:00 +0000898static void be_set_rx_mode(struct net_device *netdev)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700899{
900 struct be_adapter *adapter = netdev_priv(netdev);
Padmanabh Ratnakar0fc16eb2012-04-25 01:46:06 +0000901 int status;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700902
903 if (netdev->flags & IFF_PROMISC) {
Sathya Perla5b8821b2011-08-02 19:57:44 +0000904 be_cmd_rx_filter(adapter, IFF_PROMISC, ON);
Sathya Perla24307ee2009-06-18 00:09:25 +0000905 adapter->promiscuous = true;
906 goto done;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700907 }
Sathya Perla24307ee2009-06-18 00:09:25 +0000908
Lucas De Marchi25985ed2011-03-30 22:57:33 -0300909 /* BE was previously in promiscuous mode; disable it */
Sathya Perla24307ee2009-06-18 00:09:25 +0000910 if (adapter->promiscuous) {
911 adapter->promiscuous = false;
Sathya Perla5b8821b2011-08-02 19:57:44 +0000912 be_cmd_rx_filter(adapter, IFF_PROMISC, OFF);
Sathya Perlac0e64ef2011-08-02 19:57:43 +0000913
914 if (adapter->vlans_added)
Sathya Perla10329df2012-06-05 19:37:18 +0000915 be_vid_config(adapter);
Sathya Perla24307ee2009-06-18 00:09:25 +0000916 }
917
Sathya Perlae7b909a2009-11-22 22:01:10 +0000918 /* Enable multicast promisc if num configured exceeds what we support */
Jiri Pirko4cd24ea2010-02-08 04:30:35 +0000919 if (netdev->flags & IFF_ALLMULTI ||
Sathya Perla5b8821b2011-08-02 19:57:44 +0000920 netdev_mc_count(netdev) > BE_MAX_MC) {
921 be_cmd_rx_filter(adapter, IFF_ALLMULTI, ON);
Sathya Perla24307ee2009-06-18 00:09:25 +0000922 goto done;
923 }
924
Ajit Khapardefbc13f02012-03-18 06:23:21 +0000925 if (netdev_uc_count(netdev) != adapter->uc_macs) {
926 struct netdev_hw_addr *ha;
927 int i = 1; /* First slot is claimed by the Primary MAC */
928
929 for (; adapter->uc_macs > 0; adapter->uc_macs--, i++) {
930 be_cmd_pmac_del(adapter, adapter->if_handle,
931 adapter->pmac_id[i], 0);
932 }
933
934 if (netdev_uc_count(netdev) > adapter->max_pmac_cnt) {
935 be_cmd_rx_filter(adapter, IFF_PROMISC, ON);
936 adapter->promiscuous = true;
937 goto done;
938 }
939
940 netdev_for_each_uc_addr(ha, adapter->netdev) {
941 adapter->uc_macs++; /* First slot is for Primary MAC */
942 be_cmd_pmac_add(adapter, (u8 *)ha->addr,
943 adapter->if_handle,
944 &adapter->pmac_id[adapter->uc_macs], 0);
945 }
946 }
947
Padmanabh Ratnakar0fc16eb2012-04-25 01:46:06 +0000948 status = be_cmd_rx_filter(adapter, IFF_MULTICAST, ON);
949
950 /* Set to MCAST promisc mode if setting MULTICAST address fails */
951 if (status) {
952 dev_info(&adapter->pdev->dev, "Exhausted multicast HW filters.\n");
953 dev_info(&adapter->pdev->dev, "Disabling HW multicast filtering.\n");
954 be_cmd_rx_filter(adapter, IFF_ALLMULTI, ON);
955 }
Sathya Perla24307ee2009-06-18 00:09:25 +0000956done:
957 return;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700958}
959
Sarveshwar Bandiba343c72010-03-31 02:56:12 +0000960static int be_set_vf_mac(struct net_device *netdev, int vf, u8 *mac)
961{
962 struct be_adapter *adapter = netdev_priv(netdev);
Sathya Perla11ac75e2011-12-13 00:58:50 +0000963 struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf];
Sarveshwar Bandiba343c72010-03-31 02:56:12 +0000964 int status;
965
Sathya Perla11ac75e2011-12-13 00:58:50 +0000966 if (!sriov_enabled(adapter))
Sarveshwar Bandiba343c72010-03-31 02:56:12 +0000967 return -EPERM;
968
Sathya Perla11ac75e2011-12-13 00:58:50 +0000969 if (!is_valid_ether_addr(mac) || vf >= adapter->num_vfs)
Sarveshwar Bandiba343c72010-03-31 02:56:12 +0000970 return -EINVAL;
971
Padmanabh Ratnakar590c3912011-11-25 05:47:26 +0000972 if (lancer_chip(adapter)) {
973 status = be_cmd_set_mac_list(adapter, mac, 1, vf + 1);
974 } else {
Sathya Perla11ac75e2011-12-13 00:58:50 +0000975 status = be_cmd_pmac_del(adapter, vf_cfg->if_handle,
976 vf_cfg->pmac_id, vf + 1);
Sarveshwar Bandiba343c72010-03-31 02:56:12 +0000977
Sathya Perla11ac75e2011-12-13 00:58:50 +0000978 status = be_cmd_pmac_add(adapter, mac, vf_cfg->if_handle,
979 &vf_cfg->pmac_id, vf + 1);
Padmanabh Ratnakar590c3912011-11-25 05:47:26 +0000980 }
981
Ajit Khaparde64600ea2010-07-23 01:50:34 +0000982 if (status)
Sarveshwar Bandiba343c72010-03-31 02:56:12 +0000983 dev_err(&adapter->pdev->dev, "MAC %pM set on VF %d Failed\n",
984 mac, vf);
Ajit Khaparde64600ea2010-07-23 01:50:34 +0000985 else
Sathya Perla11ac75e2011-12-13 00:58:50 +0000986 memcpy(vf_cfg->mac_addr, mac, ETH_ALEN);
Ajit Khaparde64600ea2010-07-23 01:50:34 +0000987
Sarveshwar Bandiba343c72010-03-31 02:56:12 +0000988 return status;
989}
990
Ajit Khaparde64600ea2010-07-23 01:50:34 +0000991static int be_get_vf_config(struct net_device *netdev, int vf,
992 struct ifla_vf_info *vi)
993{
994 struct be_adapter *adapter = netdev_priv(netdev);
Sathya Perla11ac75e2011-12-13 00:58:50 +0000995 struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf];
Ajit Khaparde64600ea2010-07-23 01:50:34 +0000996
Sathya Perla11ac75e2011-12-13 00:58:50 +0000997 if (!sriov_enabled(adapter))
Ajit Khaparde64600ea2010-07-23 01:50:34 +0000998 return -EPERM;
999
Sathya Perla11ac75e2011-12-13 00:58:50 +00001000 if (vf >= adapter->num_vfs)
Ajit Khaparde64600ea2010-07-23 01:50:34 +00001001 return -EINVAL;
1002
1003 vi->vf = vf;
Sathya Perla11ac75e2011-12-13 00:58:50 +00001004 vi->tx_rate = vf_cfg->tx_rate;
1005 vi->vlan = vf_cfg->vlan_tag;
Ajit Khaparde64600ea2010-07-23 01:50:34 +00001006 vi->qos = 0;
Sathya Perla11ac75e2011-12-13 00:58:50 +00001007 memcpy(&vi->mac, vf_cfg->mac_addr, ETH_ALEN);
Ajit Khaparde64600ea2010-07-23 01:50:34 +00001008
1009 return 0;
1010}
1011
Ajit Khaparde1da87b72010-07-23 01:51:22 +00001012static int be_set_vf_vlan(struct net_device *netdev,
1013 int vf, u16 vlan, u8 qos)
1014{
1015 struct be_adapter *adapter = netdev_priv(netdev);
1016 int status = 0;
1017
Sathya Perla11ac75e2011-12-13 00:58:50 +00001018 if (!sriov_enabled(adapter))
Ajit Khaparde1da87b72010-07-23 01:51:22 +00001019 return -EPERM;
1020
Sathya Perla11ac75e2011-12-13 00:58:50 +00001021 if (vf >= adapter->num_vfs || vlan > 4095)
Ajit Khaparde1da87b72010-07-23 01:51:22 +00001022 return -EINVAL;
1023
1024 if (vlan) {
Ajit Khapardef1f3ee12012-03-18 06:23:41 +00001025 if (adapter->vf_cfg[vf].vlan_tag != vlan) {
1026 /* If this is new value, program it. Else skip. */
1027 adapter->vf_cfg[vf].vlan_tag = vlan;
1028
1029 status = be_cmd_set_hsw_config(adapter, vlan,
1030 vf + 1, adapter->vf_cfg[vf].if_handle);
1031 }
Ajit Khaparde1da87b72010-07-23 01:51:22 +00001032 } else {
Ajit Khapardef1f3ee12012-03-18 06:23:41 +00001033 /* Reset Transparent Vlan Tagging. */
Sathya Perla11ac75e2011-12-13 00:58:50 +00001034 adapter->vf_cfg[vf].vlan_tag = 0;
Ajit Khapardef1f3ee12012-03-18 06:23:41 +00001035 vlan = adapter->vf_cfg[vf].def_vid;
1036 status = be_cmd_set_hsw_config(adapter, vlan, vf + 1,
1037 adapter->vf_cfg[vf].if_handle);
Ajit Khaparde1da87b72010-07-23 01:51:22 +00001038 }
1039
Ajit Khaparde1da87b72010-07-23 01:51:22 +00001040
1041 if (status)
1042 dev_info(&adapter->pdev->dev,
1043 "VLAN %d config on VF %d failed\n", vlan, vf);
1044 return status;
1045}
1046
Ajit Khapardee1d18732010-07-23 01:52:13 +00001047static int be_set_vf_tx_rate(struct net_device *netdev,
1048 int vf, int rate)
1049{
1050 struct be_adapter *adapter = netdev_priv(netdev);
1051 int status = 0;
1052
Sathya Perla11ac75e2011-12-13 00:58:50 +00001053 if (!sriov_enabled(adapter))
Ajit Khapardee1d18732010-07-23 01:52:13 +00001054 return -EPERM;
1055
Ajit Khaparde94f434c2011-12-30 12:15:30 +00001056 if (vf >= adapter->num_vfs)
Ajit Khapardee1d18732010-07-23 01:52:13 +00001057 return -EINVAL;
1058
Ajit Khaparde94f434c2011-12-30 12:15:30 +00001059 if (rate < 100 || rate > 10000) {
1060 dev_err(&adapter->pdev->dev,
1061 "tx rate must be between 100 and 10000 Mbps\n");
1062 return -EINVAL;
1063 }
Ajit Khapardee1d18732010-07-23 01:52:13 +00001064
Ajit Khaparde856c4012011-02-11 13:32:32 +00001065 status = be_cmd_set_qos(adapter, rate / 10, vf + 1);
Ajit Khapardee1d18732010-07-23 01:52:13 +00001066
1067 if (status)
Ajit Khaparde94f434c2011-12-30 12:15:30 +00001068 dev_err(&adapter->pdev->dev,
Ajit Khapardee1d18732010-07-23 01:52:13 +00001069 "tx rate %d on VF %d failed\n", rate, vf);
Ajit Khaparde94f434c2011-12-30 12:15:30 +00001070 else
1071 adapter->vf_cfg[vf].tx_rate = rate;
Ajit Khapardee1d18732010-07-23 01:52:13 +00001072 return status;
1073}
1074
Sathya Perla39f1d942012-05-08 19:41:24 +00001075static int be_find_vfs(struct be_adapter *adapter, int vf_state)
1076{
1077 struct pci_dev *dev, *pdev = adapter->pdev;
David S. Millerd9f72f32012-09-27 22:19:02 -04001078 int vfs = 0, assigned_vfs = 0, pos, vf_fn;
Sathya Perla39f1d942012-05-08 19:41:24 +00001079 u16 offset, stride;
1080
1081 pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_SRIOV);
Sathya Perlad79c0a22012-06-05 19:37:22 +00001082 if (!pos)
1083 return 0;
Sathya Perla39f1d942012-05-08 19:41:24 +00001084 pci_read_config_word(pdev, pos + PCI_SRIOV_VF_OFFSET, &offset);
1085 pci_read_config_word(pdev, pos + PCI_SRIOV_VF_STRIDE, &stride);
1086
1087 dev = pci_get_device(pdev->vendor, PCI_ANY_ID, NULL);
1088 while (dev) {
David S. Millerd9f72f32012-09-27 22:19:02 -04001089 vf_fn = (pdev->devfn + offset + stride * vfs) & 0xFFFF;
1090 if (dev->is_virtfn && dev->devfn == vf_fn &&
1091 dev->bus->number == pdev->bus->number) {
Sathya Perla39f1d942012-05-08 19:41:24 +00001092 vfs++;
1093 if (dev->dev_flags & PCI_DEV_FLAGS_ASSIGNED)
1094 assigned_vfs++;
1095 }
1096 dev = pci_get_device(pdev->vendor, PCI_ANY_ID, dev);
1097 }
1098 return (vf_state == ASSIGNED) ? assigned_vfs : vfs;
1099}
1100
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001101static void be_eqd_update(struct be_adapter *adapter, struct be_eq_obj *eqo)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001102{
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001103 struct be_rx_stats *stats = rx_stats(&adapter->rx_obj[eqo->idx]);
Sathya Perla4097f662009-03-24 16:40:13 -07001104 ulong now = jiffies;
Sathya Perlaac124ff2011-07-25 19:10:14 +00001105 ulong delta = now - stats->rx_jiffies;
Sathya Perlaab1594e2011-07-25 19:10:15 +00001106 u64 pkts;
1107 unsigned int start, eqd;
Sathya Perlaac124ff2011-07-25 19:10:14 +00001108
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001109 if (!eqo->enable_aic) {
1110 eqd = eqo->eqd;
1111 goto modify_eqd;
1112 }
1113
1114 if (eqo->idx >= adapter->num_rx_qs)
Sathya Perlaac124ff2011-07-25 19:10:14 +00001115 return;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001116
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001117 stats = rx_stats(&adapter->rx_obj[eqo->idx]);
1118
Sathya Perla4097f662009-03-24 16:40:13 -07001119 /* Wrapped around */
Sathya Perla3abcded2010-10-03 22:12:27 -07001120 if (time_before(now, stats->rx_jiffies)) {
1121 stats->rx_jiffies = now;
Sathya Perla4097f662009-03-24 16:40:13 -07001122 return;
1123 }
1124
Sathya Perlaac124ff2011-07-25 19:10:14 +00001125 /* Update once a second */
1126 if (delta < HZ)
Sathya Perla4097f662009-03-24 16:40:13 -07001127 return;
1128
Sathya Perlaab1594e2011-07-25 19:10:15 +00001129 do {
1130 start = u64_stats_fetch_begin_bh(&stats->sync);
1131 pkts = stats->rx_pkts;
1132 } while (u64_stats_fetch_retry_bh(&stats->sync, start));
1133
Eric Dumazet68c3e5a2011-08-09 06:23:07 +00001134 stats->rx_pps = (unsigned long)(pkts - stats->rx_pkts_prev) / (delta / HZ);
Sathya Perlaab1594e2011-07-25 19:10:15 +00001135 stats->rx_pkts_prev = pkts;
Sathya Perla3abcded2010-10-03 22:12:27 -07001136 stats->rx_jiffies = now;
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001137 eqd = (stats->rx_pps / 110000) << 3;
1138 eqd = min(eqd, eqo->max_eqd);
1139 eqd = max(eqd, eqo->min_eqd);
Sathya Perlaac124ff2011-07-25 19:10:14 +00001140 if (eqd < 10)
1141 eqd = 0;
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001142
1143modify_eqd:
1144 if (eqd != eqo->cur_eqd) {
1145 be_cmd_modify_eqd(adapter, eqo->q.id, eqd);
1146 eqo->cur_eqd = eqd;
Sathya Perlaac124ff2011-07-25 19:10:14 +00001147 }
Sathya Perla4097f662009-03-24 16:40:13 -07001148}
1149
Sathya Perla3abcded2010-10-03 22:12:27 -07001150static void be_rx_stats_update(struct be_rx_obj *rxo,
Sathya Perla2e588f82011-03-11 02:49:26 +00001151 struct be_rx_compl_info *rxcp)
Sathya Perla4097f662009-03-24 16:40:13 -07001152{
Sathya Perlaac124ff2011-07-25 19:10:14 +00001153 struct be_rx_stats *stats = rx_stats(rxo);
Sathya Perla4097f662009-03-24 16:40:13 -07001154
Sathya Perlaab1594e2011-07-25 19:10:15 +00001155 u64_stats_update_begin(&stats->sync);
Sathya Perla3abcded2010-10-03 22:12:27 -07001156 stats->rx_compl++;
Sathya Perla2e588f82011-03-11 02:49:26 +00001157 stats->rx_bytes += rxcp->pkt_size;
Sathya Perla3abcded2010-10-03 22:12:27 -07001158 stats->rx_pkts++;
Sathya Perla2e588f82011-03-11 02:49:26 +00001159 if (rxcp->pkt_type == BE_MULTICAST_PACKET)
Sathya Perla3abcded2010-10-03 22:12:27 -07001160 stats->rx_mcast_pkts++;
Sathya Perla2e588f82011-03-11 02:49:26 +00001161 if (rxcp->err)
Sathya Perlaac124ff2011-07-25 19:10:14 +00001162 stats->rx_compl_err++;
Sathya Perlaab1594e2011-07-25 19:10:15 +00001163 u64_stats_update_end(&stats->sync);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001164}
1165
Sathya Perla2e588f82011-03-11 02:49:26 +00001166static inline bool csum_passed(struct be_rx_compl_info *rxcp)
Ajit Khaparde728a9972009-04-13 15:41:22 -07001167{
Padmanabh Ratnakar19fad862011-03-07 03:08:16 +00001168 /* L4 checksum is not reliable for non TCP/UDP packets.
1169 * Also ignore ipcksm for ipv6 pkts */
Sathya Perla2e588f82011-03-11 02:49:26 +00001170 return (rxcp->tcpf || rxcp->udpf) && rxcp->l4_csum &&
1171 (rxcp->ip_csum || rxcp->ipv6);
Ajit Khaparde728a9972009-04-13 15:41:22 -07001172}
1173
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001174static struct be_rx_page_info *get_rx_page_info(struct be_rx_obj *rxo,
1175 u16 frag_idx)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001176{
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001177 struct be_adapter *adapter = rxo->adapter;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001178 struct be_rx_page_info *rx_page_info;
Sathya Perla3abcded2010-10-03 22:12:27 -07001179 struct be_queue_info *rxq = &rxo->q;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001180
Sathya Perla3abcded2010-10-03 22:12:27 -07001181 rx_page_info = &rxo->page_info_tbl[frag_idx];
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001182 BUG_ON(!rx_page_info->page);
1183
Ajit Khaparde205859a2010-02-09 01:34:21 +00001184 if (rx_page_info->last_page_user) {
Ivan Vecera2b7bceb2011-02-02 08:05:12 +00001185 dma_unmap_page(&adapter->pdev->dev,
1186 dma_unmap_addr(rx_page_info, bus),
1187 adapter->big_page_size, DMA_FROM_DEVICE);
Ajit Khaparde205859a2010-02-09 01:34:21 +00001188 rx_page_info->last_page_user = false;
1189 }
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001190
1191 atomic_dec(&rxq->used);
1192 return rx_page_info;
1193}
1194
1195/* Throwaway the data in the Rx completion */
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001196static void be_rx_compl_discard(struct be_rx_obj *rxo,
1197 struct be_rx_compl_info *rxcp)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001198{
Sathya Perla3abcded2010-10-03 22:12:27 -07001199 struct be_queue_info *rxq = &rxo->q;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001200 struct be_rx_page_info *page_info;
Sathya Perla2e588f82011-03-11 02:49:26 +00001201 u16 i, num_rcvd = rxcp->num_rcvd;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001202
Padmanabh Ratnakare80d9da2011-03-07 03:07:58 +00001203 for (i = 0; i < num_rcvd; i++) {
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001204 page_info = get_rx_page_info(rxo, rxcp->rxq_idx);
Padmanabh Ratnakare80d9da2011-03-07 03:07:58 +00001205 put_page(page_info->page);
1206 memset(page_info, 0, sizeof(*page_info));
Sathya Perla2e588f82011-03-11 02:49:26 +00001207 index_inc(&rxcp->rxq_idx, rxq->len);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001208 }
1209}
1210
1211/*
1212 * skb_fill_rx_data forms a complete skb for an ether frame
1213 * indicated by rxcp.
1214 */
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001215static void skb_fill_rx_data(struct be_rx_obj *rxo, struct sk_buff *skb,
1216 struct be_rx_compl_info *rxcp)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001217{
Sathya Perla3abcded2010-10-03 22:12:27 -07001218 struct be_queue_info *rxq = &rxo->q;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001219 struct be_rx_page_info *page_info;
Sathya Perla2e588f82011-03-11 02:49:26 +00001220 u16 i, j;
1221 u16 hdr_len, curr_frag_len, remaining;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001222 u8 *start;
1223
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001224 page_info = get_rx_page_info(rxo, rxcp->rxq_idx);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001225 start = page_address(page_info->page) + page_info->page_offset;
1226 prefetch(start);
1227
1228 /* Copy data in the first descriptor of this completion */
Sathya Perla2e588f82011-03-11 02:49:26 +00001229 curr_frag_len = min(rxcp->pkt_size, rx_frag_size);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001230
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001231 skb->len = curr_frag_len;
1232 if (curr_frag_len <= BE_HDR_LEN) { /* tiny packet */
Eric Dumazetac1ae5f2012-07-13 03:19:41 +00001233 memcpy(skb->data, start, curr_frag_len);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001234 /* Complete packet has now been moved to data */
1235 put_page(page_info->page);
1236 skb->data_len = 0;
1237 skb->tail += curr_frag_len;
1238 } else {
Eric Dumazetac1ae5f2012-07-13 03:19:41 +00001239 hdr_len = ETH_HLEN;
1240 memcpy(skb->data, start, hdr_len);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001241 skb_shinfo(skb)->nr_frags = 1;
Ian Campbellb061b392011-08-29 23:18:23 +00001242 skb_frag_set_page(skb, 0, page_info->page);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001243 skb_shinfo(skb)->frags[0].page_offset =
1244 page_info->page_offset + hdr_len;
Eric Dumazet9e903e02011-10-18 21:00:24 +00001245 skb_frag_size_set(&skb_shinfo(skb)->frags[0], curr_frag_len - hdr_len);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001246 skb->data_len = curr_frag_len - hdr_len;
Eric Dumazetbdb28a92011-10-13 06:31:02 +00001247 skb->truesize += rx_frag_size;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001248 skb->tail += hdr_len;
1249 }
Ajit Khaparde205859a2010-02-09 01:34:21 +00001250 page_info->page = NULL;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001251
Sathya Perla2e588f82011-03-11 02:49:26 +00001252 if (rxcp->pkt_size <= rx_frag_size) {
1253 BUG_ON(rxcp->num_rcvd != 1);
1254 return;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001255 }
1256
1257 /* More frags present for this completion */
Sathya Perla2e588f82011-03-11 02:49:26 +00001258 index_inc(&rxcp->rxq_idx, rxq->len);
1259 remaining = rxcp->pkt_size - curr_frag_len;
1260 for (i = 1, j = 0; i < rxcp->num_rcvd; i++) {
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001261 page_info = get_rx_page_info(rxo, rxcp->rxq_idx);
Sathya Perla2e588f82011-03-11 02:49:26 +00001262 curr_frag_len = min(remaining, rx_frag_size);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001263
Ajit Khapardebd46cb62009-06-26 02:51:07 +00001264 /* Coalesce all frags from the same physical page in one slot */
1265 if (page_info->page_offset == 0) {
1266 /* Fresh page */
1267 j++;
Ian Campbellb061b392011-08-29 23:18:23 +00001268 skb_frag_set_page(skb, j, page_info->page);
Ajit Khapardebd46cb62009-06-26 02:51:07 +00001269 skb_shinfo(skb)->frags[j].page_offset =
1270 page_info->page_offset;
Eric Dumazet9e903e02011-10-18 21:00:24 +00001271 skb_frag_size_set(&skb_shinfo(skb)->frags[j], 0);
Ajit Khapardebd46cb62009-06-26 02:51:07 +00001272 skb_shinfo(skb)->nr_frags++;
1273 } else {
1274 put_page(page_info->page);
1275 }
1276
Eric Dumazet9e903e02011-10-18 21:00:24 +00001277 skb_frag_size_add(&skb_shinfo(skb)->frags[j], curr_frag_len);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001278 skb->len += curr_frag_len;
1279 skb->data_len += curr_frag_len;
Eric Dumazetbdb28a92011-10-13 06:31:02 +00001280 skb->truesize += rx_frag_size;
Sathya Perla2e588f82011-03-11 02:49:26 +00001281 remaining -= curr_frag_len;
1282 index_inc(&rxcp->rxq_idx, rxq->len);
Ajit Khaparde205859a2010-02-09 01:34:21 +00001283 page_info->page = NULL;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001284 }
Ajit Khapardebd46cb62009-06-26 02:51:07 +00001285 BUG_ON(j > MAX_SKB_FRAGS);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001286}
1287
/* Process the RX completion indicated by rxcp when GRO is disabled.
 * Builds a regular skb from the posted rx page fragments and pushes it
 * up the stack with netif_receive_skb().
 */
static void be_rx_compl_process(struct be_rx_obj *rxo,
				struct be_rx_compl_info *rxcp)
{
	struct be_adapter *adapter = rxo->adapter;
	struct net_device *netdev = adapter->netdev;
	struct sk_buff *skb;

	skb = netdev_alloc_skb_ip_align(netdev, BE_RX_SKB_ALLOC_SIZE);
	if (unlikely(!skb)) {
		/* Out of skbs: count the drop and recycle the rx buffers
		 * referenced by this completion.
		 */
		rx_stats(rxo)->rx_drops_no_skbs++;
		be_rx_compl_discard(rxo, rxcp);
		return;
	}

	/* Attach the received frags (and header) described by rxcp */
	skb_fill_rx_data(rxo, skb, rxcp);

	/* Trust the HW checksum only if RXCSUM is enabled on the netdev
	 * and the completion flags indicate the csum(s) passed.
	 */
	if (likely((netdev->features & NETIF_F_RXCSUM) && csum_passed(rxcp)))
		skb->ip_summed = CHECKSUM_UNNECESSARY;
	else
		skb_checksum_none_assert(skb);

	skb->protocol = eth_type_trans(skb, netdev);
	/* Record which rx ring this frame arrived on (index of rxo) */
	skb_record_rx_queue(skb, rxo - &adapter->rx_obj[0]);
	if (netdev->features & NETIF_F_RXHASH)
		skb->rxhash = rxcp->rss_hash;


	if (rxcp->vlanf)
		__vlan_hwaccel_put_tag(skb, rxcp->vlan_tag);

	netif_receive_skb(skb);
}
1321
/* Process the RX completion indicated by rxcp when GRO is enabled.
 * Attaches the posted rx page fragments directly to a napi-provided skb
 * and hands it to napi_gro_frags() — no data copy on this path.
 */
void be_rx_compl_process_gro(struct be_rx_obj *rxo, struct napi_struct *napi,
				struct be_rx_compl_info *rxcp)
{
	struct be_adapter *adapter = rxo->adapter;
	struct be_rx_page_info *page_info;
	struct sk_buff *skb = NULL;
	struct be_queue_info *rxq = &rxo->q;
	u16 remaining, curr_frag_len;
	u16 i, j;

	skb = napi_get_frags(napi);
	if (!skb) {
		/* No skb available: drop and recycle the rx buffers */
		be_rx_compl_discard(rxo, rxcp);
		return;
	}

	remaining = rxcp->pkt_size;
	/* NOTE: j is u16; starting at -1 relies on wraparound — the first
	 * iteration always takes the "i == 0" branch and does j++ before
	 * frags[j] is touched, so j is 0 by first use.
	 */
	for (i = 0, j = -1; i < rxcp->num_rcvd; i++) {
		page_info = get_rx_page_info(rxo, rxcp->rxq_idx);

		curr_frag_len = min(remaining, rx_frag_size);

		/* Coalesce all frags from the same physical page in one slot */
		if (i == 0 || page_info->page_offset == 0) {
			/* First frag or Fresh page */
			j++;
			skb_frag_set_page(skb, j, page_info->page);
			skb_shinfo(skb)->frags[j].page_offset =
							page_info->page_offset;
			skb_frag_size_set(&skb_shinfo(skb)->frags[j], 0);
		} else {
			/* Same page as previous frag: drop the extra ref
			 * taken when the frag was posted.
			 */
			put_page(page_info->page);
		}
		skb_frag_size_add(&skb_shinfo(skb)->frags[j], curr_frag_len);
		skb->truesize += rx_frag_size;
		remaining -= curr_frag_len;
		index_inc(&rxcp->rxq_idx, rxq->len);
		memset(page_info, 0, sizeof(*page_info));
	}
	BUG_ON(j > MAX_SKB_FRAGS);

	skb_shinfo(skb)->nr_frags = j + 1;
	skb->len = rxcp->pkt_size;
	skb->data_len = rxcp->pkt_size;
	/* GRO path is taken only for frames whose csum passed in HW */
	skb->ip_summed = CHECKSUM_UNNECESSARY;
	skb_record_rx_queue(skb, rxo - &adapter->rx_obj[0]);
	if (adapter->netdev->features & NETIF_F_RXHASH)
		skb->rxhash = rxcp->rss_hash;

	if (rxcp->vlanf)
		__vlan_hwaccel_put_tag(skb, rxcp->vlan_tag);

	napi_gro_frags(napi);
}
1377
/* Decode a v1 (BE3-native) RX completion entry into the HW-independent
 * be_rx_compl_info cookie consumed by the rest of the RX path.
 * Byte-order fixups of vlan_tag are done later in be_rx_compl_get().
 */
static void be_parse_rx_compl_v1(struct be_eth_rx_compl *compl,
				 struct be_rx_compl_info *rxcp)
{
	rxcp->pkt_size =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v1, pktsize, compl);
	rxcp->vlanf = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, vtp, compl);
	rxcp->err = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, err, compl);
	rxcp->tcpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, tcpf, compl);
	rxcp->udpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, udpf, compl);
	rxcp->ip_csum =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v1, ipcksm, compl);
	rxcp->l4_csum =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v1, l4_cksm, compl);
	rxcp->ipv6 =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v1, ip_version, compl);
	/* Index of the first posted rx frag for this frame */
	rxcp->rxq_idx =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v1, fragndx, compl);
	rxcp->num_rcvd =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v1, numfrags, compl);
	rxcp->pkt_type =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v1, cast_enc, compl);
	rxcp->rss_hash =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v1, rsshash, compl);
	if (rxcp->vlanf) {
		rxcp->vtm = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, vtm,
					  compl);
		rxcp->vlan_tag = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, vlan_tag,
					       compl);
	}
	rxcp->port = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, port, compl);
}
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001409
/* Decode a v0 (legacy) RX completion entry into the HW-independent
 * be_rx_compl_info cookie. Mirror of be_parse_rx_compl_v1() — keep the
 * two functions' field lists in sync when changing either.
 */
static void be_parse_rx_compl_v0(struct be_eth_rx_compl *compl,
				 struct be_rx_compl_info *rxcp)
{
	rxcp->pkt_size =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v0, pktsize, compl);
	rxcp->vlanf = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, vtp, compl);
	rxcp->err = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, err, compl);
	rxcp->tcpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, tcpf, compl);
	rxcp->udpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, udpf, compl);
	rxcp->ip_csum =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v0, ipcksm, compl);
	rxcp->l4_csum =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v0, l4_cksm, compl);
	rxcp->ipv6 =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v0, ip_version, compl);
	/* Index of the first posted rx frag for this frame */
	rxcp->rxq_idx =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v0, fragndx, compl);
	rxcp->num_rcvd =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v0, numfrags, compl);
	rxcp->pkt_type =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v0, cast_enc, compl);
	rxcp->rss_hash =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v0, rsshash, compl);
	if (rxcp->vlanf) {
		rxcp->vtm = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, vtm,
					  compl);
		rxcp->vlan_tag = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, vlan_tag,
					       compl);
	}
	rxcp->port = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, port, compl);
}
1441
/* Fetch and parse the next valid RX completion from rxo's CQ, or return
 * NULL if none is pending. The parsed result lives in rxo->rxcp (single
 * per-ring scratch cookie, valid until the next call).
 */
static struct be_rx_compl_info *be_rx_compl_get(struct be_rx_obj *rxo)
{
	struct be_eth_rx_compl *compl = queue_tail_node(&rxo->cq);
	struct be_rx_compl_info *rxcp = &rxo->rxcp;
	struct be_adapter *adapter = rxo->adapter;

	/* For checking the valid bit it is Ok to use either definition as the
	 * valid bit is at the same position in both v0 and v1 Rx compl */
	if (compl->dw[offsetof(struct amap_eth_rx_compl_v1, valid) / 32] == 0)
		return NULL;

	/* Read barrier: ensure the rest of the DMA'd entry is read only
	 * after the valid bit has been observed set.
	 */
	rmb();
	be_dws_le_to_cpu(compl, sizeof(*compl));

	if (adapter->be3_native)
		be_parse_rx_compl_v1(compl, rxcp);
	else
		be_parse_rx_compl_v0(compl, rxcp);

	if (rxcp->vlanf) {
		/* vlanf could be wrongly set in some cards.
		 * ignore if vtm is not set */
		if ((adapter->function_mode & FLEX10_MODE) && !rxcp->vtm)
			rxcp->vlanf = 0;

		if (!lancer_chip(adapter))
			rxcp->vlan_tag = swab16(rxcp->vlan_tag);

		/* Drop the vlan indication for the port's pvid unless the
		 * vid was explicitly configured on this interface.
		 */
		if (adapter->pvid == (rxcp->vlan_tag & VLAN_VID_MASK) &&
		    !adapter->vlan_tag[rxcp->vlan_tag])
			rxcp->vlanf = 0;
	}

	/* As the compl has been parsed, reset it; we wont touch it again */
	compl->dw[offsetof(struct amap_eth_rx_compl_v1, valid) / 32] = 0;

	queue_tail_inc(&rxo->cq);
	return rxcp;
}
1481
Eric Dumazet1829b082011-03-01 05:48:12 +00001482static inline struct page *be_alloc_pages(u32 size, gfp_t gfp)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001483{
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001484 u32 order = get_order(size);
Eric Dumazet1829b082011-03-01 05:48:12 +00001485
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001486 if (order > 0)
Eric Dumazet1829b082011-03-01 05:48:12 +00001487 gfp |= __GFP_COMP;
1488 return alloc_pages(gfp, order);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001489}
1490
1491/*
1492 * Allocate a page, split it to fragments of size rx_frag_size and post as
1493 * receive buffers to BE
1494 */
Eric Dumazet1829b082011-03-01 05:48:12 +00001495static void be_post_rx_frags(struct be_rx_obj *rxo, gfp_t gfp)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001496{
Sathya Perla3abcded2010-10-03 22:12:27 -07001497 struct be_adapter *adapter = rxo->adapter;
Sathya Perla26d92f92010-01-21 22:52:08 -08001498 struct be_rx_page_info *page_info = NULL, *prev_page_info = NULL;
Sathya Perla3abcded2010-10-03 22:12:27 -07001499 struct be_queue_info *rxq = &rxo->q;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001500 struct page *pagep = NULL;
1501 struct be_eth_rx_d *rxd;
1502 u64 page_dmaaddr = 0, frag_dmaaddr;
1503 u32 posted, page_offset = 0;
1504
Sathya Perla3abcded2010-10-03 22:12:27 -07001505 page_info = &rxo->page_info_tbl[rxq->head];
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001506 for (posted = 0; posted < MAX_RX_POST && !page_info->page; posted++) {
1507 if (!pagep) {
Eric Dumazet1829b082011-03-01 05:48:12 +00001508 pagep = be_alloc_pages(adapter->big_page_size, gfp);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001509 if (unlikely(!pagep)) {
Sathya Perlaac124ff2011-07-25 19:10:14 +00001510 rx_stats(rxo)->rx_post_fail++;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001511 break;
1512 }
Ivan Vecera2b7bceb2011-02-02 08:05:12 +00001513 page_dmaaddr = dma_map_page(&adapter->pdev->dev, pagep,
1514 0, adapter->big_page_size,
1515 DMA_FROM_DEVICE);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001516 page_info->page_offset = 0;
1517 } else {
1518 get_page(pagep);
1519 page_info->page_offset = page_offset + rx_frag_size;
1520 }
1521 page_offset = page_info->page_offset;
1522 page_info->page = pagep;
FUJITA Tomonorifac6da52010-04-01 16:53:22 +00001523 dma_unmap_addr_set(page_info, bus, page_dmaaddr);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001524 frag_dmaaddr = page_dmaaddr + page_info->page_offset;
1525
1526 rxd = queue_head_node(rxq);
1527 rxd->fragpa_lo = cpu_to_le32(frag_dmaaddr & 0xFFFFFFFF);
1528 rxd->fragpa_hi = cpu_to_le32(upper_32_bits(frag_dmaaddr));
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001529
1530 /* Any space left in the current big page for another frag? */
1531 if ((page_offset + rx_frag_size + rx_frag_size) >
1532 adapter->big_page_size) {
1533 pagep = NULL;
1534 page_info->last_page_user = true;
1535 }
Sathya Perla26d92f92010-01-21 22:52:08 -08001536
1537 prev_page_info = page_info;
1538 queue_head_inc(rxq);
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001539 page_info = &rxo->page_info_tbl[rxq->head];
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001540 }
1541 if (pagep)
Sathya Perla26d92f92010-01-21 22:52:08 -08001542 prev_page_info->last_page_user = true;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001543
1544 if (posted) {
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001545 atomic_add(posted, &rxq->used);
Sathya Perla8788fdc2009-07-27 22:52:03 +00001546 be_rxq_notify(adapter, rxq->id, posted);
Sathya Perlaea1dae12009-03-19 23:56:20 -07001547 } else if (atomic_read(&rxq->used) == 0) {
1548 /* Let be_worker replenish when memory is available */
Sathya Perla3abcded2010-10-03 22:12:27 -07001549 rxo->rx_post_starved = true;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001550 }
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001551}
1552
/* Fetch the next valid TX completion from tx_cq, or NULL if none.
 * The entry is byte-swapped in place, its valid bit cleared, and the CQ
 * tail advanced before returning.
 */
static struct be_eth_tx_compl *be_tx_compl_get(struct be_queue_info *tx_cq)
{
	struct be_eth_tx_compl *txcp = queue_tail_node(tx_cq);

	if (txcp->dw[offsetof(struct amap_eth_tx_compl, valid) / 32] == 0)
		return NULL;

	/* Read barrier: consume the entry only after seeing valid set */
	rmb();
	be_dws_le_to_cpu(txcp, sizeof(*txcp));

	/* Reset the valid bit so this slot reads as empty next time */
	txcp->dw[offsetof(struct amap_eth_tx_compl, valid) / 32] = 0;

	queue_tail_inc(tx_cq);
	return txcp;
}
1568
/* Reclaim one completed TX packet: unmap its wrbs from txq->tail up to
 * and including @last_index, and free the skb.
 * Returns the number of wrbs consumed (including the header wrb) so the
 * caller can decrement txq->used.
 */
static u16 be_tx_compl_process(struct be_adapter *adapter,
		struct be_tx_obj *txo, u16 last_index)
{
	struct be_queue_info *txq = &txo->q;
	struct be_eth_wrb *wrb;
	struct sk_buff **sent_skbs = txo->sent_skb_list;
	struct sk_buff *sent_skb;
	u16 cur_index, num_wrbs = 1; /* account for hdr wrb */
	bool unmap_skb_hdr = true;

	/* The skb was recorded at the header-wrb's index when posted */
	sent_skb = sent_skbs[txq->tail];
	BUG_ON(!sent_skb);
	sent_skbs[txq->tail] = NULL;

	/* skip header wrb */
	queue_tail_inc(txq);

	do {
		cur_index = txq->tail;
		wrb = queue_tail_node(txq);
		/* First data wrb may carry the linear header mapping */
		unmap_tx_frag(&adapter->pdev->dev, wrb,
			      (unmap_skb_hdr && skb_headlen(sent_skb)));
		unmap_skb_hdr = false;

		num_wrbs++;
		queue_tail_inc(txq);
	} while (cur_index != last_index);

	kfree_skb(sent_skb);
	return num_wrbs;
}
1600
/* Return the number of events in the event queue.
 * Consumes every pending entry: each event word is cleared and the EQ
 * tail advanced, so the count must later be acked via be_eq_notify().
 */
static inline int events_get(struct be_eq_obj *eqo)
{
	struct be_eq_entry *eqe;
	int num = 0;

	do {
		eqe = queue_tail_node(&eqo->q);
		if (eqe->evt == 0)
			break;

		/* Read barrier before consuming the DMA'd entry */
		rmb();
		eqe->evt = 0;
		num++;
		queue_tail_inc(&eqo->q);
	} while (true);

	return num;
}
1620
/* Interrupt-time EQ handling: drain pending events, ack them to the HW,
 * and kick NAPI if there was real work. Returns the event count.
 */
static int event_handle(struct be_eq_obj *eqo)
{
	bool rearm = false;
	int num = events_get(eqo);

	/* Deal with any spurious interrupts that come without events */
	if (!num)
		rearm = true;

	/* Ack events; rearm immediately only on the spurious path —
	 * otherwise napi poll rearms when done.
	 */
	if (num || msix_enabled(eqo->adapter))
		be_eq_notify(eqo->adapter, eqo->q.id, rearm, true, num);

	if (num)
		napi_schedule(&eqo->napi);

	return num;
}
1638
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001639/* Leaves the EQ is disarmed state */
1640static void be_eq_clean(struct be_eq_obj *eqo)
1641{
1642 int num = events_get(eqo);
1643
1644 be_eq_notify(eqo->adapter, eqo->q.id, false, true, num);
1645}
1646
/* Flush an RX ring during teardown: discard all pending completions,
 * then release every still-posted (unused) rx buffer and reset the ring
 * indices.
 */
static void be_rx_cq_clean(struct be_rx_obj *rxo)
{
	struct be_rx_page_info *page_info;
	struct be_queue_info *rxq = &rxo->q;
	struct be_queue_info *rx_cq = &rxo->cq;
	struct be_rx_compl_info *rxcp;
	u16 tail;

	/* First cleanup pending rx completions */
	while ((rxcp = be_rx_compl_get(rxo)) != NULL) {
		be_rx_compl_discard(rxo, rxcp);
		be_cq_notify(rxo->adapter, rx_cq->id, false, 1);
	}

	/* Then free posted rx buffer that were not used */
	/* Oldest posted-but-unconsumed slot is 'used' entries behind head */
	tail = (rxq->head + rxq->len - atomic_read(&rxq->used)) % rxq->len;
	for (; atomic_read(&rxq->used) > 0; index_inc(&tail, rxq->len)) {
		/* get_rx_page_info() also decrements rxq->used */
		page_info = get_rx_page_info(rxo, tail);
		put_page(page_info->page);
		memset(page_info, 0, sizeof(*page_info));
	}
	BUG_ON(atomic_read(&rxq->used));
	rxq->tail = rxq->head = 0;
}
1671
/* Drain all TX rings during teardown.
 * Phase 1: poll up to ~200ms for outstanding completions and reclaim
 * them. Phase 2: forcibly free any posted packets whose completions
 * never arrived, so no skbs or DMA mappings leak.
 */
static void be_tx_compl_clean(struct be_adapter *adapter)
{
	struct be_tx_obj *txo;
	struct be_queue_info *txq;
	struct be_eth_tx_compl *txcp;
	u16 end_idx, cmpl = 0, timeo = 0, num_wrbs = 0;
	struct sk_buff *sent_skb;
	bool dummy_wrb;
	int i, pending_txqs;

	/* Wait for a max of 200ms for all the tx-completions to arrive. */
	do {
		pending_txqs = adapter->num_tx_qs;

		for_all_tx_queues(adapter, txo, i) {
			txq = &txo->q;
			while ((txcp = be_tx_compl_get(&txo->cq))) {
				end_idx =
					AMAP_GET_BITS(struct amap_eth_tx_compl,
						      wrb_index, txcp);
				num_wrbs += be_tx_compl_process(adapter, txo,
								end_idx);
				cmpl++;
			}
			if (cmpl) {
				/* Ack the completions and release the wrbs */
				be_cq_notify(adapter, txo->cq.id, false, cmpl);
				atomic_sub(num_wrbs, &txq->used);
				cmpl = 0;
				num_wrbs = 0;
			}
			if (atomic_read(&txq->used) == 0)
				pending_txqs--;
		}

		if (pending_txqs == 0 || ++timeo > 200)
			break;

		mdelay(1);
	} while (true);

	for_all_tx_queues(adapter, txo, i) {
		txq = &txo->q;
		if (atomic_read(&txq->used))
			dev_err(&adapter->pdev->dev, "%d pending tx-compls\n",
				atomic_read(&txq->used));

		/* free posted tx for which compls will never arrive */
		while (atomic_read(&txq->used)) {
			sent_skb = txo->sent_skb_list[txq->tail];
			end_idx = txq->tail;
			/* Recompute the packet's wrb span to find its last
			 * index, then reclaim it as if it had completed.
			 */
			num_wrbs = wrb_cnt_for_skb(adapter, sent_skb,
						   &dummy_wrb);
			index_adv(&end_idx, num_wrbs - 1, txq->len);
			num_wrbs = be_tx_compl_process(adapter, txo, end_idx);
			atomic_sub(num_wrbs, &txq->used);
		}
	}
}
1730
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001731static void be_evt_queues_destroy(struct be_adapter *adapter)
1732{
1733 struct be_eq_obj *eqo;
1734 int i;
1735
1736 for_all_evt_queues(adapter, eqo, i) {
Padmanabh Ratnakar19d59aa2012-07-12 03:57:21 +00001737 if (eqo->q.created) {
1738 be_eq_clean(eqo);
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001739 be_cmd_q_destroy(adapter, &eqo->q, QTYPE_EQ);
Padmanabh Ratnakar19d59aa2012-07-12 03:57:21 +00001740 }
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001741 be_queue_free(adapter, &eqo->q);
1742 }
1743}
1744
1745static int be_evt_queues_create(struct be_adapter *adapter)
1746{
1747 struct be_queue_info *eq;
1748 struct be_eq_obj *eqo;
1749 int i, rc;
1750
1751 adapter->num_evt_qs = num_irqs(adapter);
1752
1753 for_all_evt_queues(adapter, eqo, i) {
1754 eqo->adapter = adapter;
1755 eqo->tx_budget = BE_TX_BUDGET;
1756 eqo->idx = i;
1757 eqo->max_eqd = BE_MAX_EQD;
1758 eqo->enable_aic = true;
1759
1760 eq = &eqo->q;
1761 rc = be_queue_alloc(adapter, eq, EVNT_Q_LEN,
1762 sizeof(struct be_eq_entry));
1763 if (rc)
1764 return rc;
1765
1766 rc = be_cmd_eq_create(adapter, eq, eqo->cur_eqd);
1767 if (rc)
1768 return rc;
1769 }
Sathya Perla1cfafab2012-02-23 18:50:15 +00001770 return 0;
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001771}
1772
Sathya Perla5fb379e2009-06-18 00:02:59 +00001773static void be_mcc_queues_destroy(struct be_adapter *adapter)
1774{
1775 struct be_queue_info *q;
Sathya Perla5fb379e2009-06-18 00:02:59 +00001776
Sathya Perla8788fdc2009-07-27 22:52:03 +00001777 q = &adapter->mcc_obj.q;
Sathya Perla5fb379e2009-06-18 00:02:59 +00001778 if (q->created)
Sathya Perla8788fdc2009-07-27 22:52:03 +00001779 be_cmd_q_destroy(adapter, q, QTYPE_MCCQ);
Sathya Perla5fb379e2009-06-18 00:02:59 +00001780 be_queue_free(adapter, q);
1781
Sathya Perla8788fdc2009-07-27 22:52:03 +00001782 q = &adapter->mcc_obj.cq;
Sathya Perla5fb379e2009-06-18 00:02:59 +00001783 if (q->created)
Sathya Perla8788fdc2009-07-27 22:52:03 +00001784 be_cmd_q_destroy(adapter, q, QTYPE_CQ);
Sathya Perla5fb379e2009-06-18 00:02:59 +00001785 be_queue_free(adapter, q);
1786}
1787
/* Must be called only after TX qs are created as MCC shares TX EQ.
 * Creates the MCC CQ first, then the MCC queue on top of it, unwinding
 * via the goto chain on any failure. Returns 0 on success, -1 on error.
 */
static int be_mcc_queues_create(struct be_adapter *adapter)
{
	struct be_queue_info *q, *cq;

	cq = &adapter->mcc_obj.cq;
	if (be_queue_alloc(adapter, cq, MCC_CQ_LEN,
			sizeof(struct be_mcc_compl)))
		goto err;

	/* Use the default EQ for MCC completions */
	if (be_cmd_cq_create(adapter, cq, &mcc_eqo(adapter)->q, true, 0))
		goto mcc_cq_free;

	q = &adapter->mcc_obj.q;
	if (be_queue_alloc(adapter, q, MCC_Q_LEN, sizeof(struct be_mcc_wrb)))
		goto mcc_cq_destroy;

	if (be_cmd_mccq_create(adapter, q, cq))
		goto mcc_q_free;

	return 0;

	/* Unwind in reverse order of construction */
mcc_q_free:
	be_queue_free(adapter, q);
mcc_cq_destroy:
	be_cmd_q_destroy(adapter, cq, QTYPE_CQ);
mcc_cq_free:
	be_queue_free(adapter, cq);
err:
	return -1;
}
1820
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001821static void be_tx_queues_destroy(struct be_adapter *adapter)
1822{
1823 struct be_queue_info *q;
Sathya Perla3c8def92011-06-12 20:01:58 +00001824 struct be_tx_obj *txo;
1825 u8 i;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001826
Sathya Perla3c8def92011-06-12 20:01:58 +00001827 for_all_tx_queues(adapter, txo, i) {
1828 q = &txo->q;
1829 if (q->created)
1830 be_cmd_q_destroy(adapter, q, QTYPE_TXQ);
1831 be_queue_free(adapter, q);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001832
Sathya Perla3c8def92011-06-12 20:01:58 +00001833 q = &txo->cq;
1834 if (q->created)
1835 be_cmd_q_destroy(adapter, q, QTYPE_CQ);
1836 be_queue_free(adapter, q);
1837 }
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001838}
1839
Sathya Perladafc0fe2011-10-24 02:45:02 +00001840static int be_num_txqs_want(struct be_adapter *adapter)
1841{
Sathya Perla39f1d942012-05-08 19:41:24 +00001842 if (sriov_want(adapter) || be_is_mc(adapter) ||
1843 lancer_chip(adapter) || !be_physfn(adapter) ||
1844 adapter->generation == BE_GEN2)
Sathya Perladafc0fe2011-10-24 02:45:02 +00001845 return 1;
1846 else
1847 return MAX_TX_QS;
1848}
1849
/* Size the TX-queue set and create one completion queue per TX ring.
 * Returns 0 on success or the first failing status.
 */
static int be_tx_cqs_create(struct be_adapter *adapter)
{
	struct be_queue_info *cq, *eq;
	int status;
	struct be_tx_obj *txo;
	u8 i;

	adapter->num_tx_qs = be_num_txqs_want(adapter);
	if (adapter->num_tx_qs != MAX_TX_QS) {
		/* netif_set_real_num_tx_queues() requires rtnl */
		rtnl_lock();
		netif_set_real_num_tx_queues(adapter->netdev,
			adapter->num_tx_qs);
		rtnl_unlock();
	}

	for_all_tx_queues(adapter, txo, i) {
		cq = &txo->cq;
		status = be_queue_alloc(adapter, cq, TX_CQ_LEN,
					sizeof(struct be_eth_tx_compl));
		if (status)
			return status;

		/* If num_evt_qs is less than num_tx_qs, then more than
		 * one txq share an eq
		 */
		eq = &adapter->eq_obj[i % adapter->num_evt_qs].q;
		status = be_cmd_cq_create(adapter, cq, eq, false, 3);
		if (status)
			return status;
	}
	return 0;
}
1882
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001883static int be_tx_qs_create(struct be_adapter *adapter)
1884{
1885 struct be_tx_obj *txo;
1886 int i, status;
1887
1888 for_all_tx_queues(adapter, txo, i) {
1889 status = be_queue_alloc(adapter, &txo->q, TX_Q_LEN,
1890 sizeof(struct be_eth_wrb));
1891 if (status)
1892 return status;
1893
1894 status = be_cmd_txq_create(adapter, &txo->q, &txo->cq);
1895 if (status)
1896 return status;
1897 }
1898
1899 return 0;
1900}
1901
1902static void be_rx_cqs_destroy(struct be_adapter *adapter)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001903{
1904 struct be_queue_info *q;
Sathya Perla3abcded2010-10-03 22:12:27 -07001905 struct be_rx_obj *rxo;
1906 int i;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001907
Sathya Perla3abcded2010-10-03 22:12:27 -07001908 for_all_rx_queues(adapter, rxo, i) {
Sathya Perla3abcded2010-10-03 22:12:27 -07001909 q = &rxo->cq;
1910 if (q->created)
1911 be_cmd_q_destroy(adapter, q, QTYPE_CQ);
1912 be_queue_free(adapter, q);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001913 }
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001914}
1915
/* Size the RX-queue set and create one completion queue per RX ring.
 * Returns 0 on success or the first failing status.
 */
static int be_rx_cqs_create(struct be_adapter *adapter)
{
	struct be_queue_info *eq, *cq;
	struct be_rx_obj *rxo;
	int rc, i;

	/* We'll create as many RSS rings as there are irqs.
	 * But when there's only one irq there's no use creating RSS rings
	 */
	adapter->num_rx_qs = (num_irqs(adapter) > 1) ?
				num_irqs(adapter) + 1 : 1;
	if (adapter->num_rx_qs != MAX_RX_QS) {
		/* netif_set_real_num_rx_queues() requires rtnl */
		rtnl_lock();
		netif_set_real_num_rx_queues(adapter->netdev,
					     adapter->num_rx_qs);
		rtnl_unlock();
	}

	adapter->big_page_size = (1 << get_order(rx_frag_size)) * PAGE_SIZE;
	for_all_rx_queues(adapter, rxo, i) {
		rxo->adapter = adapter;
		cq = &rxo->cq;
		rc = be_queue_alloc(adapter, cq, RX_CQ_LEN,
				sizeof(struct be_eth_rx_compl));
		if (rc)
			return rc;

		/* RX CQs are distributed round-robin over the EQs */
		eq = &adapter->eq_obj[i % adapter->num_evt_qs].q;
		rc = be_cmd_cq_create(adapter, cq, eq, false, 3);
		if (rc)
			return rc;
	}

	if (adapter->num_rx_qs != MAX_RX_QS)
		dev_info(&adapter->pdev->dev,
			"Created only %d receive queues\n", adapter->num_rx_qs);

	return 0;
}
1955
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001956static irqreturn_t be_intx(int irq, void *dev)
1957{
1958 struct be_adapter *adapter = dev;
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001959 int num_evts;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001960
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001961 /* With INTx only one EQ is used */
1962 num_evts = event_handle(&adapter->eq_obj[0]);
1963 if (num_evts)
1964 return IRQ_HANDLED;
1965 else
1966 return IRQ_NONE;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001967}
1968
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001969static irqreturn_t be_msix(int irq, void *dev)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001970{
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001971 struct be_eq_obj *eqo = dev;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001972
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001973 event_handle(eqo);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001974 return IRQ_HANDLED;
1975}
1976
Sathya Perla2e588f82011-03-11 02:49:26 +00001977static inline bool do_gro(struct be_rx_compl_info *rxcp)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001978{
Sathya Perla2e588f82011-03-11 02:49:26 +00001979 return (rxcp->tcpf && !rxcp->err) ? true : false;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001980}
1981
/* Consume up to @budget RX completions from @rxo's completion queue.
 * Runs in NAPI context. Flush/bogus completions are skipped or
 * discarded; good frames are handed to GRO or the regular rx path.
 * Returns the number of completions processed.
 */
static int be_process_rx(struct be_rx_obj *rxo, struct napi_struct *napi,
			int budget)
{
	struct be_adapter *adapter = rxo->adapter;
	struct be_queue_info *rx_cq = &rxo->cq;
	struct be_rx_compl_info *rxcp;
	u32 work_done;

	for (work_done = 0; work_done < budget; work_done++) {
		rxcp = be_rx_compl_get(rxo);
		if (!rxcp)
			break;

		/* Is it a flush compl that has no data */
		if (unlikely(rxcp->num_rcvd == 0))
			goto loop_continue;

		/* Discard compl with partial DMA Lancer B0 */
		if (unlikely(!rxcp->pkt_size)) {
			be_rx_compl_discard(rxo, rxcp);
			goto loop_continue;
		}

		/* On BE drop pkts that arrive due to imperfect filtering in
		 * promiscuous mode on some skews
		 */
		if (unlikely(rxcp->port != adapter->port_num &&
				!lancer_chip(adapter))) {
			be_rx_compl_discard(rxo, rxcp);
			goto loop_continue;
		}

		/* Error-free TCP frames go through GRO; everything else
		 * takes the regular receive path
		 */
		if (do_gro(rxcp))
			be_rx_compl_process_gro(rxo, napi, rxcp);
		else
			be_rx_compl_process(rxo, rxcp);
loop_continue:
		be_rx_stats_update(rxo, rxcp);
	}

	if (work_done) {
		/* Ack the consumed entries and re-arm the CQ */
		be_cq_notify(adapter, rx_cq->id, true, work_done);

		/* Replenish the RX ring when posted frags run low */
		if (atomic_read(&rxo->q.used) < RX_FRAGS_REFILL_WM)
			be_post_rx_frags(rxo, GFP_ATOMIC);
	}

	return work_done;
}
2031
/* Reap up to @budget TX completions of @txo (netdev subqueue @idx).
 * Frees completed wrbs/skbs, wakes the subqueue if it was stopped for
 * lack of wrbs, and updates tx stats. Returns true when the CQ was
 * fully drained within the budget.
 */
static bool be_process_tx(struct be_adapter *adapter, struct be_tx_obj *txo,
			int budget, int idx)
{
	struct be_eth_tx_compl *txcp;
	int num_wrbs = 0, work_done;

	for (work_done = 0; work_done < budget; work_done++) {
		txcp = be_tx_compl_get(&txo->cq);
		if (!txcp)
			break;
		/* Free all wrbs/skbs up to the completed wrb index */
		num_wrbs += be_tx_compl_process(adapter, txo,
				AMAP_GET_BITS(struct amap_eth_tx_compl,
					wrb_index, txcp));
	}

	if (work_done) {
		/* Ack consumed completions and re-arm the CQ */
		be_cq_notify(adapter, txo->cq.id, true, work_done);
		atomic_sub(num_wrbs, &txo->q.used);

		/* As Tx wrbs have been freed up, wake up netdev queue
		 * if it was stopped due to lack of tx wrbs. */
		if (__netif_subqueue_stopped(adapter->netdev, idx) &&
			atomic_read(&txo->q.used) < txo->q.len / 2) {
			netif_wake_subqueue(adapter->netdev, idx);
		}

		u64_stats_update_begin(&tx_stats(txo)->sync_compl);
		tx_stats(txo)->tx_compl += work_done;
		u64_stats_update_end(&tx_stats(txo)->sync_compl);
	}
	return (work_done < budget); /* Done */
}
Sathya Perla3c8def92011-06-12 20:01:58 +00002064
/* NAPI poll handler: services every TX and RX queue mapped to this EQ
 * (queues are striped across EQs with stride num_evt_qs), plus MCC
 * completions on the EQ that owns the MCC queue. Returns the RX work
 * done; returning @budget keeps the queue in polling mode.
 */
int be_poll(struct napi_struct *napi, int budget)
{
	struct be_eq_obj *eqo = container_of(napi, struct be_eq_obj, napi);
	struct be_adapter *adapter = eqo->adapter;
	int max_work = 0, work, i;
	bool tx_done;

	/* Process all TXQs serviced by this EQ */
	for (i = eqo->idx; i < adapter->num_tx_qs; i += adapter->num_evt_qs) {
		tx_done = be_process_tx(adapter, &adapter->tx_obj[i],
					eqo->tx_budget, i);
		if (!tx_done)
			max_work = budget; /* TX not drained: keep polling */
	}

	/* This loop will iterate twice for EQ0 in which
	 * completions of the last RXQ (default one) are also processed
	 * For other EQs the loop iterates only once
	 */
	for (i = eqo->idx; i < adapter->num_rx_qs; i += adapter->num_evt_qs) {
		work = be_process_rx(&adapter->rx_obj[i], napi, budget);
		max_work = max(work, max_work);
	}

	if (is_mcc_eqo(eqo))
		be_process_mcc(adapter);

	if (max_work < budget) {
		/* All work done: leave polling mode and re-arm the EQ */
		napi_complete(napi);
		be_eq_notify(adapter, eqo->q.id, true, false, 0);
	} else {
		/* As we'll continue in polling mode, count and clear events */
		be_eq_notify(adapter, eqo->q.id, false, false, events_get(eqo));
	}
	return max_work;
}
2101
/* Probe the adapter for unrecoverable hardware errors and log details.
 * Lancer chips expose errors via the SLIPORT status/error registers;
 * BE2/BE3 expose them via the PCI-config UE status registers, which are
 * filtered through the corresponding mask registers. Sets
 * adapter->hw_error when an error is found. No-op if a critical error
 * has already been latched.
 */
void be_detect_error(struct be_adapter *adapter)
{
	u32 ue_lo = 0, ue_hi = 0, ue_lo_mask = 0, ue_hi_mask = 0;
	u32 sliport_status = 0, sliport_err1 = 0, sliport_err2 = 0;
	u32 i;

	/* Nothing more to report once a critical error is latched */
	if (be_crit_error(adapter))
		return;

	if (lancer_chip(adapter)) {
		sliport_status = ioread32(adapter->db + SLIPORT_STATUS_OFFSET);
		if (sliport_status & SLIPORT_STATUS_ERR_MASK) {
			sliport_err1 = ioread32(adapter->db +
					SLIPORT_ERROR1_OFFSET);
			sliport_err2 = ioread32(adapter->db +
					SLIPORT_ERROR2_OFFSET);
		}
	} else {
		pci_read_config_dword(adapter->pdev,
				PCICFG_UE_STATUS_LOW, &ue_lo);
		pci_read_config_dword(adapter->pdev,
				PCICFG_UE_STATUS_HIGH, &ue_hi);
		pci_read_config_dword(adapter->pdev,
				PCICFG_UE_STATUS_LOW_MASK, &ue_lo_mask);
		pci_read_config_dword(adapter->pdev,
				PCICFG_UE_STATUS_HI_MASK, &ue_hi_mask);

		/* Only unmasked bits represent real errors */
		ue_lo = (ue_lo & ~ue_lo_mask);
		ue_hi = (ue_hi & ~ue_hi_mask);
	}

	if (ue_lo || ue_hi ||
		sliport_status & SLIPORT_STATUS_ERR_MASK) {
		adapter->hw_error = true;
		dev_err(&adapter->pdev->dev,
			"Error detected in the card\n");
	}

	if (sliport_status & SLIPORT_STATUS_ERR_MASK) {
		dev_err(&adapter->pdev->dev,
			"ERR: sliport status 0x%x\n", sliport_status);
		dev_err(&adapter->pdev->dev,
			"ERR: sliport error1 0x%x\n", sliport_err1);
		dev_err(&adapter->pdev->dev,
			"ERR: sliport error2 0x%x\n", sliport_err2);
	}

	/* Decode each set UE bit to its human-readable name */
	if (ue_lo) {
		for (i = 0; ue_lo; ue_lo >>= 1, i++) {
			if (ue_lo & 1)
				dev_err(&adapter->pdev->dev,
				"UE: %s bit set\n", ue_status_low_desc[i]);
		}
	}

	if (ue_hi) {
		for (i = 0; ue_hi; ue_hi >>= 1, i++) {
			if (ue_hi & 1)
				dev_err(&adapter->pdev->dev,
				"UE: %s bit set\n", ue_status_hi_desc[i]);
		}
	}

}
2166
Sathya Perla8d56ff12009-11-22 22:02:26 +00002167static void be_msix_disable(struct be_adapter *adapter)
2168{
Sathya Perlaac6a0c42011-03-21 20:49:25 +00002169 if (msix_enabled(adapter)) {
Sathya Perla8d56ff12009-11-22 22:02:26 +00002170 pci_disable_msix(adapter->pdev);
Sathya Perlaac6a0c42011-03-21 20:49:25 +00002171 adapter->num_msix_vec = 0;
Sathya Perla3abcded2010-10-03 22:12:27 -07002172 }
2173}
2174
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002175static uint be_num_rss_want(struct be_adapter *adapter)
2176{
Yuval Mintz30e80b52012-07-01 03:19:00 +00002177 u32 num = 0;
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002178 if ((adapter->function_caps & BE_FUNCTION_CAPS_RSS) &&
Sathya Perla4cbdaf62012-08-28 20:37:40 +00002179 !sriov_want(adapter) && be_physfn(adapter)) {
Yuval Mintz30e80b52012-07-01 03:19:00 +00002180 num = (adapter->be3_native) ? BE3_MAX_RSS_QS : BE2_MAX_RSS_QS;
2181 num = min_t(u32, num, (u32)netif_get_num_default_rss_queues());
2182 }
2183 return num;
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002184}
2185
/* Try to enable MSI-x with one vector per desired RSS ring plus RoCE
 * vectors when supported. If the full request fails but the PCI core
 * reports a smaller available count, retry with that count. On success
 * the granted vectors are split between NIC and RoCE; on failure
 * num_msix_vec stays 0 and the driver falls back to INTx.
 */
static void be_msix_enable(struct be_adapter *adapter)
{
#define BE_MIN_MSIX_VECTORS		1
	int i, status, num_vec, num_roce_vec = 0;

	/* If RSS queues are not used, need a vec for default RX Q */
	num_vec = min(be_num_rss_want(adapter), num_online_cpus());
	if (be_roce_supported(adapter)) {
		num_roce_vec = min_t(u32, MAX_ROCE_MSIX_VECTORS,
					(num_online_cpus() + 1));
		num_roce_vec = min(num_roce_vec, MAX_ROCE_EQS);
		num_vec += num_roce_vec;
		num_vec = min(num_vec, MAX_MSIX_VECTORS);
	}
	num_vec = max(num_vec, BE_MIN_MSIX_VECTORS);

	for (i = 0; i < num_vec; i++)
		adapter->msix_entries[i].entry = i;

	status = pci_enable_msix(adapter->pdev, adapter->msix_entries, num_vec);
	if (status == 0) {
		goto done;
	} else if (status >= BE_MIN_MSIX_VECTORS) {
		/* A positive return is the number of vectors actually
		 * available; retry with that smaller count
		 */
		num_vec = status;
		if (pci_enable_msix(adapter->pdev, adapter->msix_entries,
				num_vec) == 0)
			goto done;
	}
	return;
done:
	/* Split the granted vectors between NIC and RoCE usage */
	if (be_roce_supported(adapter)) {
		if (num_vec > num_roce_vec) {
			adapter->num_msix_vec = num_vec - num_roce_vec;
			adapter->num_msix_roce_vec =
				num_vec - adapter->num_msix_vec;
		} else {
			adapter->num_msix_vec = num_vec;
			adapter->num_msix_roce_vec = 0;
		}
	} else
		adapter->num_msix_vec = num_vec;
	return;
}
2229
Sathya Perlafe6d2a32010-11-21 23:25:50 +00002230static inline int be_msix_vec_get(struct be_adapter *adapter,
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002231 struct be_eq_obj *eqo)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002232{
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002233 return adapter->msix_entries[eqo->idx].vector;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002234}
2235
/* request_irq() one MSI-x vector per event queue. On failure, free the
 * vectors already requested and disable MSI-x so the caller can fall
 * back to INTx. Returns 0 or the request_irq() error code.
 */
static int be_msix_register(struct be_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	struct be_eq_obj *eqo;
	int status, i, vec;

	for_all_evt_queues(adapter, eqo, i) {
		/* irq name, e.g. "eth0-q0"; the EQ object is the cookie */
		sprintf(eqo->desc, "%s-q%d", netdev->name, i);
		vec = be_msix_vec_get(adapter, eqo);
		status = request_irq(vec, be_msix, 0, eqo->desc, eqo);
		if (status)
			goto err_msix;
	}

	return 0;
err_msix:
	/* Unwind: free the vectors successfully requested so far */
	for (i--, eqo = &adapter->eq_obj[i]; i >= 0; i--, eqo--)
		free_irq(be_msix_vec_get(adapter, eqo), eqo);
	dev_warn(&adapter->pdev->dev, "MSIX Request IRQ failed - err %d\n",
		status);
	be_msix_disable(adapter);
	return status;
}
2259
2260static int be_irq_register(struct be_adapter *adapter)
2261{
2262 struct net_device *netdev = adapter->netdev;
2263 int status;
2264
Sathya Perlaac6a0c42011-03-21 20:49:25 +00002265 if (msix_enabled(adapter)) {
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002266 status = be_msix_register(adapter);
2267 if (status == 0)
2268 goto done;
Sarveshwar Bandiba343c72010-03-31 02:56:12 +00002269 /* INTx is not supported for VF */
2270 if (!be_physfn(adapter))
2271 return status;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002272 }
2273
2274 /* INTx */
2275 netdev->irq = adapter->pdev->irq;
2276 status = request_irq(netdev->irq, be_intx, IRQF_SHARED, netdev->name,
2277 adapter);
2278 if (status) {
2279 dev_err(&adapter->pdev->dev,
2280 "INTx request IRQ failed - err %d\n", status);
2281 return status;
2282 }
2283done:
2284 adapter->isr_registered = true;
2285 return 0;
2286}
2287
2288static void be_irq_unregister(struct be_adapter *adapter)
2289{
2290 struct net_device *netdev = adapter->netdev;
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002291 struct be_eq_obj *eqo;
Sathya Perla3abcded2010-10-03 22:12:27 -07002292 int i;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002293
2294 if (!adapter->isr_registered)
2295 return;
2296
2297 /* INTx */
Sathya Perlaac6a0c42011-03-21 20:49:25 +00002298 if (!msix_enabled(adapter)) {
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002299 free_irq(netdev->irq, adapter);
2300 goto done;
2301 }
2302
2303 /* MSIx */
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002304 for_all_evt_queues(adapter, eqo, i)
2305 free_irq(be_msix_vec_get(adapter, eqo), eqo);
Sathya Perla3abcded2010-10-03 22:12:27 -07002306
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002307done:
2308 adapter->isr_registered = false;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002309}
2310
/* Destroy the RX WRB queues: ask the FW to tear down each created RXQ,
 * wait a short grace period for in-flight DMA and the flush completion,
 * drain the completion queue, then free the host memory.
 */
static void be_rx_qs_destroy(struct be_adapter *adapter)
{
	struct be_queue_info *q;
	struct be_rx_obj *rxo;
	int i;

	for_all_rx_queues(adapter, rxo, i) {
		q = &rxo->q;
		if (q->created) {
			be_cmd_rxq_destroy(adapter, q);
			/* After the rxq is invalidated, wait for a grace time
			 * of 1ms for all dma to end and the flush compl to
			 * arrive
			 */
			mdelay(1);
			/* Reclaim buffers still referenced by the CQ */
			be_rx_cq_clean(rxo);
		}
		be_queue_free(adapter, q);
	}
}
2331
/* ndo_stop handler: quiesce the device in reverse order of be_open().
 * Closes RoCE, disables async MCC, masks interrupts, stops NAPI and
 * cleans each EQ, unregisters irqs, drains pending TX completions and
 * finally destroys the RX queues. Always returns 0.
 */
static int be_close(struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_eq_obj *eqo;
	int i;

	be_roce_dev_close(adapter);

	be_async_mcc_disable(adapter);

	/* Interrupt mask register is only touched on non-Lancer chips
	 * (NOTE(review): inferred from the lancer_chip() guard — confirm)
	 */
	if (!lancer_chip(adapter))
		be_intr_set(adapter, false);

	for_all_evt_queues(adapter, eqo, i) {
		napi_disable(&eqo->napi);
		/* Ensure no irq handler for this EQ is still in flight */
		if (msix_enabled(adapter))
			synchronize_irq(be_msix_vec_get(adapter, eqo));
		else
			synchronize_irq(netdev->irq);
		be_eq_clean(eqo);
	}

	be_irq_unregister(adapter);

	/* Wait for all pending tx completions to arrive so that
	 * all tx skbs are freed.
	 */
	be_tx_compl_clean(adapter);

	be_rx_qs_destroy(adapter);
	return 0;
}
2364
/* Allocate and create the RX WRB queues, program the 128-entry RSS
 * indirection table when multiple RX queues exist, and post the initial
 * receive buffers. The default (non-RSS) RXQ is created first because
 * the FW expects it. Returns 0 or the first error encountered.
 */
static int be_rx_qs_create(struct be_adapter *adapter)
{
	struct be_rx_obj *rxo;
	int rc, i, j;
	u8 rsstable[128];	/* RSS indirection table entries */

	for_all_rx_queues(adapter, rxo, i) {
		rc = be_queue_alloc(adapter, &rxo->q, RX_Q_LEN,
				sizeof(struct be_eth_rx_d));
		if (rc)
			return rc;
	}

	/* The FW would like the default RXQ to be created first */
	rxo = default_rxo(adapter);
	rc = be_cmd_rxq_create(adapter, &rxo->q, rxo->cq.id, rx_frag_size,
			adapter->if_handle, false, &rxo->rss_id);
	if (rc)
		return rc;

	for_all_rss_queues(adapter, rxo, i) {
		rc = be_cmd_rxq_create(adapter, &rxo->q, rxo->cq.id,
				rx_frag_size, adapter->if_handle,
				true, &rxo->rss_id);
		if (rc)
			return rc;
	}

	if (be_multi_rxq(adapter)) {
		/* Fill the indirection table round-robin with the
		 * rss_ids of the RSS queues
		 */
		for (j = 0; j < 128; j += adapter->num_rx_qs - 1) {
			for_all_rss_queues(adapter, rxo, i) {
				if ((j + i) >= 128)
					break;
				rsstable[j + i] = rxo->rss_id;
			}
		}
		rc = be_cmd_rss_config(adapter, rsstable, 128);
		if (rc)
			return rc;
	}

	/* First time posting */
	for_all_rx_queues(adapter, rxo, i)
		be_post_rx_frags(rxo, GFP_KERNEL);
	return 0;
}
2411
/* ndo_open handler: create the RX queues, register irqs, unmask
 * interrupts, arm all RX/TX CQs and the EQs, enable NAPI and async MCC,
 * then query and report link status.
 * Returns 0, or -EIO after undoing partial setup via be_close().
 */
static int be_open(struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_eq_obj *eqo;
	struct be_rx_obj *rxo;
	struct be_tx_obj *txo;
	u8 link_status;
	int status, i;

	status = be_rx_qs_create(adapter);
	if (status)
		goto err;

	be_irq_register(adapter);

	if (!lancer_chip(adapter))
		be_intr_set(adapter, true);

	/* Arm the RX and TX completion queues */
	for_all_rx_queues(adapter, rxo, i)
		be_cq_notify(adapter, rxo->cq.id, true, 0);

	for_all_tx_queues(adapter, txo, i)
		be_cq_notify(adapter, txo->cq.id, true, 0);

	be_async_mcc_enable(adapter);

	/* Enable NAPI and arm the event queues */
	for_all_evt_queues(adapter, eqo, i) {
		napi_enable(&eqo->napi);
		be_eq_notify(adapter, eqo->q.id, true, false, 0);
	}

	status = be_cmd_link_status_query(adapter, NULL, NULL,
					&link_status, 0);
	if (!status)
		be_link_status_update(adapter, link_status);

	be_roce_dev_open(adapter);
	return 0;
err:
	be_close(adapter->netdev);
	return -EIO;
}
2454
Ajit Khaparde71d8d1b2009-12-03 06:16:59 +00002455static int be_setup_wol(struct be_adapter *adapter, bool enable)
2456{
2457 struct be_dma_mem cmd;
2458 int status = 0;
2459 u8 mac[ETH_ALEN];
2460
2461 memset(mac, 0, ETH_ALEN);
2462
2463 cmd.size = sizeof(struct be_cmd_req_acpi_wol_magic_config);
Ivan Vecera2b7bceb2011-02-02 08:05:12 +00002464 cmd.va = dma_alloc_coherent(&adapter->pdev->dev, cmd.size, &cmd.dma,
2465 GFP_KERNEL);
Ajit Khaparde71d8d1b2009-12-03 06:16:59 +00002466 if (cmd.va == NULL)
2467 return -1;
2468 memset(cmd.va, 0, cmd.size);
2469
2470 if (enable) {
2471 status = pci_write_config_dword(adapter->pdev,
2472 PCICFG_PM_CONTROL_OFFSET, PCICFG_PM_CONTROL_MASK);
2473 if (status) {
2474 dev_err(&adapter->pdev->dev,
Frans Pop2381a552010-03-24 07:57:36 +00002475 "Could not enable Wake-on-lan\n");
Ivan Vecera2b7bceb2011-02-02 08:05:12 +00002476 dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va,
2477 cmd.dma);
Ajit Khaparde71d8d1b2009-12-03 06:16:59 +00002478 return status;
2479 }
2480 status = be_cmd_enable_magic_wol(adapter,
2481 adapter->netdev->dev_addr, &cmd);
2482 pci_enable_wake(adapter->pdev, PCI_D3hot, 1);
2483 pci_enable_wake(adapter->pdev, PCI_D3cold, 1);
2484 } else {
2485 status = be_cmd_enable_magic_wol(adapter, mac, &cmd);
2486 pci_enable_wake(adapter->pdev, PCI_D3hot, 0);
2487 pci_enable_wake(adapter->pdev, PCI_D3cold, 0);
2488 }
2489
Ivan Vecera2b7bceb2011-02-02 08:05:12 +00002490 dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va, cmd.dma);
Ajit Khaparde71d8d1b2009-12-03 06:16:59 +00002491 return status;
2492}
2493
/*
 * Generate a seed MAC address from the PF MAC Address using jhash.
 * MAC Address for VFs are assigned incrementally starting from the seed.
 * These addresses are programmed in the ASIC by the PF and the VF driver
 * queries for the MAC address during its probe.
 * Returns the status of the last MAC programming attempt.
 */
static inline int be_vf_eth_addr_config(struct be_adapter *adapter)
{
	u32 vf;
	int status = 0;
	u8 mac[ETH_ALEN];
	struct be_vf_cfg *vf_cfg;

	be_vf_eth_addr_generate(adapter, mac);

	for_all_vfs(adapter, vf_cfg, vf) {
		if (lancer_chip(adapter)) {
			/* Lancer programs the MAC via the mac-list cmd */
			status = be_cmd_set_mac_list(adapter, mac, 1, vf + 1);
		} else {
			status = be_cmd_pmac_add(adapter, mac,
						vf_cfg->if_handle,
						&vf_cfg->pmac_id, vf + 1);
		}

		if (status)
			dev_err(&adapter->pdev->dev,
			"Mac address assignment failed for VF %d\n", vf);
		else
			/* Remember the address so the VF can query it */
			memcpy(vf_cfg->mac_addr, mac, ETH_ALEN);

		/* Next VF gets the next consecutive address
		 * (NOTE(review): mac[5] increment does not carry into
		 * mac[4] past 0xff — confirm num_vfs stays small enough)
		 */
		mac[5] += 1;
	}
	return status;
}
2528
/* Tear down SR-IOV state: delete each VF's programmed MAC and vNIC
 * interface, then disable SR-IOV. If any VF is still assigned to a VM,
 * only the driver bookkeeping (vf_cfg array, num_vfs) is released.
 */
static void be_vf_clear(struct be_adapter *adapter)
{
	struct be_vf_cfg *vf_cfg;
	u32 vf;

	if (be_find_vfs(adapter, ASSIGNED)) {
		/* Cannot disable SR-IOV while a VM owns a VF */
		dev_warn(&adapter->pdev->dev, "VFs are assigned to VMs\n");
		goto done;
	}

	for_all_vfs(adapter, vf_cfg, vf) {
		if (lancer_chip(adapter))
			/* Lancer: clear the VF's mac-list */
			be_cmd_set_mac_list(adapter, NULL, 0, vf + 1);
		else
			be_cmd_pmac_del(adapter, vf_cfg->if_handle,
					vf_cfg->pmac_id, vf + 1);

		be_cmd_if_destroy(adapter, vf_cfg->if_handle, vf + 1);
	}
	pci_disable_sriov(adapter->pdev);
done:
	kfree(adapter->vf_cfg);
	adapter->num_vfs = 0;
}
2553
/* Undo be_setup(): stop the delayed worker, clear SR-IOV state, delete
 * the additional unicast MACs, destroy the interface and all queues
 * (MCC, RX CQ, TX, EQ), and disable MSI-x. Always returns 0.
 */
static int be_clear(struct be_adapter *adapter)
{
	int i = 1;	/* skip pmac_id[0] — presumably the primary MAC;
			 * verify against be_setup()/MAC add path */

	if (adapter->flags & BE_FLAGS_WORKER_SCHEDULED) {
		cancel_delayed_work_sync(&adapter->work);
		adapter->flags &= ~BE_FLAGS_WORKER_SCHEDULED;
	}

	if (sriov_enabled(adapter))
		be_vf_clear(adapter);

	/* Delete the additional (non-primary) unicast MACs */
	for (; adapter->uc_macs > 0; adapter->uc_macs--, i++)
		be_cmd_pmac_del(adapter, adapter->if_handle,
			adapter->pmac_id[i], 0);

	be_cmd_if_destroy(adapter, adapter->if_handle, 0);

	be_mcc_queues_destroy(adapter);
	be_rx_cqs_destroy(adapter);
	be_tx_queues_destroy(adapter);
	be_evt_queues_destroy(adapter);

	be_msix_disable(adapter);
	return 0;
}
2580
Sathya Perla39f1d942012-05-08 19:41:24 +00002581static int be_vf_setup_init(struct be_adapter *adapter)
Sathya Perla30128032011-11-10 19:17:57 +00002582{
Sathya Perla11ac75e2011-12-13 00:58:50 +00002583 struct be_vf_cfg *vf_cfg;
Sathya Perla30128032011-11-10 19:17:57 +00002584 int vf;
2585
Sathya Perla39f1d942012-05-08 19:41:24 +00002586 adapter->vf_cfg = kcalloc(adapter->num_vfs, sizeof(*vf_cfg),
2587 GFP_KERNEL);
2588 if (!adapter->vf_cfg)
2589 return -ENOMEM;
2590
Sathya Perla11ac75e2011-12-13 00:58:50 +00002591 for_all_vfs(adapter, vf_cfg, vf) {
2592 vf_cfg->if_handle = -1;
2593 vf_cfg->pmac_id = -1;
Sathya Perla30128032011-11-10 19:17:57 +00002594 }
Sathya Perla39f1d942012-05-08 19:41:24 +00002595 return 0;
Sathya Perla30128032011-11-10 19:17:57 +00002596}
2597
/* Enable SR-IOV and configure each VF: create its vNIC interface,
 * assign MAC addresses (unless VFs were already enabled before this
 * driver load), set a TX rate cap and fetch the default vlan. The
 * requested num_vfs is clamped to what the device supports. Returns 0
 * (also when the platform cannot enable SR-IOV) or a FW command error.
 */
static int be_vf_setup(struct be_adapter *adapter)
{
	struct be_vf_cfg *vf_cfg;
	struct device *dev = &adapter->pdev->dev;
	u32 cap_flags, en_flags, vf;
	u16 def_vlan, lnk_speed;
	int status, enabled_vfs;

	enabled_vfs = be_find_vfs(adapter, ENABLED);
	if (enabled_vfs) {
		dev_warn(dev, "%d VFs are already enabled\n", enabled_vfs);
		dev_warn(dev, "Ignoring num_vfs=%d setting\n", num_vfs);
		return 0;
	}

	/* Clamp the module-param request to the device capability */
	if (num_vfs > adapter->dev_num_vfs) {
		dev_warn(dev, "Device supports %d VFs and not %d\n",
			adapter->dev_num_vfs, num_vfs);
		num_vfs = adapter->dev_num_vfs;
	}

	status = pci_enable_sriov(adapter->pdev, num_vfs);
	if (!status) {
		adapter->num_vfs = num_vfs;
	} else {
		/* Platform doesn't support SRIOV though device supports it */
		dev_warn(dev, "SRIOV enable failed\n");
		return 0;
	}

	status = be_vf_setup_init(adapter);
	if (status)
		goto err;

	/* Create an untagged/broadcast/multicast vNIC interface per VF */
	cap_flags = en_flags = BE_IF_FLAGS_UNTAGGED | BE_IF_FLAGS_BROADCAST |
				BE_IF_FLAGS_MULTICAST;
	for_all_vfs(adapter, vf_cfg, vf) {
		status = be_cmd_if_create(adapter, cap_flags, en_flags,
					&vf_cfg->if_handle, vf + 1);
		if (status)
			goto err;
	}

	if (!enabled_vfs) {
		status = be_vf_eth_addr_config(adapter);
		if (status)
			goto err;
	}

	for_all_vfs(adapter, vf_cfg, vf) {
		/* Cap the VF's TX rate
		 * (NOTE(review): tx_rate appears to be in 100kbps units
		 * given lnk_speed * 10 — confirm against be_cmd_set_qos)
		 */
		lnk_speed = 1000;
		status = be_cmd_set_qos(adapter, lnk_speed, vf + 1);
		if (status)
			goto err;
		vf_cfg->tx_rate = lnk_speed * 10;

		status = be_cmd_get_hsw_config(adapter, &def_vlan,
				vf + 1, vf_cfg->if_handle);
		if (status)
			goto err;
		vf_cfg->def_vid = def_vlan;
	}
	return 0;
err:
	return status;
}
2664
Sathya Perla30128032011-11-10 19:17:57 +00002665static void be_setup_init(struct be_adapter *adapter)
2666{
2667 adapter->vlan_prio_bmap = 0xff;
Ajit Khaparde42f11cf2012-04-21 18:53:22 +00002668 adapter->phy.link_speed = -1;
Sathya Perla30128032011-11-10 19:17:57 +00002669 adapter->if_handle = -1;
2670 adapter->be3_native = false;
2671 adapter->promiscuous = false;
2672 adapter->eq_next_idx = 0;
Ajit Khaparde42f11cf2012-04-21 18:53:22 +00002673 adapter->phy.forced_port_speed = -1;
Sathya Perla30128032011-11-10 19:17:57 +00002674}
2675
Padmanabh Ratnakar1578e772012-06-07 04:37:08 +00002676static int be_get_mac_addr(struct be_adapter *adapter, u8 *mac, u32 if_handle,
2677 bool *active_mac, u32 *pmac_id)
Padmanabh Ratnakar590c3912011-11-25 05:47:26 +00002678{
Padmanabh Ratnakar1578e772012-06-07 04:37:08 +00002679 int status = 0;
Padmanabh Ratnakare5e1ee82012-02-03 09:50:17 +00002680
Padmanabh Ratnakar1578e772012-06-07 04:37:08 +00002681 if (!is_zero_ether_addr(adapter->netdev->perm_addr)) {
2682 memcpy(mac, adapter->netdev->dev_addr, ETH_ALEN);
2683 if (!lancer_chip(adapter) && !be_physfn(adapter))
2684 *active_mac = true;
2685 else
2686 *active_mac = false;
Padmanabh Ratnakare5e1ee82012-02-03 09:50:17 +00002687
Padmanabh Ratnakar1578e772012-06-07 04:37:08 +00002688 return status;
Padmanabh Ratnakare5e1ee82012-02-03 09:50:17 +00002689 }
Padmanabh Ratnakar1578e772012-06-07 04:37:08 +00002690
2691 if (lancer_chip(adapter)) {
2692 status = be_cmd_get_mac_from_list(adapter, mac,
2693 active_mac, pmac_id, 0);
2694 if (*active_mac) {
Sathya Perla5ee49792012-09-28 04:39:41 +00002695 status = be_cmd_mac_addr_query(adapter, mac, false,
2696 if_handle, *pmac_id);
Padmanabh Ratnakar1578e772012-06-07 04:37:08 +00002697 }
2698 } else if (be_physfn(adapter)) {
2699 /* For BE3, for PF get permanent MAC */
Sathya Perla5ee49792012-09-28 04:39:41 +00002700 status = be_cmd_mac_addr_query(adapter, mac, true, 0, 0);
Padmanabh Ratnakar1578e772012-06-07 04:37:08 +00002701 *active_mac = false;
2702 } else {
2703 /* For BE3, for VF get soft MAC assigned by PF*/
Sathya Perla5ee49792012-09-28 04:39:41 +00002704 status = be_cmd_mac_addr_query(adapter, mac, false,
Padmanabh Ratnakar1578e772012-06-07 04:37:08 +00002705 if_handle, 0);
2706 *active_mac = true;
2707 }
Padmanabh Ratnakar590c3912011-11-25 05:47:26 +00002708 return status;
2709}
2710
Sathya Perla39f1d942012-05-08 19:41:24 +00002711/* Routine to query per function resource limits */
2712static int be_get_config(struct be_adapter *adapter)
2713{
2714 int pos;
2715 u16 dev_num_vfs;
2716
2717 pos = pci_find_ext_capability(adapter->pdev, PCI_EXT_CAP_ID_SRIOV);
2718 if (pos) {
2719 pci_read_config_word(adapter->pdev, pos + PCI_SRIOV_TOTAL_VF,
2720 &dev_num_vfs);
Vasundhara Volam7c5a5242012-08-28 20:37:41 +00002721 if (!lancer_chip(adapter))
2722 dev_num_vfs = min_t(u16, dev_num_vfs, MAX_VFS);
Sathya Perla39f1d942012-05-08 19:41:24 +00002723 adapter->dev_num_vfs = dev_num_vfs;
2724 }
2725 return 0;
2726}
2727
Sathya Perla5fb379e2009-06-18 00:02:59 +00002728static int be_setup(struct be_adapter *adapter)
2729{
Sathya Perla39f1d942012-05-08 19:41:24 +00002730 struct device *dev = &adapter->pdev->dev;
Sathya Perlaf9449ab2011-10-24 02:45:01 +00002731 u32 cap_flags, en_flags;
Sathya Perlaa54769f2011-10-24 02:45:00 +00002732 u32 tx_fc, rx_fc;
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002733 int status;
Sarveshwar Bandiba343c72010-03-31 02:56:12 +00002734 u8 mac[ETH_ALEN];
Padmanabh Ratnakar1578e772012-06-07 04:37:08 +00002735 bool active_mac;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002736
Sathya Perla30128032011-11-10 19:17:57 +00002737 be_setup_init(adapter);
Sathya Perlaf9449ab2011-10-24 02:45:01 +00002738
Sathya Perla39f1d942012-05-08 19:41:24 +00002739 be_get_config(adapter);
2740
Sathya Perla2dc1deb2011-07-19 19:52:33 +00002741 be_cmd_req_native_mode(adapter);
2742
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002743 be_msix_enable(adapter);
2744
2745 status = be_evt_queues_create(adapter);
2746 if (status)
Sathya Perlaa54769f2011-10-24 02:45:00 +00002747 goto err;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002748
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002749 status = be_tx_cqs_create(adapter);
2750 if (status)
2751 goto err;
2752
2753 status = be_rx_cqs_create(adapter);
2754 if (status)
Sathya Perlaa54769f2011-10-24 02:45:00 +00002755 goto err;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002756
Sathya Perla5fb379e2009-06-18 00:02:59 +00002757 status = be_mcc_queues_create(adapter);
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002758 if (status)
Sathya Perlaa54769f2011-10-24 02:45:00 +00002759 goto err;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002760
Sathya Perlaf9449ab2011-10-24 02:45:01 +00002761 en_flags = BE_IF_FLAGS_UNTAGGED | BE_IF_FLAGS_BROADCAST |
2762 BE_IF_FLAGS_MULTICAST | BE_IF_FLAGS_PASS_L3L4_ERRORS;
2763 cap_flags = en_flags | BE_IF_FLAGS_MCAST_PROMISCUOUS |
Padmanabh Ratnakar5d5adb92011-11-16 02:03:32 +00002764 BE_IF_FLAGS_VLAN_PROMISCUOUS | BE_IF_FLAGS_PROMISCUOUS;
2765
Sathya Perlaf9449ab2011-10-24 02:45:01 +00002766 if (adapter->function_caps & BE_FUNCTION_CAPS_RSS) {
2767 cap_flags |= BE_IF_FLAGS_RSS;
2768 en_flags |= BE_IF_FLAGS_RSS;
2769 }
Padmanabh Ratnakar1578e772012-06-07 04:37:08 +00002770
Padmanabh Ratnakar0b13fb42012-07-18 02:51:58 +00002771 if (lancer_chip(adapter) && !be_physfn(adapter)) {
2772 en_flags = BE_IF_FLAGS_UNTAGGED |
2773 BE_IF_FLAGS_BROADCAST |
2774 BE_IF_FLAGS_MULTICAST;
2775 cap_flags = en_flags;
2776 }
2777
Sathya Perlaf9449ab2011-10-24 02:45:01 +00002778 status = be_cmd_if_create(adapter, cap_flags, en_flags,
Padmanabh Ratnakar1578e772012-06-07 04:37:08 +00002779 &adapter->if_handle, 0);
Sathya Perlaf9449ab2011-10-24 02:45:01 +00002780 if (status != 0)
2781 goto err;
2782
Padmanabh Ratnakar1578e772012-06-07 04:37:08 +00002783 memset(mac, 0, ETH_ALEN);
2784 active_mac = false;
2785 status = be_get_mac_addr(adapter, mac, adapter->if_handle,
2786 &active_mac, &adapter->pmac_id[0]);
2787 if (status != 0)
2788 goto err;
2789
2790 if (!active_mac) {
2791 status = be_cmd_pmac_add(adapter, mac, adapter->if_handle,
2792 &adapter->pmac_id[0], 0);
2793 if (status != 0)
2794 goto err;
2795 }
2796
2797 if (is_zero_ether_addr(adapter->netdev->dev_addr)) {
2798 memcpy(adapter->netdev->dev_addr, mac, ETH_ALEN);
2799 memcpy(adapter->netdev->perm_addr, mac, ETH_ALEN);
Sathya Perlaf9449ab2011-10-24 02:45:01 +00002800 }
Ajit Khaparde0dffc832009-11-29 17:57:46 +00002801
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002802 status = be_tx_qs_create(adapter);
2803 if (status)
2804 goto err;
2805
Sathya Perla04b71172011-09-27 13:30:27 -04002806 be_cmd_get_fw_ver(adapter, adapter->fw_ver, NULL);
Somnath Kotur5a56eb12011-09-30 07:24:28 +00002807
Sathya Perla1d1e9a42012-06-05 19:37:17 +00002808 if (adapter->vlans_added)
Sathya Perla10329df2012-06-05 19:37:18 +00002809 be_vid_config(adapter);
Sathya Perlaa54769f2011-10-24 02:45:00 +00002810
2811 be_set_rx_mode(adapter->netdev);
2812
Ajit Khapardeddc3f5c2012-04-26 15:42:31 +00002813 be_cmd_get_flow_control(adapter, &tx_fc, &rx_fc);
Padmanabh Ratnakar590c3912011-11-25 05:47:26 +00002814
Ajit Khapardeddc3f5c2012-04-26 15:42:31 +00002815 if (rx_fc != adapter->rx_fc || tx_fc != adapter->tx_fc)
2816 be_cmd_set_flow_control(adapter, adapter->tx_fc,
Sathya Perlaa54769f2011-10-24 02:45:00 +00002817 adapter->rx_fc);
Sathya Perlaa54769f2011-10-24 02:45:00 +00002818
Sathya Perla39f1d942012-05-08 19:41:24 +00002819 if (be_physfn(adapter) && num_vfs) {
2820 if (adapter->dev_num_vfs)
2821 be_vf_setup(adapter);
2822 else
2823 dev_warn(dev, "device doesn't support SRIOV\n");
Sathya Perlaf9449ab2011-10-24 02:45:01 +00002824 }
2825
Ajit Khaparde42f11cf2012-04-21 18:53:22 +00002826 be_cmd_get_phy_info(adapter);
2827 if (be_pause_supported(adapter))
2828 adapter->phy.fc_autoneg = 1;
2829
Sathya Perla191eb752012-02-23 18:50:13 +00002830 schedule_delayed_work(&adapter->work, msecs_to_jiffies(1000));
2831 adapter->flags |= BE_FLAGS_WORKER_SCHEDULED;
Sathya Perlaf9449ab2011-10-24 02:45:01 +00002832 return 0;
Sathya Perlaa54769f2011-10-24 02:45:00 +00002833err:
2834 be_clear(adapter);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002835 return status;
2836}
2837
Ivan Vecera66268732011-12-08 01:31:21 +00002838#ifdef CONFIG_NET_POLL_CONTROLLER
2839static void be_netpoll(struct net_device *netdev)
2840{
2841 struct be_adapter *adapter = netdev_priv(netdev);
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002842 struct be_eq_obj *eqo;
Ivan Vecera66268732011-12-08 01:31:21 +00002843 int i;
2844
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002845 for_all_evt_queues(adapter, eqo, i)
2846 event_handle(eqo);
2847
2848 return;
Ivan Vecera66268732011-12-08 01:31:21 +00002849}
2850#endif
2851
Ajit Khaparde84517482009-09-04 03:12:16 +00002852#define FW_FILE_HDR_SIGN "ServerEngines Corp. "
Padmanabh Ratnakarc165541e2012-04-25 01:47:15 +00002853char flash_cookie[2][16] = {"*** SE FLAS", "H DIRECTORY *** "};
2854
Sarveshwar Bandifa9a6fe2009-11-20 14:23:47 -08002855static bool be_flash_redboot(struct be_adapter *adapter,
Ajit Khaparde3f0d4562010-02-09 01:30:35 +00002856 const u8 *p, u32 img_start, int image_size,
2857 int hdr_size)
Sarveshwar Bandifa9a6fe2009-11-20 14:23:47 -08002858{
2859 u32 crc_offset;
2860 u8 flashed_crc[4];
2861 int status;
Ajit Khaparde3f0d4562010-02-09 01:30:35 +00002862
2863 crc_offset = hdr_size + img_start + image_size - 4;
2864
Sarveshwar Bandifa9a6fe2009-11-20 14:23:47 -08002865 p += crc_offset;
Ajit Khaparde3f0d4562010-02-09 01:30:35 +00002866
2867 status = be_cmd_get_flash_crc(adapter, flashed_crc,
Ajit Khapardef510fc62010-03-31 01:47:45 +00002868 (image_size - 4));
Sarveshwar Bandifa9a6fe2009-11-20 14:23:47 -08002869 if (status) {
2870 dev_err(&adapter->pdev->dev,
2871 "could not get crc from flash, not flashing redboot\n");
2872 return false;
2873 }
2874
2875 /*update redboot only if crc does not match*/
2876 if (!memcmp(flashed_crc, p, 4))
2877 return false;
2878 else
2879 return true;
Sarveshwar Bandifa9a6fe2009-11-20 14:23:47 -08002880}
2881
Sathya Perla306f1342011-08-02 19:57:45 +00002882static bool phy_flashing_required(struct be_adapter *adapter)
2883{
Ajit Khaparde42f11cf2012-04-21 18:53:22 +00002884 return (adapter->phy.phy_type == TN_8022 &&
2885 adapter->phy.interface_type == PHY_TYPE_BASET_10GB);
Sathya Perla306f1342011-08-02 19:57:45 +00002886}
2887
Padmanabh Ratnakarc165541e2012-04-25 01:47:15 +00002888static bool is_comp_in_ufi(struct be_adapter *adapter,
2889 struct flash_section_info *fsec, int type)
2890{
2891 int i = 0, img_type = 0;
2892 struct flash_section_info_g2 *fsec_g2 = NULL;
2893
2894 if (adapter->generation != BE_GEN3)
2895 fsec_g2 = (struct flash_section_info_g2 *)fsec;
2896
2897 for (i = 0; i < MAX_FLASH_COMP; i++) {
2898 if (fsec_g2)
2899 img_type = le32_to_cpu(fsec_g2->fsec_entry[i].type);
2900 else
2901 img_type = le32_to_cpu(fsec->fsec_entry[i].type);
2902
2903 if (img_type == type)
2904 return true;
2905 }
2906 return false;
2907
2908}
2909
2910struct flash_section_info *get_fsec_info(struct be_adapter *adapter,
2911 int header_size,
2912 const struct firmware *fw)
2913{
2914 struct flash_section_info *fsec = NULL;
2915 const u8 *p = fw->data;
2916
2917 p += header_size;
2918 while (p < (fw->data + fw->size)) {
2919 fsec = (struct flash_section_info *)p;
2920 if (!memcmp(flash_cookie, fsec->cookie, sizeof(flash_cookie)))
2921 return fsec;
2922 p += 32;
2923 }
2924 return NULL;
2925}
2926
Ajit Khaparde3f0d4562010-02-09 01:30:35 +00002927static int be_flash_data(struct be_adapter *adapter,
Padmanabh Ratnakarc165541e2012-04-25 01:47:15 +00002928 const struct firmware *fw,
2929 struct be_dma_mem *flash_cmd,
2930 int num_of_images)
Ajit Khaparde3f0d4562010-02-09 01:30:35 +00002931
Ajit Khaparde84517482009-09-04 03:12:16 +00002932{
Ajit Khaparde3f0d4562010-02-09 01:30:35 +00002933 int status = 0, i, filehdr_size = 0;
Padmanabh Ratnakarc165541e2012-04-25 01:47:15 +00002934 int img_hdrs_size = (num_of_images * sizeof(struct image_hdr));
Ajit Khaparde3f0d4562010-02-09 01:30:35 +00002935 u32 total_bytes = 0, flash_op;
Ajit Khaparde84517482009-09-04 03:12:16 +00002936 int num_bytes;
2937 const u8 *p = fw->data;
2938 struct be_cmd_write_flashrom *req = flash_cmd->va;
Joe Perches215faf92010-12-21 02:16:10 -08002939 const struct flash_comp *pflashcomp;
Padmanabh Ratnakarc165541e2012-04-25 01:47:15 +00002940 int num_comp, hdr_size;
2941 struct flash_section_info *fsec = NULL;
Ajit Khaparde84517482009-09-04 03:12:16 +00002942
Padmanabh Ratnakarc165541e2012-04-25 01:47:15 +00002943 struct flash_comp gen3_flash_types[] = {
2944 { FLASH_iSCSI_PRIMARY_IMAGE_START_g3, OPTYPE_ISCSI_ACTIVE,
2945 FLASH_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_iSCSI},
2946 { FLASH_REDBOOT_START_g3, OPTYPE_REDBOOT,
2947 FLASH_REDBOOT_IMAGE_MAX_SIZE_g3, IMAGE_BOOT_CODE},
2948 { FLASH_iSCSI_BIOS_START_g3, OPTYPE_BIOS,
2949 FLASH_BIOS_IMAGE_MAX_SIZE_g3, IMAGE_OPTION_ROM_ISCSI},
2950 { FLASH_PXE_BIOS_START_g3, OPTYPE_PXE_BIOS,
2951 FLASH_BIOS_IMAGE_MAX_SIZE_g3, IMAGE_OPTION_ROM_PXE},
2952 { FLASH_FCoE_BIOS_START_g3, OPTYPE_FCOE_BIOS,
2953 FLASH_BIOS_IMAGE_MAX_SIZE_g3, IMAGE_OPTION_ROM_FCoE},
2954 { FLASH_iSCSI_BACKUP_IMAGE_START_g3, OPTYPE_ISCSI_BACKUP,
2955 FLASH_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_BACKUP_iSCSI},
2956 { FLASH_FCoE_PRIMARY_IMAGE_START_g3, OPTYPE_FCOE_FW_ACTIVE,
2957 FLASH_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_FCoE},
2958 { FLASH_FCoE_BACKUP_IMAGE_START_g3, OPTYPE_FCOE_FW_BACKUP,
2959 FLASH_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_BACKUP_FCoE},
2960 { FLASH_NCSI_START_g3, OPTYPE_NCSI_FW,
2961 FLASH_NCSI_IMAGE_MAX_SIZE_g3, IMAGE_NCSI},
2962 { FLASH_PHY_FW_START_g3, OPTYPE_PHY_FW,
2963 FLASH_PHY_FW_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_PHY}
Ajit Khaparde3f0d4562010-02-09 01:30:35 +00002964 };
Padmanabh Ratnakarc165541e2012-04-25 01:47:15 +00002965
2966 struct flash_comp gen2_flash_types[] = {
2967 { FLASH_iSCSI_PRIMARY_IMAGE_START_g2, OPTYPE_ISCSI_ACTIVE,
2968 FLASH_IMAGE_MAX_SIZE_g2, IMAGE_FIRMWARE_iSCSI},
2969 { FLASH_REDBOOT_START_g2, OPTYPE_REDBOOT,
2970 FLASH_REDBOOT_IMAGE_MAX_SIZE_g2, IMAGE_BOOT_CODE},
2971 { FLASH_iSCSI_BIOS_START_g2, OPTYPE_BIOS,
2972 FLASH_BIOS_IMAGE_MAX_SIZE_g2, IMAGE_OPTION_ROM_ISCSI},
2973 { FLASH_PXE_BIOS_START_g2, OPTYPE_PXE_BIOS,
2974 FLASH_BIOS_IMAGE_MAX_SIZE_g2, IMAGE_OPTION_ROM_PXE},
2975 { FLASH_FCoE_BIOS_START_g2, OPTYPE_FCOE_BIOS,
2976 FLASH_BIOS_IMAGE_MAX_SIZE_g2, IMAGE_OPTION_ROM_FCoE},
2977 { FLASH_iSCSI_BACKUP_IMAGE_START_g2, OPTYPE_ISCSI_BACKUP,
2978 FLASH_IMAGE_MAX_SIZE_g2, IMAGE_FIRMWARE_BACKUP_iSCSI},
2979 { FLASH_FCoE_PRIMARY_IMAGE_START_g2, OPTYPE_FCOE_FW_ACTIVE,
2980 FLASH_IMAGE_MAX_SIZE_g2, IMAGE_FIRMWARE_FCoE},
2981 { FLASH_FCoE_BACKUP_IMAGE_START_g2, OPTYPE_FCOE_FW_BACKUP,
2982 FLASH_IMAGE_MAX_SIZE_g2, IMAGE_FIRMWARE_BACKUP_FCoE}
Ajit Khaparde3f0d4562010-02-09 01:30:35 +00002983 };
2984
2985 if (adapter->generation == BE_GEN3) {
2986 pflashcomp = gen3_flash_types;
2987 filehdr_size = sizeof(struct flash_file_hdr_g3);
Joe Perches215faf92010-12-21 02:16:10 -08002988 num_comp = ARRAY_SIZE(gen3_flash_types);
Ajit Khaparde3f0d4562010-02-09 01:30:35 +00002989 } else {
2990 pflashcomp = gen2_flash_types;
2991 filehdr_size = sizeof(struct flash_file_hdr_g2);
Joe Perches215faf92010-12-21 02:16:10 -08002992 num_comp = ARRAY_SIZE(gen2_flash_types);
Ajit Khaparde84517482009-09-04 03:12:16 +00002993 }
Padmanabh Ratnakarc165541e2012-04-25 01:47:15 +00002994 /* Get flash section info*/
2995 fsec = get_fsec_info(adapter, filehdr_size + img_hdrs_size, fw);
2996 if (!fsec) {
2997 dev_err(&adapter->pdev->dev,
2998 "Invalid Cookie. UFI corrupted ?\n");
2999 return -1;
3000 }
Sarveshwar Bandi9fe96932010-03-02 22:37:28 +00003001 for (i = 0; i < num_comp; i++) {
Padmanabh Ratnakarc165541e2012-04-25 01:47:15 +00003002 if (!is_comp_in_ufi(adapter, fsec, pflashcomp[i].img_type))
Sarveshwar Bandi9fe96932010-03-02 22:37:28 +00003003 continue;
Padmanabh Ratnakarc165541e2012-04-25 01:47:15 +00003004
3005 if ((pflashcomp[i].optype == OPTYPE_NCSI_FW) &&
3006 memcmp(adapter->fw_ver, "3.102.148.0", 11) < 0)
3007 continue;
3008
3009 if (pflashcomp[i].optype == OPTYPE_PHY_FW) {
Sathya Perla306f1342011-08-02 19:57:45 +00003010 if (!phy_flashing_required(adapter))
3011 continue;
3012 }
Padmanabh Ratnakarc165541e2012-04-25 01:47:15 +00003013
3014 hdr_size = filehdr_size +
3015 (num_of_images * sizeof(struct image_hdr));
3016
3017 if ((pflashcomp[i].optype == OPTYPE_REDBOOT) &&
3018 (!be_flash_redboot(adapter, fw->data, pflashcomp[i].offset,
3019 pflashcomp[i].size, hdr_size)))
Ajit Khaparde3f0d4562010-02-09 01:30:35 +00003020 continue;
Padmanabh Ratnakarc165541e2012-04-25 01:47:15 +00003021
3022 /* Flash the component */
Ajit Khaparde3f0d4562010-02-09 01:30:35 +00003023 p = fw->data;
Padmanabh Ratnakarc165541e2012-04-25 01:47:15 +00003024 p += filehdr_size + pflashcomp[i].offset + img_hdrs_size;
Sathya Perla306f1342011-08-02 19:57:45 +00003025 if (p + pflashcomp[i].size > fw->data + fw->size)
3026 return -1;
3027 total_bytes = pflashcomp[i].size;
Ajit Khaparde3f0d4562010-02-09 01:30:35 +00003028 while (total_bytes) {
3029 if (total_bytes > 32*1024)
3030 num_bytes = 32*1024;
3031 else
3032 num_bytes = total_bytes;
3033 total_bytes -= num_bytes;
Sathya Perla306f1342011-08-02 19:57:45 +00003034 if (!total_bytes) {
Padmanabh Ratnakarc165541e2012-04-25 01:47:15 +00003035 if (pflashcomp[i].optype == OPTYPE_PHY_FW)
Sathya Perla306f1342011-08-02 19:57:45 +00003036 flash_op = FLASHROM_OPER_PHY_FLASH;
3037 else
3038 flash_op = FLASHROM_OPER_FLASH;
3039 } else {
Padmanabh Ratnakarc165541e2012-04-25 01:47:15 +00003040 if (pflashcomp[i].optype == OPTYPE_PHY_FW)
Sathya Perla306f1342011-08-02 19:57:45 +00003041 flash_op = FLASHROM_OPER_PHY_SAVE;
3042 else
3043 flash_op = FLASHROM_OPER_SAVE;
3044 }
Ajit Khaparde3f0d4562010-02-09 01:30:35 +00003045 memcpy(req->params.data_buf, p, num_bytes);
3046 p += num_bytes;
3047 status = be_cmd_write_flashrom(adapter, flash_cmd,
3048 pflashcomp[i].optype, flash_op, num_bytes);
3049 if (status) {
Sathya Perla306f1342011-08-02 19:57:45 +00003050 if ((status == ILLEGAL_IOCTL_REQ) &&
3051 (pflashcomp[i].optype ==
Padmanabh Ratnakarc165541e2012-04-25 01:47:15 +00003052 OPTYPE_PHY_FW))
Sathya Perla306f1342011-08-02 19:57:45 +00003053 break;
Ajit Khaparde3f0d4562010-02-09 01:30:35 +00003054 dev_err(&adapter->pdev->dev,
3055 "cmd to write to flash rom failed.\n");
3056 return -1;
3057 }
Ajit Khaparde84517482009-09-04 03:12:16 +00003058 }
Ajit Khaparde84517482009-09-04 03:12:16 +00003059 }
Ajit Khaparde84517482009-09-04 03:12:16 +00003060 return 0;
3061}
3062
Ajit Khaparde3f0d4562010-02-09 01:30:35 +00003063static int get_ufigen_type(struct flash_file_hdr_g2 *fhdr)
3064{
3065 if (fhdr == NULL)
3066 return 0;
3067 if (fhdr->build[0] == '3')
3068 return BE_GEN3;
3069 else if (fhdr->build[0] == '2')
3070 return BE_GEN2;
3071 else
3072 return 0;
3073}
3074
Padmanabh Ratnakarf67ef7b2012-07-12 03:57:09 +00003075static int lancer_wait_idle(struct be_adapter *adapter)
3076{
3077#define SLIPORT_IDLE_TIMEOUT 30
3078 u32 reg_val;
3079 int status = 0, i;
3080
3081 for (i = 0; i < SLIPORT_IDLE_TIMEOUT; i++) {
3082 reg_val = ioread32(adapter->db + PHYSDEV_CONTROL_OFFSET);
3083 if ((reg_val & PHYSDEV_CONTROL_INP_MASK) == 0)
3084 break;
3085
3086 ssleep(1);
3087 }
3088
3089 if (i == SLIPORT_IDLE_TIMEOUT)
3090 status = -1;
3091
3092 return status;
3093}
3094
3095static int lancer_fw_reset(struct be_adapter *adapter)
3096{
3097 int status = 0;
3098
3099 status = lancer_wait_idle(adapter);
3100 if (status)
3101 return status;
3102
3103 iowrite32(PHYSDEV_CONTROL_FW_RESET_MASK, adapter->db +
3104 PHYSDEV_CONTROL_OFFSET);
3105
3106 return status;
3107}
3108
Shripad Nunjundarao485bf562011-05-16 07:36:59 +00003109static int lancer_fw_download(struct be_adapter *adapter,
3110 const struct firmware *fw)
Ajit Khaparde84517482009-09-04 03:12:16 +00003111{
Shripad Nunjundarao485bf562011-05-16 07:36:59 +00003112#define LANCER_FW_DOWNLOAD_CHUNK (32 * 1024)
3113#define LANCER_FW_DOWNLOAD_LOCATION "/prg"
3114 struct be_dma_mem flash_cmd;
Shripad Nunjundarao485bf562011-05-16 07:36:59 +00003115 const u8 *data_ptr = NULL;
3116 u8 *dest_image_ptr = NULL;
3117 size_t image_size = 0;
3118 u32 chunk_size = 0;
3119 u32 data_written = 0;
3120 u32 offset = 0;
3121 int status = 0;
3122 u8 add_status = 0;
Padmanabh Ratnakarf67ef7b2012-07-12 03:57:09 +00003123 u8 change_status;
Shripad Nunjundarao485bf562011-05-16 07:36:59 +00003124
3125 if (!IS_ALIGNED(fw->size, sizeof(u32))) {
3126 dev_err(&adapter->pdev->dev,
3127 "FW Image not properly aligned. "
3128 "Length must be 4 byte aligned.\n");
3129 status = -EINVAL;
3130 goto lancer_fw_exit;
3131 }
3132
3133 flash_cmd.size = sizeof(struct lancer_cmd_req_write_object)
3134 + LANCER_FW_DOWNLOAD_CHUNK;
3135 flash_cmd.va = dma_alloc_coherent(&adapter->pdev->dev, flash_cmd.size,
3136 &flash_cmd.dma, GFP_KERNEL);
3137 if (!flash_cmd.va) {
3138 status = -ENOMEM;
3139 dev_err(&adapter->pdev->dev,
3140 "Memory allocation failure while flashing\n");
3141 goto lancer_fw_exit;
3142 }
3143
Shripad Nunjundarao485bf562011-05-16 07:36:59 +00003144 dest_image_ptr = flash_cmd.va +
3145 sizeof(struct lancer_cmd_req_write_object);
3146 image_size = fw->size;
3147 data_ptr = fw->data;
3148
3149 while (image_size) {
3150 chunk_size = min_t(u32, image_size, LANCER_FW_DOWNLOAD_CHUNK);
3151
3152 /* Copy the image chunk content. */
3153 memcpy(dest_image_ptr, data_ptr, chunk_size);
3154
3155 status = lancer_cmd_write_object(adapter, &flash_cmd,
Padmanabh Ratnakarf67ef7b2012-07-12 03:57:09 +00003156 chunk_size, offset,
3157 LANCER_FW_DOWNLOAD_LOCATION,
3158 &data_written, &change_status,
3159 &add_status);
Shripad Nunjundarao485bf562011-05-16 07:36:59 +00003160 if (status)
3161 break;
3162
3163 offset += data_written;
3164 data_ptr += data_written;
3165 image_size -= data_written;
3166 }
3167
3168 if (!status) {
3169 /* Commit the FW written */
3170 status = lancer_cmd_write_object(adapter, &flash_cmd,
Padmanabh Ratnakarf67ef7b2012-07-12 03:57:09 +00003171 0, offset,
3172 LANCER_FW_DOWNLOAD_LOCATION,
3173 &data_written, &change_status,
3174 &add_status);
Shripad Nunjundarao485bf562011-05-16 07:36:59 +00003175 }
3176
3177 dma_free_coherent(&adapter->pdev->dev, flash_cmd.size, flash_cmd.va,
3178 flash_cmd.dma);
3179 if (status) {
3180 dev_err(&adapter->pdev->dev,
3181 "Firmware load error. "
3182 "Status code: 0x%x Additional Status: 0x%x\n",
3183 status, add_status);
3184 goto lancer_fw_exit;
3185 }
3186
Padmanabh Ratnakarf67ef7b2012-07-12 03:57:09 +00003187 if (change_status == LANCER_FW_RESET_NEEDED) {
3188 status = lancer_fw_reset(adapter);
3189 if (status) {
3190 dev_err(&adapter->pdev->dev,
3191 "Adapter busy for FW reset.\n"
3192 "New FW will not be active.\n");
3193 goto lancer_fw_exit;
3194 }
3195 } else if (change_status != LANCER_NO_RESET_NEEDED) {
3196 dev_err(&adapter->pdev->dev,
3197 "System reboot required for new FW"
3198 " to be active\n");
3199 }
3200
Shripad Nunjundarao485bf562011-05-16 07:36:59 +00003201 dev_info(&adapter->pdev->dev, "Firmware flashed successfully\n");
3202lancer_fw_exit:
3203 return status;
3204}
3205
3206static int be_fw_download(struct be_adapter *adapter, const struct firmware* fw)
3207{
Ajit Khaparde3f0d4562010-02-09 01:30:35 +00003208 struct flash_file_hdr_g2 *fhdr;
3209 struct flash_file_hdr_g3 *fhdr3;
3210 struct image_hdr *img_hdr_ptr = NULL;
Ajit Khaparde84517482009-09-04 03:12:16 +00003211 struct be_dma_mem flash_cmd;
Ajit Khaparde84517482009-09-04 03:12:16 +00003212 const u8 *p;
Shripad Nunjundarao485bf562011-05-16 07:36:59 +00003213 int status = 0, i = 0, num_imgs = 0;
Ajit Khaparde84517482009-09-04 03:12:16 +00003214
3215 p = fw->data;
Ajit Khaparde3f0d4562010-02-09 01:30:35 +00003216 fhdr = (struct flash_file_hdr_g2 *) p;
Ajit Khaparde84517482009-09-04 03:12:16 +00003217
Ajit Khaparde84517482009-09-04 03:12:16 +00003218 flash_cmd.size = sizeof(struct be_cmd_write_flashrom) + 32*1024;
Ivan Vecera2b7bceb2011-02-02 08:05:12 +00003219 flash_cmd.va = dma_alloc_coherent(&adapter->pdev->dev, flash_cmd.size,
3220 &flash_cmd.dma, GFP_KERNEL);
Ajit Khaparde84517482009-09-04 03:12:16 +00003221 if (!flash_cmd.va) {
3222 status = -ENOMEM;
3223 dev_err(&adapter->pdev->dev,
3224 "Memory allocation failure while flashing\n");
Shripad Nunjundarao485bf562011-05-16 07:36:59 +00003225 goto be_fw_exit;
Ajit Khaparde84517482009-09-04 03:12:16 +00003226 }
3227
Ajit Khaparde3f0d4562010-02-09 01:30:35 +00003228 if ((adapter->generation == BE_GEN3) &&
3229 (get_ufigen_type(fhdr) == BE_GEN3)) {
3230 fhdr3 = (struct flash_file_hdr_g3 *) fw->data;
Ajit Khaparde8b93b712010-03-31 01:57:10 +00003231 num_imgs = le32_to_cpu(fhdr3->num_imgs);
3232 for (i = 0; i < num_imgs; i++) {
Ajit Khaparde3f0d4562010-02-09 01:30:35 +00003233 img_hdr_ptr = (struct image_hdr *) (fw->data +
3234 (sizeof(struct flash_file_hdr_g3) +
Ajit Khaparde8b93b712010-03-31 01:57:10 +00003235 i * sizeof(struct image_hdr)));
3236 if (le32_to_cpu(img_hdr_ptr->imageid) == 1)
3237 status = be_flash_data(adapter, fw, &flash_cmd,
3238 num_imgs);
Ajit Khaparde3f0d4562010-02-09 01:30:35 +00003239 }
3240 } else if ((adapter->generation == BE_GEN2) &&
3241 (get_ufigen_type(fhdr) == BE_GEN2)) {
3242 status = be_flash_data(adapter, fw, &flash_cmd, 0);
3243 } else {
3244 dev_err(&adapter->pdev->dev,
3245 "UFI and Interface are not compatible for flashing\n");
3246 status = -1;
Ajit Khaparde84517482009-09-04 03:12:16 +00003247 }
3248
Ivan Vecera2b7bceb2011-02-02 08:05:12 +00003249 dma_free_coherent(&adapter->pdev->dev, flash_cmd.size, flash_cmd.va,
3250 flash_cmd.dma);
Ajit Khaparde84517482009-09-04 03:12:16 +00003251 if (status) {
3252 dev_err(&adapter->pdev->dev, "Firmware load error\n");
Shripad Nunjundarao485bf562011-05-16 07:36:59 +00003253 goto be_fw_exit;
Ajit Khaparde84517482009-09-04 03:12:16 +00003254 }
3255
André Goddard Rosaaf901ca2009-11-14 13:09:05 -02003256 dev_info(&adapter->pdev->dev, "Firmware flashed successfully\n");
Ajit Khaparde84517482009-09-04 03:12:16 +00003257
Shripad Nunjundarao485bf562011-05-16 07:36:59 +00003258be_fw_exit:
3259 return status;
3260}
3261
3262int be_load_fw(struct be_adapter *adapter, u8 *fw_file)
3263{
3264 const struct firmware *fw;
3265 int status;
3266
3267 if (!netif_running(adapter->netdev)) {
3268 dev_err(&adapter->pdev->dev,
3269 "Firmware load not allowed (interface is down)\n");
3270 return -1;
3271 }
3272
3273 status = request_firmware(&fw, fw_file, &adapter->pdev->dev);
3274 if (status)
3275 goto fw_exit;
3276
3277 dev_info(&adapter->pdev->dev, "Flashing firmware file %s\n", fw_file);
3278
3279 if (lancer_chip(adapter))
3280 status = lancer_fw_download(adapter, fw);
3281 else
3282 status = be_fw_download(adapter, fw);
3283
Ajit Khaparde84517482009-09-04 03:12:16 +00003284fw_exit:
3285 release_firmware(fw);
3286 return status;
3287}
3288
stephen hemmingere5686ad2012-01-05 19:10:25 +00003289static const struct net_device_ops be_netdev_ops = {
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003290 .ndo_open = be_open,
3291 .ndo_stop = be_close,
3292 .ndo_start_xmit = be_xmit,
Sathya Perlaa54769f2011-10-24 02:45:00 +00003293 .ndo_set_rx_mode = be_set_rx_mode,
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003294 .ndo_set_mac_address = be_mac_addr_set,
3295 .ndo_change_mtu = be_change_mtu,
Sathya Perlaab1594e2011-07-25 19:10:15 +00003296 .ndo_get_stats64 = be_get_stats64,
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003297 .ndo_validate_addr = eth_validate_addr,
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003298 .ndo_vlan_rx_add_vid = be_vlan_add_vid,
3299 .ndo_vlan_rx_kill_vid = be_vlan_rem_vid,
Ajit Khaparde64600ea2010-07-23 01:50:34 +00003300 .ndo_set_vf_mac = be_set_vf_mac,
Ajit Khaparde1da87b72010-07-23 01:51:22 +00003301 .ndo_set_vf_vlan = be_set_vf_vlan,
Ajit Khapardee1d18732010-07-23 01:52:13 +00003302 .ndo_set_vf_tx_rate = be_set_vf_tx_rate,
Ivan Vecera66268732011-12-08 01:31:21 +00003303 .ndo_get_vf_config = be_get_vf_config,
3304#ifdef CONFIG_NET_POLL_CONTROLLER
3305 .ndo_poll_controller = be_netpoll,
3306#endif
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003307};
3308
/* One-time netdev initialization: advertise offload features, install
 * the ops/ethtool tables and register one NAPI context per event queue.
 */
static void be_netdev_init(struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_eq_obj *eqo;
	int i;

	/* hw_features = offloads the user may toggle via ethtool */
	netdev->hw_features |= NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6 |
		NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_RXCSUM |
		NETIF_F_HW_VLAN_TX;
	if (be_multi_rxq(adapter))
		netdev->hw_features |= NETIF_F_RXHASH;

	/* features = what is enabled by default; VLAN RX strip and
	 * filtering are always on and not user-toggleable here
	 */
	netdev->features |= netdev->hw_features |
		NETIF_F_HW_VLAN_RX | NETIF_F_HW_VLAN_FILTER;

	netdev->vlan_features |= NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6 |
		NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;

	netdev->priv_flags |= IFF_UNICAST_FLT;

	netdev->flags |= IFF_MULTICAST;

	netif_set_gso_max_size(netdev, 65535 - ETH_HLEN);

	netdev->netdev_ops = &be_netdev_ops;

	SET_ETHTOOL_OPS(netdev, &be_ethtool_ops);

	/* one NAPI context per event queue */
	for_all_evt_queues(adapter, eqo, i)
		netif_napi_add(netdev, &eqo->napi, be_poll, BE_NAPI_WEIGHT);
}
3340
/* Undo the MMIO mappings created by be_map_pci_bars() and
 * lancer_roce_map_pci_bars(); each mapping may be absent depending on
 * chip type and function (PF/VF), hence the NULL guards.
 */
static void be_unmap_pci_bars(struct be_adapter *adapter)
{
	if (adapter->csr)
		iounmap(adapter->csr);
	if (adapter->db)
		iounmap(adapter->db);
	if (adapter->roce_db.base)
		pci_iounmap(adapter->pdev, adapter->roce_db.base);
}
3350
3351static int lancer_roce_map_pci_bars(struct be_adapter *adapter)
3352{
3353 struct pci_dev *pdev = adapter->pdev;
3354 u8 __iomem *addr;
3355
3356 addr = pci_iomap(pdev, 2, 0);
3357 if (addr == NULL)
3358 return -ENOMEM;
3359
3360 adapter->roce_db.base = addr;
3361 adapter->roce_db.io_addr = pci_resource_start(pdev, 2);
3362 adapter->roce_db.size = 8192;
3363 adapter->roce_db.total_size = pci_resource_len(pdev, 2);
3364 return 0;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003365}
3366
3367static int be_map_pci_bars(struct be_adapter *adapter)
3368{
3369 u8 __iomem *addr;
Sathya Perladb3ea782011-08-22 19:41:52 +00003370 int db_reg;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003371
Sathya Perlafe6d2a32010-11-21 23:25:50 +00003372 if (lancer_chip(adapter)) {
Parav Pandit045508a2012-03-26 14:27:13 +00003373 if (be_type_2_3(adapter)) {
3374 addr = ioremap_nocache(
3375 pci_resource_start(adapter->pdev, 0),
3376 pci_resource_len(adapter->pdev, 0));
3377 if (addr == NULL)
3378 return -ENOMEM;
3379 adapter->db = addr;
3380 }
3381 if (adapter->if_type == SLI_INTF_TYPE_3) {
3382 if (lancer_roce_map_pci_bars(adapter))
3383 goto pci_map_err;
3384 }
Sathya Perlafe6d2a32010-11-21 23:25:50 +00003385 return 0;
3386 }
3387
Sarveshwar Bandiba343c72010-03-31 02:56:12 +00003388 if (be_physfn(adapter)) {
3389 addr = ioremap_nocache(pci_resource_start(adapter->pdev, 2),
3390 pci_resource_len(adapter->pdev, 2));
3391 if (addr == NULL)
3392 return -ENOMEM;
3393 adapter->csr = addr;
3394 }
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003395
Sarveshwar Bandiba343c72010-03-31 02:56:12 +00003396 if (adapter->generation == BE_GEN2) {
Sarveshwar Bandiba343c72010-03-31 02:56:12 +00003397 db_reg = 4;
3398 } else {
Sarveshwar Bandiba343c72010-03-31 02:56:12 +00003399 if (be_physfn(adapter))
3400 db_reg = 4;
3401 else
3402 db_reg = 0;
3403 }
3404 addr = ioremap_nocache(pci_resource_start(adapter->pdev, db_reg),
3405 pci_resource_len(adapter->pdev, db_reg));
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003406 if (addr == NULL)
3407 goto pci_map_err;
Sathya Perla8788fdc2009-07-27 22:52:03 +00003408 adapter->db = addr;
Parav Pandit045508a2012-03-26 14:27:13 +00003409 if (adapter->sli_family == SKYHAWK_SLI_FAMILY) {
3410 adapter->roce_db.size = 4096;
3411 adapter->roce_db.io_addr =
3412 pci_resource_start(adapter->pdev, db_reg);
3413 adapter->roce_db.total_size =
3414 pci_resource_len(adapter->pdev, db_reg);
3415 }
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003416 return 0;
3417pci_map_err:
3418 be_unmap_pci_bars(adapter);
3419 return -ENOMEM;
3420}
3421
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003422static void be_ctrl_cleanup(struct be_adapter *adapter)
3423{
Sathya Perla8788fdc2009-07-27 22:52:03 +00003424 struct be_dma_mem *mem = &adapter->mbox_mem_alloced;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003425
3426 be_unmap_pci_bars(adapter);
3427
3428 if (mem->va)
Ivan Vecera2b7bceb2011-02-02 08:05:12 +00003429 dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
3430 mem->dma);
Sathya Perlae7b909a2009-11-22 22:01:10 +00003431
Sathya Perla5b8821b2011-08-02 19:57:44 +00003432 mem = &adapter->rx_filter;
Sathya Perlae7b909a2009-11-22 22:01:10 +00003433 if (mem->va)
Ivan Vecera2b7bceb2011-02-02 08:05:12 +00003434 dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
3435 mem->dma);
Sathya Perlacc7d7232012-08-28 20:37:43 +00003436 kfree(adapter->pmac_id);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003437}
3438
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003439static int be_ctrl_init(struct be_adapter *adapter)
3440{
Sathya Perla8788fdc2009-07-27 22:52:03 +00003441 struct be_dma_mem *mbox_mem_alloc = &adapter->mbox_mem_alloced;
3442 struct be_dma_mem *mbox_mem_align = &adapter->mbox_mem;
Sathya Perla5b8821b2011-08-02 19:57:44 +00003443 struct be_dma_mem *rx_filter = &adapter->rx_filter;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003444 int status;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003445
3446 status = be_map_pci_bars(adapter);
3447 if (status)
Sathya Perlae7b909a2009-11-22 22:01:10 +00003448 goto done;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003449
3450 mbox_mem_alloc->size = sizeof(struct be_mcc_mailbox) + 16;
Ivan Vecera2b7bceb2011-02-02 08:05:12 +00003451 mbox_mem_alloc->va = dma_alloc_coherent(&adapter->pdev->dev,
3452 mbox_mem_alloc->size,
3453 &mbox_mem_alloc->dma,
3454 GFP_KERNEL);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003455 if (!mbox_mem_alloc->va) {
Sathya Perlae7b909a2009-11-22 22:01:10 +00003456 status = -ENOMEM;
3457 goto unmap_pci_bars;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003458 }
3459 mbox_mem_align->size = sizeof(struct be_mcc_mailbox);
3460 mbox_mem_align->va = PTR_ALIGN(mbox_mem_alloc->va, 16);
3461 mbox_mem_align->dma = PTR_ALIGN(mbox_mem_alloc->dma, 16);
3462 memset(mbox_mem_align->va, 0, sizeof(struct be_mcc_mailbox));
Sathya Perlae7b909a2009-11-22 22:01:10 +00003463
Sathya Perla5b8821b2011-08-02 19:57:44 +00003464 rx_filter->size = sizeof(struct be_cmd_req_rx_filter);
3465 rx_filter->va = dma_alloc_coherent(&adapter->pdev->dev, rx_filter->size,
3466 &rx_filter->dma, GFP_KERNEL);
3467 if (rx_filter->va == NULL) {
Sathya Perlae7b909a2009-11-22 22:01:10 +00003468 status = -ENOMEM;
3469 goto free_mbox;
3470 }
Sathya Perla5b8821b2011-08-02 19:57:44 +00003471 memset(rx_filter->va, 0, rx_filter->size);
Sathya Perlae7b909a2009-11-22 22:01:10 +00003472
Sathya Perlacc7d7232012-08-28 20:37:43 +00003473 /* primary mac needs 1 pmac entry */
3474 adapter->pmac_id = kcalloc(adapter->max_pmac_cnt + 1,
3475 sizeof(*adapter->pmac_id), GFP_KERNEL);
3476 if (!adapter->pmac_id)
3477 return -ENOMEM;
3478
Ivan Vecera29849612010-12-14 05:43:19 +00003479 mutex_init(&adapter->mbox_lock);
Sathya Perla8788fdc2009-07-27 22:52:03 +00003480 spin_lock_init(&adapter->mcc_lock);
3481 spin_lock_init(&adapter->mcc_cq_lock);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003482
Sarveshwar Bandidd131e72010-05-25 16:16:32 -07003483 init_completion(&adapter->flash_compl);
Sathya Perlacf588472010-02-14 21:22:01 +00003484 pci_save_state(adapter->pdev);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003485 return 0;
Sathya Perlae7b909a2009-11-22 22:01:10 +00003486
3487free_mbox:
Ivan Vecera2b7bceb2011-02-02 08:05:12 +00003488 dma_free_coherent(&adapter->pdev->dev, mbox_mem_alloc->size,
3489 mbox_mem_alloc->va, mbox_mem_alloc->dma);
Sathya Perlae7b909a2009-11-22 22:01:10 +00003490
3491unmap_pci_bars:
3492 be_unmap_pci_bars(adapter);
3493
3494done:
3495 return status;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003496}
3497
3498static void be_stats_cleanup(struct be_adapter *adapter)
3499{
Sathya Perla3abcded2010-10-03 22:12:27 -07003500 struct be_dma_mem *cmd = &adapter->stats_cmd;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003501
3502 if (cmd->va)
Ivan Vecera2b7bceb2011-02-02 08:05:12 +00003503 dma_free_coherent(&adapter->pdev->dev, cmd->size,
3504 cmd->va, cmd->dma);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003505}
3506
3507static int be_stats_init(struct be_adapter *adapter)
3508{
Sathya Perla3abcded2010-10-03 22:12:27 -07003509 struct be_dma_mem *cmd = &adapter->stats_cmd;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003510
Selvin Xavier005d5692011-05-16 07:36:35 +00003511 if (adapter->generation == BE_GEN2) {
Ajit Khaparde89a88ab2011-05-16 07:36:18 +00003512 cmd->size = sizeof(struct be_cmd_req_get_stats_v0);
Selvin Xavier005d5692011-05-16 07:36:35 +00003513 } else {
3514 if (lancer_chip(adapter))
3515 cmd->size = sizeof(struct lancer_cmd_req_pport_stats);
3516 else
3517 cmd->size = sizeof(struct be_cmd_req_get_stats_v1);
3518 }
Ivan Vecera2b7bceb2011-02-02 08:05:12 +00003519 cmd->va = dma_alloc_coherent(&adapter->pdev->dev, cmd->size, &cmd->dma,
3520 GFP_KERNEL);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003521 if (cmd->va == NULL)
3522 return -1;
David S. Millerd291b9a2010-01-28 21:36:21 -08003523 memset(cmd->va, 0, cmd->size);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003524 return 0;
3525}
3526
/* PCI remove callback: tear down the adapter in (roughly) the reverse
 * order of be_probe(). The !adapter guard covers the case where
 * drvdata was never set.
 */
static void __devexit be_remove(struct pci_dev *pdev)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);

	if (!adapter)
		return;

	/* detach the RoCE driver before the netdev goes away */
	be_roce_dev_remove(adapter);

	/* stop the recovery worker before destroying what it touches */
	cancel_delayed_work_sync(&adapter->func_recovery_work);

	unregister_netdev(adapter->netdev);

	be_clear(adapter);

	/* tell fw we're done with firing cmds */
	be_cmd_fw_clean(adapter);

	be_stats_cleanup(adapter);

	be_ctrl_cleanup(adapter);

	pci_disable_pcie_error_reporting(pdev);

	pci_set_drvdata(pdev, NULL);
	pci_release_regions(pdev);
	pci_disable_device(pdev);

	free_netdev(adapter->netdev);
}
3557
Ajit Khaparde4762f6c2012-03-18 06:23:11 +00003558bool be_is_wol_supported(struct be_adapter *adapter)
3559{
3560 return ((adapter->wol_cap & BE_WOL_CAP) &&
3561 !be_is_wol_excluded(adapter)) ? true : false;
3562}
3563
Somnath Kotur941a77d2012-05-17 22:59:03 +00003564u32 be_get_fw_log_level(struct be_adapter *adapter)
3565{
3566 struct be_dma_mem extfat_cmd;
3567 struct be_fat_conf_params *cfgs;
3568 int status;
3569 u32 level = 0;
3570 int j;
3571
3572 memset(&extfat_cmd, 0, sizeof(struct be_dma_mem));
3573 extfat_cmd.size = sizeof(struct be_cmd_resp_get_ext_fat_caps);
3574 extfat_cmd.va = pci_alloc_consistent(adapter->pdev, extfat_cmd.size,
3575 &extfat_cmd.dma);
3576
3577 if (!extfat_cmd.va) {
3578 dev_err(&adapter->pdev->dev, "%s: Memory allocation failure\n",
3579 __func__);
3580 goto err;
3581 }
3582
3583 status = be_cmd_get_ext_fat_capabilites(adapter, &extfat_cmd);
3584 if (!status) {
3585 cfgs = (struct be_fat_conf_params *)(extfat_cmd.va +
3586 sizeof(struct be_cmd_resp_hdr));
Anton Blanchardac46a462012-07-24 15:05:25 +00003587 for (j = 0; j < le32_to_cpu(cfgs->module[0].num_modes); j++) {
Somnath Kotur941a77d2012-05-17 22:59:03 +00003588 if (cfgs->module[0].trace_lvl[j].mode == MODE_UART)
3589 level = cfgs->module[0].trace_lvl[j].dbg_lvl;
3590 }
3591 }
3592 pci_free_consistent(adapter->pdev, extfat_cmd.size, extfat_cmd.va,
3593 extfat_cmd.dma);
3594err:
3595 return level;
3596}
/* Query the FW once at probe time and cache adapter-wide settings:
 * port number, function mode/caps, VLAN and pmac quotas, WOL
 * capability, temperature-poll frequency and FW log level.
 * Returns 0 on success or the errno of a failed mandatory FW cmd.
 */
static int be_get_initial_config(struct be_adapter *adapter)
{
	int status;
	u32 level;

	status = be_cmd_query_fw_cfg(adapter, &adapter->port_num,
			&adapter->function_mode, &adapter->function_caps);
	if (status)
		return status;

	/* in FLEX10 mode the VLAN table is shared among 8 functions */
	if (adapter->function_mode & FLEX10_MODE)
		adapter->max_vlans = BE_NUM_VLANS_SUPPORTED/8;
	else
		adapter->max_vlans = BE_NUM_VLANS_SUPPORTED;

	/* VFs get a smaller unicast MAC (pmac) quota than the PF */
	if (be_physfn(adapter))
		adapter->max_pmac_cnt = BE_UC_PMAC_COUNT;
	else
		adapter->max_pmac_cnt = BE_VF_UC_PMAC_COUNT;

	status = be_cmd_get_cntl_attributes(adapter);
	if (status)
		return status;

	status = be_cmd_get_acpi_wol_cap(adapter);
	if (status) {
		/* in case of a failure to get wol capabilities
		 * check the exclusion list to determine WOL capability */
		if (!be_is_wol_excluded(adapter))
			adapter->wol_cap |= BE_WOL_CAP;
	}

	if (be_is_wol_supported(adapter))
		adapter->wol = true;

	/* Must be a power of 2 or else MODULO will BUG_ON */
	adapter->be_get_temp_freq = 64;

	level = be_get_fw_log_level(adapter);
	adapter->msg_enable = level <= FW_LOG_LEVEL_DEFAULT ? NETIF_MSG_HW : 0;

	return 0;
}
3640
Sathya Perla39f1d942012-05-08 19:41:24 +00003641static int be_dev_type_check(struct be_adapter *adapter)
Sathya Perlafe6d2a32010-11-21 23:25:50 +00003642{
3643 struct pci_dev *pdev = adapter->pdev;
3644 u32 sli_intf = 0, if_type;
3645
3646 switch (pdev->device) {
3647 case BE_DEVICE_ID1:
3648 case OC_DEVICE_ID1:
3649 adapter->generation = BE_GEN2;
3650 break;
3651 case BE_DEVICE_ID2:
3652 case OC_DEVICE_ID2:
3653 adapter->generation = BE_GEN3;
3654 break;
3655 case OC_DEVICE_ID3:
Mammatha Edhala12f4d0a2011-05-18 03:26:22 +00003656 case OC_DEVICE_ID4:
Sathya Perlafe6d2a32010-11-21 23:25:50 +00003657 pci_read_config_dword(pdev, SLI_INTF_REG_OFFSET, &sli_intf);
Parav Pandit045508a2012-03-26 14:27:13 +00003658 adapter->if_type = (sli_intf & SLI_INTF_IF_TYPE_MASK) >>
3659 SLI_INTF_IF_TYPE_SHIFT;
Sathya Perlafe6d2a32010-11-21 23:25:50 +00003660 if_type = (sli_intf & SLI_INTF_IF_TYPE_MASK) >>
3661 SLI_INTF_IF_TYPE_SHIFT;
Sathya Perlafe6d2a32010-11-21 23:25:50 +00003662 if (((sli_intf & SLI_INTF_VALID_MASK) != SLI_INTF_VALID) ||
Parav Pandit045508a2012-03-26 14:27:13 +00003663 !be_type_2_3(adapter)) {
3664 dev_err(&pdev->dev, "SLI_INTF reg val is not valid\n");
3665 return -EINVAL;
3666 }
3667 adapter->sli_family = ((sli_intf & SLI_INTF_FAMILY_MASK) >>
3668 SLI_INTF_FAMILY_SHIFT);
3669 adapter->generation = BE_GEN3;
3670 break;
3671 case OC_DEVICE_ID5:
3672 pci_read_config_dword(pdev, SLI_INTF_REG_OFFSET, &sli_intf);
3673 if ((sli_intf & SLI_INTF_VALID_MASK) != SLI_INTF_VALID) {
Sathya Perlafe6d2a32010-11-21 23:25:50 +00003674 dev_err(&pdev->dev, "SLI_INTF reg val is not valid\n");
3675 return -EINVAL;
3676 }
Sathya Perlafe6d2a32010-11-21 23:25:50 +00003677 adapter->sli_family = ((sli_intf & SLI_INTF_FAMILY_MASK) >>
3678 SLI_INTF_FAMILY_SHIFT);
3679 adapter->generation = BE_GEN3;
3680 break;
3681 default:
3682 adapter->generation = 0;
3683 }
Sathya Perla39f1d942012-05-08 19:41:24 +00003684
3685 pci_read_config_dword(adapter->pdev, SLI_INTF_REG_OFFSET, &sli_intf);
3686 adapter->virtfn = (sli_intf & SLI_INTF_FT_MASK) ? 1 : 0;
Sathya Perlafe6d2a32010-11-21 23:25:50 +00003687 return 0;
3688}
3689
/* Attempt a Lancer SLIPORT function recovery: wait for the FW to
 * report ready again, tear the function down (be_clear) and rebuild it
 * (be_setup), restoring the open state if the interface was running.
 * Returns 0 on success or a negative errno. The outcome is logged at
 * err level either way so it is always visible.
 */
static int lancer_recover_func(struct be_adapter *adapter)
{
	int status;

	status = lancer_test_and_set_rdy_state(adapter);
	if (status)
		goto err;

	if (netif_running(adapter->netdev))
		be_close(adapter->netdev);

	be_clear(adapter);

	/* clear the sticky error state before re-initializing */
	adapter->hw_error = false;
	adapter->fw_timeout = false;

	status = be_setup(adapter);
	if (status)
		goto err;

	if (netif_running(adapter->netdev)) {
		status = be_open(adapter->netdev);
		if (status)
			goto err;
	}

	dev_err(&adapter->pdev->dev,
		"Adapter SLIPORT recovery succeeded\n");
	return 0;
err:
	dev_err(&adapter->pdev->dev,
		"Adapter SLIPORT recovery failed\n");

	return status;
}
3725
/* Periodic (1s) worker that polls for adapter errors and, on Lancer
 * chips, attempts a SLIPORT function recovery. It reschedules itself
 * unconditionally; the remove/suspend/EEH paths stop it with
 * cancel_delayed_work_sync().
 */
static void be_func_recovery_task(struct work_struct *work)
{
	struct be_adapter *adapter =
		container_of(work, struct be_adapter, func_recovery_work.work);
	int status;

	be_detect_error(adapter);

	if (adapter->hw_error && lancer_chip(adapter)) {

		/* EEH recovery owns the device; don't interfere with it */
		if (adapter->eeh_error)
			goto out;

		rtnl_lock();
		netif_device_detach(adapter->netdev);
		rtnl_unlock();

		status = lancer_recover_func(adapter);

		/* re-attach the netdev only if recovery succeeded */
		if (!status)
			netif_device_attach(adapter->netdev);
	}

out:
	schedule_delayed_work(&adapter->func_recovery_work,
			      msecs_to_jiffies(1000));
}
3753
/* Periodic (1s) housekeeping worker: fires FW stats requests, polls
 * die temperature, replenishes RX rings that ran dry and adapts event
 * queue interrupt delays.
 */
static void be_worker(struct work_struct *work)
{
	struct be_adapter *adapter =
		container_of(work, struct be_adapter, work.work);
	struct be_rx_obj *rxo;
	struct be_eq_obj *eqo;
	int i;

	/* when interrupts are not yet enabled, just reap any pending
	 * mcc completions */
	if (!netif_running(adapter->netdev)) {
		local_bh_disable();
		be_process_mcc(adapter);
		local_bh_enable();
		goto reschedule;
	}

	/* don't issue a new stats cmd while a previous one is in flight */
	if (!adapter->stats_cmd_sent) {
		if (lancer_chip(adapter))
			lancer_cmd_get_pport_stats(adapter,
						   &adapter->stats_cmd);
		else
			be_cmd_get_stats(adapter, &adapter->stats_cmd);
	}

	/* temperature is polled only every be_get_temp_freq iterations */
	if (MODULO(adapter->work_counter, adapter->be_get_temp_freq) == 0)
		be_cmd_get_die_temperature(adapter);

	/* repost RX buffers on rings that starved under memory pressure */
	for_all_rx_queues(adapter, rxo, i) {
		if (rxo->rx_post_starved) {
			rxo->rx_post_starved = false;
			be_post_rx_frags(rxo, GFP_KERNEL);
		}
	}

	for_all_evt_queues(adapter, eqo, i)
		be_eqd_update(adapter, eqo);

reschedule:
	adapter->work_counter++;
	schedule_delayed_work(&adapter->work, msecs_to_jiffies(1000));
}
3796
Sathya Perla39f1d942012-05-08 19:41:24 +00003797static bool be_reset_required(struct be_adapter *adapter)
3798{
Sathya Perlad79c0a22012-06-05 19:37:22 +00003799 return be_find_vfs(adapter, ENABLED) > 0 ? false : true;
Sathya Perla39f1d942012-05-08 19:41:24 +00003800}
3801
/* PCI probe: bring up one adapter end-to-end — enable the device, map
 * BARs, sync with FW, build queues (be_setup), register the netdev and
 * kick off the periodic workers. Every failure point unwinds through
 * the goto ladder at the bottom in reverse acquisition order.
 */
static int __devinit be_probe(struct pci_dev *pdev,
			const struct pci_device_id *pdev_id)
{
	int status = 0;
	struct be_adapter *adapter;
	struct net_device *netdev;
	char port_name;

	status = pci_enable_device(pdev);
	if (status)
		goto do_none;

	status = pci_request_regions(pdev, DRV_NAME);
	if (status)
		goto disable_dev;
	pci_set_master(pdev);

	netdev = alloc_etherdev_mqs(sizeof(*adapter), MAX_TX_QS, MAX_RX_QS);
	if (netdev == NULL) {
		status = -ENOMEM;
		goto rel_reg;
	}
	adapter = netdev_priv(netdev);
	adapter->pdev = pdev;
	pci_set_drvdata(pdev, adapter);

	/* determines generation/SLI family; needed before BAR mapping */
	status = be_dev_type_check(adapter);
	if (status)
		goto free_netdev;

	adapter->netdev = netdev;
	SET_NETDEV_DEV(netdev, &pdev->dev);

	/* prefer 64-bit DMA; fall back to 32-bit if unavailable */
	status = dma_set_mask(&pdev->dev, DMA_BIT_MASK(64));
	if (!status) {
		netdev->features |= NETIF_F_HIGHDMA;
	} else {
		status = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
		if (status) {
			dev_err(&pdev->dev, "Could not set PCI DMA Mask\n");
			goto free_netdev;
		}
	}

	/* AER is optional; failure to enable it is only logged */
	status = pci_enable_pcie_error_reporting(pdev);
	if (status)
		dev_err(&pdev->dev, "Could not use PCIe error reporting\n");

	status = be_ctrl_init(adapter);
	if (status)
		goto free_netdev;

	/* sync up with fw's ready state */
	if (be_physfn(adapter)) {
		status = be_fw_wait_ready(adapter);
		if (status)
			goto ctrl_clean;
	}

	/* tell fw we're ready to fire cmds */
	status = be_cmd_fw_init(adapter);
	if (status)
		goto ctrl_clean;

	if (be_reset_required(adapter)) {
		status = be_cmd_reset_function(adapter);
		if (status)
			goto ctrl_clean;
	}

	/* The INTR bit may be set in the card when probed by a kdump kernel
	 * after a crash.
	 */
	if (!lancer_chip(adapter))
		be_intr_set(adapter, false);

	status = be_stats_init(adapter);
	if (status)
		goto ctrl_clean;

	status = be_get_initial_config(adapter);
	if (status)
		goto stats_clean;

	INIT_DELAYED_WORK(&adapter->work, be_worker);
	INIT_DELAYED_WORK(&adapter->func_recovery_work, be_func_recovery_task);
	adapter->rx_fc = adapter->tx_fc = true;

	status = be_setup(adapter);
	if (status)
		goto msix_disable;

	be_netdev_init(netdev);
	status = register_netdev(netdev);
	if (status != 0)
		goto unsetup;

	be_roce_dev_add(adapter);

	schedule_delayed_work(&adapter->func_recovery_work,
			      msecs_to_jiffies(1000));

	be_cmd_query_port_name(adapter, &port_name);

	dev_info(&pdev->dev, "%s: %s port %c\n", netdev->name, nic_name(pdev),
		 port_name);

	return 0;

unsetup:
	be_clear(adapter);
msix_disable:
	be_msix_disable(adapter);
stats_clean:
	be_stats_cleanup(adapter);
ctrl_clean:
	be_ctrl_cleanup(adapter);
free_netdev:
	free_netdev(netdev);
	pci_set_drvdata(pdev, NULL);
rel_reg:
	pci_release_regions(pdev);
disable_dev:
	pci_disable_device(pdev);
do_none:
	dev_err(&pdev->dev, "%s initialization failed\n", nic_name(pdev));
	return status;
}
3930
/* PM suspend: arm WOL if enabled, stop the recovery worker, close the
 * interface and tear down queues, then power the device down.
 */
static int be_suspend(struct pci_dev *pdev, pm_message_t state)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);
	struct net_device *netdev = adapter->netdev;

	if (adapter->wol)
		be_setup_wol(adapter, true);

	cancel_delayed_work_sync(&adapter->func_recovery_work);

	netif_device_detach(netdev);
	if (netif_running(netdev)) {
		rtnl_lock();
		be_close(netdev);
		rtnl_unlock();
	}
	be_clear(adapter);

	pci_save_state(pdev);
	pci_disable_device(pdev);
	pci_set_power_state(pdev, pci_choose_state(pdev, state));
	return 0;
}
3954
/* PM resume: re-enable the PCI device, re-init the FW cmd interface,
 * rebuild queues via be_setup(), restore the open state and restart
 * the recovery worker. Mirrors be_suspend().
 */
static int be_resume(struct pci_dev *pdev)
{
	int status = 0;
	struct be_adapter *adapter = pci_get_drvdata(pdev);
	struct net_device *netdev = adapter->netdev;

	netif_device_detach(netdev);

	status = pci_enable_device(pdev);
	if (status)
		return status;

	pci_set_power_state(pdev, 0);
	pci_restore_state(pdev);

	/* tell fw we're ready to fire cmds */
	status = be_cmd_fw_init(adapter);
	if (status)
		return status;

	be_setup(adapter);
	if (netif_running(netdev)) {
		rtnl_lock();
		be_open(netdev);
		rtnl_unlock();
	}

	schedule_delayed_work(&adapter->func_recovery_work,
			      msecs_to_jiffies(1000));
	netif_device_attach(netdev);

	/* disarm WOL now that we are fully awake */
	if (adapter->wol)
		be_setup_wol(adapter, false);

	return 0;
}
3991
/*
 * An FLR will stop BE from DMAing any data.
 */
static void be_shutdown(struct pci_dev *pdev)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);

	/* adapter may be NULL if probe failed before drvdata was set */
	if (!adapter)
		return;

	/* stop both periodic workers before resetting the function */
	cancel_delayed_work_sync(&adapter->work);
	cancel_delayed_work_sync(&adapter->func_recovery_work);

	netif_device_detach(adapter->netdev);

	if (adapter->wol)
		be_setup_wol(adapter, true);

	/* function-level reset guarantees all DMA is stopped */
	be_cmd_reset_function(adapter);

	pci_disable_device(pdev);
}
4014
/* EEH callback: a PCI channel error was detected. Quiesce the device
 * (detach netdev, close, tear down queues) and tell the EEH core
 * whether a slot reset should be attempted.
 */
static pci_ers_result_t be_eeh_err_detected(struct pci_dev *pdev,
				pci_channel_state_t state)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);
	struct net_device *netdev = adapter->netdev;

	dev_err(&adapter->pdev->dev, "EEH error detected\n");

	/* keeps the lancer recovery worker from touching the device */
	adapter->eeh_error = true;

	cancel_delayed_work_sync(&adapter->func_recovery_work);

	rtnl_lock();
	netif_device_detach(netdev);
	rtnl_unlock();

	if (netif_running(netdev)) {
		rtnl_lock();
		be_close(netdev);
		rtnl_unlock();
	}
	be_clear(adapter);

	if (state == pci_channel_io_perm_failure)
		return PCI_ERS_RESULT_DISCONNECT;

	pci_disable_device(pdev);

	/* The error could cause the FW to trigger a flash debug dump.
	 * Resetting the card while flash dump is in progress
	 * can cause it not to recover; wait for it to finish
	 */
	ssleep(30);
	return PCI_ERS_RESULT_NEED_RESET;
}
4050
/* EEH callback: the slot has been reset. Re-enable the device, restore
 * PCI state and wait for the FW to become ready before letting the EEH
 * core proceed to the resume stage.
 */
static pci_ers_result_t be_eeh_reset(struct pci_dev *pdev)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);
	int status;

	dev_info(&adapter->pdev->dev, "EEH reset\n");
	/* forget any previously latched hw/fw/eeh error state */
	be_clear_all_error(adapter);

	status = pci_enable_device(pdev);
	if (status)
		return PCI_ERS_RESULT_DISCONNECT;

	pci_set_master(pdev);
	pci_set_power_state(pdev, 0);
	pci_restore_state(pdev);

	/* Check if card is ok and fw is ready */
	status = be_fw_wait_ready(adapter);
	if (status)
		return PCI_ERS_RESULT_DISCONNECT;

	/* clear the device's AER uncorrectable-error status register */
	pci_cleanup_aer_uncorrect_error_status(pdev);
	return PCI_ERS_RESULT_RECOVERED;
}
4075
/* EEH callback: traffic may flow again. Rebuild the function from
 * scratch (fw init, function reset, be_setup), restore the open state
 * and restart the recovery worker.
 */
static void be_eeh_resume(struct pci_dev *pdev)
{
	int status = 0;
	struct be_adapter *adapter = pci_get_drvdata(pdev);
	struct net_device *netdev = adapter->netdev;

	dev_info(&adapter->pdev->dev, "EEH resume\n");

	pci_save_state(pdev);

	/* tell fw we're ready to fire cmds */
	status = be_cmd_fw_init(adapter);
	if (status)
		goto err;

	status = be_cmd_reset_function(adapter);
	if (status)
		goto err;

	status = be_setup(adapter);
	if (status)
		goto err;

	if (netif_running(netdev)) {
		status = be_open(netdev);
		if (status)
			goto err;
	}

	schedule_delayed_work(&adapter->func_recovery_work,
			      msecs_to_jiffies(1000));
	netif_device_attach(netdev);
	return;
err:
	dev_err(&adapter->pdev->dev, "EEH resume failed\n");
}
4112
/* PCI error-recovery (EEH/AER) callbacks wired into be_driver below */
static struct pci_error_handlers be_eeh_handlers = {
	.error_detected = be_eeh_err_detected,
	.slot_reset = be_eeh_reset,
	.resume = be_eeh_resume,
};
4118
/* PCI driver glue: probe/remove, power management, shutdown and
 * error-recovery entry points for all supported device ids.
 */
static struct pci_driver be_driver = {
	.name = DRV_NAME,
	.id_table = be_dev_ids,
	.probe = be_probe,
	.remove = be_remove,
	.suspend = be_suspend,
	.resume = be_resume,
	.shutdown = be_shutdown,
	.err_handler = &be_eeh_handlers
};
4129
4130static int __init be_init_module(void)
4131{
Joe Perches8e95a202009-12-03 07:58:21 +00004132 if (rx_frag_size != 8192 && rx_frag_size != 4096 &&
4133 rx_frag_size != 2048) {
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004134 printk(KERN_WARNING DRV_NAME
4135 " : Module param rx_frag_size must be 2048/4096/8192."
4136 " Using 2048\n");
4137 rx_frag_size = 2048;
4138 }
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004139
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004140 return pci_register_driver(&be_driver);
4141}
4142module_init(be_init_module);
4143
/* Module exit point: unregister the driver, detaching all devices. */
static void __exit be_exit_module(void)
{
	pci_unregister_driver(&be_driver);
}
module_exit(be_exit_module);