blob: 966d9afb652f4609c6012a47270ac91b45b3939d [file] [log] [blame]
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001/*
Ajit Khaparded2145cd2011-03-16 08:20:46 +00002 * Copyright (C) 2005 - 2011 Emulex
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003 * All rights reserved.
4 *
5 * This program is free software; you can redistribute it and/or
6 * modify it under the terms of the GNU General Public License version 2
7 * as published by the Free Software Foundation. The full GNU General
8 * Public License is included in this distribution in the file called COPYING.
9 *
10 * Contact Information:
Ajit Khaparded2145cd2011-03-16 08:20:46 +000011 * linux-drivers@emulex.com
Sathya Perla6b7c5b92009-03-11 23:32:03 -070012 *
Ajit Khaparded2145cd2011-03-16 08:20:46 +000013 * Emulex
14 * 3333 Susan Street
15 * Costa Mesa, CA 92626
Sathya Perla6b7c5b92009-03-11 23:32:03 -070016 */
17
Paul Gortmaker70c71602011-05-22 16:47:17 -040018#include <linux/prefetch.h>
Paul Gortmaker9d9779e2011-07-03 15:21:01 -040019#include <linux/module.h>
Sathya Perla6b7c5b92009-03-11 23:32:03 -070020#include "be.h"
Sathya Perla8788fdc2009-07-27 22:52:03 +000021#include "be_cmds.h"
Stephen Hemminger65f71b82009-03-27 00:25:24 -070022#include <asm/div64.h>
Sathya Perlad6b6d982012-09-05 01:56:48 +000023#include <linux/aer.h>
Sathya Perla6b7c5b92009-03-11 23:32:03 -070024
25MODULE_VERSION(DRV_VER);
26MODULE_DEVICE_TABLE(pci, be_dev_ids);
27MODULE_DESCRIPTION(DRV_DESC " " DRV_VER);
28MODULE_AUTHOR("ServerEngines Corporation");
29MODULE_LICENSE("GPL");
30
/* Module parameters; S_IRUGO => readable via sysfs, not writable at runtime */

/* Number of PCI virtual functions to enable at probe time (0 = none) */
static unsigned int num_vfs;
module_param(num_vfs, uint, S_IRUGO);
MODULE_PARM_DESC(num_vfs, "Number of PCI VFs to initialize");

/* Size of each RX buffer fragment posted to the adapter (default 2KB) */
static ushort rx_frag_size = 2048;
module_param(rx_frag_size, ushort, S_IRUGO);
MODULE_PARM_DESC(rx_frag_size, "Size of a fragment that holds rcvd data.");
38
/* PCI IDs of all adapter variants this driver binds to; the all-zero
 * entry terminates the table.
 */
static DEFINE_PCI_DEVICE_TABLE(be_dev_ids) = {
	{ PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID1) },
	{ PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID2) },
	{ PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID1) },
	{ PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID2) },
	{ PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID3)},
	{ PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID4)},
	{ PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID5)},
	{ 0 }
};
MODULE_DEVICE_TABLE(pci, be_dev_ids);
/* UE Status Low CSR */
/* Names of the HW blocks reported by the Unrecoverable Error status-low
 * register; array index presumably corresponds to the bit position in the
 * CSR (confirm against the UE-detect code using this table).
 */
static const char * const ue_status_low_desc[] = {
	"CEV",
	"CTX",
	"DBUF",
	"ERX",
	"Host",
	"MPU",
	"NDMA",
	"PTC ",
	"RDMA ",
	"RXF ",
	"RXIPS ",
	"RXULP0 ",
	"RXULP1 ",
	"RXULP2 ",
	"TIM ",
	"TPOST ",
	"TPRE ",
	"TXIPS ",
	"TXULP0 ",
	"TXULP1 ",
	"UC ",
	"WDMA ",
	"TXULP2 ",
	"HOST1 ",
	"P0_OB_LINK ",
	"P1_OB_LINK ",
	"HOST_GPIO ",
	"MBOX ",
	"AXGMAC0",
	"AXGMAC1",
	"JTAG",
	"MPU_INTPEND"
};
/* UE Status High CSR */
/* Companion table for the UE status-high register; index presumably maps
 * bit position -> offending HW block name (trailing "Unknown" entries pad
 * bits with no documented meaning).
 */
static const char * const ue_status_hi_desc[] = {
	"LPCMEMHOST",
	"MGMT_MAC",
	"PCS0ONLINE",
	"MPU_IRAM",
	"PCS1ONLINE",
	"PCTL0",
	"PCTL1",
	"PMEM",
	"RR",
	"TXPB",
	"RXPP",
	"XAUI",
	"TXP",
	"ARM",
	"IPC",
	"HOST2",
	"HOST3",
	"HOST4",
	"HOST5",
	"HOST6",
	"HOST7",
	"HOST8",
	"HOST9",
	"NETC",
	"Unknown",
	"Unknown",
	"Unknown",
	"Unknown",
	"Unknown",
	"Unknown",
	"Unknown",
	"Unknown"
};
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700120
Sathya Perla752961a2011-10-24 02:45:03 +0000121/* Is BE in a multi-channel mode */
122static inline bool be_is_mc(struct be_adapter *adapter) {
123 return (adapter->function_mode & FLEX10_MODE ||
124 adapter->function_mode & VNIC_MODE ||
125 adapter->function_mode & UMC_ENABLED);
126}
127
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700128static void be_queue_free(struct be_adapter *adapter, struct be_queue_info *q)
129{
130 struct be_dma_mem *mem = &q->dma_mem;
Sathya Perla1cfafab2012-02-23 18:50:15 +0000131 if (mem->va) {
Ivan Vecera2b7bceb2011-02-02 08:05:12 +0000132 dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
133 mem->dma);
Sathya Perla1cfafab2012-02-23 18:50:15 +0000134 mem->va = NULL;
135 }
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700136}
137
138static int be_queue_alloc(struct be_adapter *adapter, struct be_queue_info *q,
139 u16 len, u16 entry_size)
140{
141 struct be_dma_mem *mem = &q->dma_mem;
142
143 memset(q, 0, sizeof(*q));
144 q->len = len;
145 q->entry_size = entry_size;
146 mem->size = len * entry_size;
Ivan Vecera2b7bceb2011-02-02 08:05:12 +0000147 mem->va = dma_alloc_coherent(&adapter->pdev->dev, mem->size, &mem->dma,
148 GFP_KERNEL);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700149 if (!mem->va)
Sathya Perla10ef9ab2012-02-09 18:05:27 +0000150 return -ENOMEM;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700151 memset(mem->va, 0, mem->size);
152 return 0;
153}
154
/* Globally enable/disable the adapter's interrupts to the host by
 * flipping the HOSTINTR bit of the MEMBAR interrupt-control register,
 * accessed via PCI config space. Skips the write when the bit already
 * has the requested state, and does nothing after an EEH/PCI error.
 */
static void be_intr_set(struct be_adapter *adapter, bool enable)
{
	u32 reg, enabled;

	/* don't touch the device once a PCI/EEH error has been detected */
	if (adapter->eeh_error)
		return;

	pci_read_config_dword(adapter->pdev, PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET,
				&reg);
	enabled = reg & MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;

	if (!enabled && enable)
		reg |= MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
	else if (enabled && !enable)
		reg &= ~MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
	else
		return;		/* already in the requested state */

	pci_write_config_dword(adapter->pdev,
			PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET, reg);
}
176
/* Ring the RX-queue doorbell: tell HW that @posted new receive buffers
 * are available on ring @qid.
 */
static void be_rxq_notify(struct be_adapter *adapter, u16 qid, u16 posted)
{
	u32 val = 0;
	val |= qid & DB_RQ_RING_ID_MASK;
	val |= posted << DB_RQ_NUM_POSTED_SHIFT;

	/* make the descriptor writes visible before ringing the doorbell */
	wmb();
	iowrite32(val, adapter->db + DB_RQ_OFFSET);
}
186
/* Ring the TX-queue doorbell: tell HW that @posted new WRBs have been
 * placed on ring @qid.
 */
static void be_txq_notify(struct be_adapter *adapter, u16 qid, u16 posted)
{
	u32 val = 0;
	val |= qid & DB_TXULP_RING_ID_MASK;
	val |= (posted & DB_TXULP_NUM_POSTED_MASK) << DB_TXULP_NUM_POSTED_SHIFT;

	/* make the WRB writes visible before ringing the doorbell */
	wmb();
	iowrite32(val, adapter->db + DB_TXULP1_OFFSET);
}
196
/* Ring the event-queue doorbell for EQ @qid: acknowledge @num_popped
 * consumed event entries and optionally re-arm the EQ (@arm) and/or
 * clear its interrupt (@clear_int).
 */
static void be_eq_notify(struct be_adapter *adapter, u16 qid,
		bool arm, bool clear_int, u16 num_popped)
{
	u32 val = 0;
	val |= qid & DB_EQ_RING_ID_MASK;
	/* upper ring-id bits go into a separate doorbell field */
	val |= ((qid & DB_EQ_RING_ID_EXT_MASK) <<
			DB_EQ_RING_ID_EXT_MASK_SHIFT);

	/* don't touch the device once a PCI/EEH error has been detected */
	if (adapter->eeh_error)
		return;

	if (arm)
		val |= 1 << DB_EQ_REARM_SHIFT;
	if (clear_int)
		val |= 1 << DB_EQ_CLR_SHIFT;
	val |= 1 << DB_EQ_EVNT_SHIFT;
	val |= num_popped << DB_EQ_NUM_POPPED_SHIFT;
	iowrite32(val, adapter->db + DB_EQ_OFFSET);
}
216
/* Ring the completion-queue doorbell for CQ @qid: acknowledge
 * @num_popped consumed completion entries and optionally re-arm (@arm).
 * Non-static: also used by other files of this driver.
 */
void be_cq_notify(struct be_adapter *adapter, u16 qid, bool arm, u16 num_popped)
{
	u32 val = 0;
	val |= qid & DB_CQ_RING_ID_MASK;
	/* upper ring-id bits go into a separate doorbell field */
	val |= ((qid & DB_CQ_RING_ID_EXT_MASK) <<
			DB_CQ_RING_ID_EXT_MASK_SHIFT);

	/* don't touch the device once a PCI/EEH error has been detected */
	if (adapter->eeh_error)
		return;

	if (arm)
		val |= 1 << DB_CQ_REARM_SHIFT;
	val |= num_popped << DB_CQ_NUM_POPPED_SHIFT;
	iowrite32(val, adapter->db + DB_CQ_OFFSET);
}
232
/* ndo_set_mac_address handler: program a new station MAC into the FW.
 * @p is a struct sockaddr carrying the requested address.
 * Returns 0 on success, -EADDRNOTAVAIL for an invalid address, or the
 * FW command status on failure.
 */
static int be_mac_addr_set(struct net_device *netdev, void *p)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct sockaddr *addr = p;
	int status = 0;
	u8 current_mac[ETH_ALEN];
	u32 pmac_id = adapter->pmac_id[0];	/* id of the MAC being replaced */

	if (!is_valid_ether_addr(addr->sa_data))
		return -EADDRNOTAVAIL;

	/* fetch the currently programmed MAC so a no-op change is skipped */
	status = be_cmd_mac_addr_query(adapter, current_mac,
				MAC_ADDRESS_TYPE_NETWORK, false,
				adapter->if_handle, 0);
	if (status)
		goto err;

	if (memcmp(addr->sa_data, current_mac, ETH_ALEN)) {
		/* add the new MAC before deleting the old one — presumably so
		 * the interface is never left without a programmed MAC;
		 * NOTE(review): confirm this ordering requirement with FW docs
		 */
		status = be_cmd_pmac_add(adapter, (u8 *)addr->sa_data,
				adapter->if_handle, &adapter->pmac_id[0], 0);
		if (status)
			goto err;

		be_cmd_pmac_del(adapter, adapter->if_handle, pmac_id, 0);
	}
	memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
	return 0;
err:
	dev_err(&adapter->pdev->dev, "MAC %pM set Failed\n", addr->sa_data);
	return status;
}
264
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000265static void populate_be2_stats(struct be_adapter *adapter)
266{
Sathya Perlaac124ff2011-07-25 19:10:14 +0000267 struct be_hw_stats_v0 *hw_stats = hw_stats_from_cmd(adapter);
268 struct be_pmem_stats *pmem_sts = &hw_stats->pmem;
269 struct be_rxf_stats_v0 *rxf_stats = &hw_stats->rxf;
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000270 struct be_port_rxf_stats_v0 *port_stats =
Sathya Perlaac124ff2011-07-25 19:10:14 +0000271 &rxf_stats->port[adapter->port_num];
272 struct be_drv_stats *drvs = &adapter->drv_stats;
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000273
Sathya Perlaac124ff2011-07-25 19:10:14 +0000274 be_dws_le_to_cpu(hw_stats, sizeof(*hw_stats));
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000275 drvs->rx_pause_frames = port_stats->rx_pause_frames;
276 drvs->rx_crc_errors = port_stats->rx_crc_errors;
277 drvs->rx_control_frames = port_stats->rx_control_frames;
278 drvs->rx_in_range_errors = port_stats->rx_in_range_errors;
279 drvs->rx_frame_too_long = port_stats->rx_frame_too_long;
280 drvs->rx_dropped_runt = port_stats->rx_dropped_runt;
281 drvs->rx_ip_checksum_errs = port_stats->rx_ip_checksum_errs;
282 drvs->rx_tcp_checksum_errs = port_stats->rx_tcp_checksum_errs;
283 drvs->rx_udp_checksum_errs = port_stats->rx_udp_checksum_errs;
284 drvs->rxpp_fifo_overflow_drop = port_stats->rx_fifo_overflow;
285 drvs->rx_dropped_tcp_length = port_stats->rx_dropped_tcp_length;
286 drvs->rx_dropped_too_small = port_stats->rx_dropped_too_small;
287 drvs->rx_dropped_too_short = port_stats->rx_dropped_too_short;
288 drvs->rx_out_range_errors = port_stats->rx_out_range_errors;
Sathya Perlaac124ff2011-07-25 19:10:14 +0000289 drvs->rx_input_fifo_overflow_drop = port_stats->rx_input_fifo_overflow;
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000290 drvs->rx_dropped_header_too_small =
291 port_stats->rx_dropped_header_too_small;
Sathya Perlad45b9d32012-01-29 20:17:39 +0000292 drvs->rx_address_mismatch_drops =
293 port_stats->rx_address_mismatch_drops +
294 port_stats->rx_vlan_mismatch_drops;
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000295 drvs->rx_alignment_symbol_errors =
296 port_stats->rx_alignment_symbol_errors;
297
298 drvs->tx_pauseframes = port_stats->tx_pauseframes;
299 drvs->tx_controlframes = port_stats->tx_controlframes;
300
301 if (adapter->port_num)
Sathya Perlaac124ff2011-07-25 19:10:14 +0000302 drvs->jabber_events = rxf_stats->port1_jabber_events;
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000303 else
Sathya Perlaac124ff2011-07-25 19:10:14 +0000304 drvs->jabber_events = rxf_stats->port0_jabber_events;
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000305 drvs->rx_drops_no_pbuf = rxf_stats->rx_drops_no_pbuf;
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000306 drvs->rx_drops_no_erx_descr = rxf_stats->rx_drops_no_erx_descr;
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000307 drvs->forwarded_packets = rxf_stats->forwarded_packets;
308 drvs->rx_drops_mtu = rxf_stats->rx_drops_mtu;
Sathya Perlaac124ff2011-07-25 19:10:14 +0000309 drvs->rx_drops_no_tpre_descr = rxf_stats->rx_drops_no_tpre_descr;
310 drvs->rx_drops_too_many_frags = rxf_stats->rx_drops_too_many_frags;
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000311 adapter->drv_stats.eth_red_drops = pmem_sts->eth_red_drops;
312}
313
314static void populate_be3_stats(struct be_adapter *adapter)
315{
Sathya Perlaac124ff2011-07-25 19:10:14 +0000316 struct be_hw_stats_v1 *hw_stats = hw_stats_from_cmd(adapter);
317 struct be_pmem_stats *pmem_sts = &hw_stats->pmem;
318 struct be_rxf_stats_v1 *rxf_stats = &hw_stats->rxf;
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000319 struct be_port_rxf_stats_v1 *port_stats =
Sathya Perlaac124ff2011-07-25 19:10:14 +0000320 &rxf_stats->port[adapter->port_num];
321 struct be_drv_stats *drvs = &adapter->drv_stats;
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000322
Sathya Perlaac124ff2011-07-25 19:10:14 +0000323 be_dws_le_to_cpu(hw_stats, sizeof(*hw_stats));
Ajit Khaparde02fe7022011-12-09 13:53:09 +0000324 drvs->pmem_fifo_overflow_drop = port_stats->pmem_fifo_overflow_drop;
325 drvs->rx_priority_pause_frames = port_stats->rx_priority_pause_frames;
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000326 drvs->rx_pause_frames = port_stats->rx_pause_frames;
327 drvs->rx_crc_errors = port_stats->rx_crc_errors;
328 drvs->rx_control_frames = port_stats->rx_control_frames;
329 drvs->rx_in_range_errors = port_stats->rx_in_range_errors;
330 drvs->rx_frame_too_long = port_stats->rx_frame_too_long;
331 drvs->rx_dropped_runt = port_stats->rx_dropped_runt;
332 drvs->rx_ip_checksum_errs = port_stats->rx_ip_checksum_errs;
333 drvs->rx_tcp_checksum_errs = port_stats->rx_tcp_checksum_errs;
334 drvs->rx_udp_checksum_errs = port_stats->rx_udp_checksum_errs;
335 drvs->rx_dropped_tcp_length = port_stats->rx_dropped_tcp_length;
336 drvs->rx_dropped_too_small = port_stats->rx_dropped_too_small;
337 drvs->rx_dropped_too_short = port_stats->rx_dropped_too_short;
338 drvs->rx_out_range_errors = port_stats->rx_out_range_errors;
339 drvs->rx_dropped_header_too_small =
340 port_stats->rx_dropped_header_too_small;
341 drvs->rx_input_fifo_overflow_drop =
342 port_stats->rx_input_fifo_overflow_drop;
Sathya Perlad45b9d32012-01-29 20:17:39 +0000343 drvs->rx_address_mismatch_drops = port_stats->rx_address_mismatch_drops;
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000344 drvs->rx_alignment_symbol_errors =
345 port_stats->rx_alignment_symbol_errors;
Sathya Perlaac124ff2011-07-25 19:10:14 +0000346 drvs->rxpp_fifo_overflow_drop = port_stats->rxpp_fifo_overflow_drop;
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000347 drvs->tx_pauseframes = port_stats->tx_pauseframes;
348 drvs->tx_controlframes = port_stats->tx_controlframes;
349 drvs->jabber_events = port_stats->jabber_events;
350 drvs->rx_drops_no_pbuf = rxf_stats->rx_drops_no_pbuf;
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000351 drvs->rx_drops_no_erx_descr = rxf_stats->rx_drops_no_erx_descr;
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000352 drvs->forwarded_packets = rxf_stats->forwarded_packets;
353 drvs->rx_drops_mtu = rxf_stats->rx_drops_mtu;
Sathya Perlaac124ff2011-07-25 19:10:14 +0000354 drvs->rx_drops_no_tpre_descr = rxf_stats->rx_drops_no_tpre_descr;
355 drvs->rx_drops_too_many_frags = rxf_stats->rx_drops_too_many_frags;
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000356 adapter->drv_stats.eth_red_drops = pmem_sts->eth_red_drops;
357}
358
/* Copy Lancer per-physical-port counters from the FW cmd response buffer
 * into the chip-independent adapter->drv_stats block. Lancer 64-bit
 * counters are mapped via their low 32-bit halves (*_lo fields).
 */
static void populate_lancer_stats(struct be_adapter *adapter)
{

	struct be_drv_stats *drvs = &adapter->drv_stats;
	struct lancer_pport_stats *pport_stats =
					pport_stats_from_cmd(adapter);

	/* stats arrive little-endian; convert the whole struct in place */
	be_dws_le_to_cpu(pport_stats, sizeof(*pport_stats));
	drvs->rx_pause_frames = pport_stats->rx_pause_frames_lo;
	drvs->rx_crc_errors = pport_stats->rx_crc_errors_lo;
	drvs->rx_control_frames = pport_stats->rx_control_frames_lo;
	drvs->rx_in_range_errors = pport_stats->rx_in_range_errors;
	drvs->rx_frame_too_long = pport_stats->rx_frames_too_long_lo;
	drvs->rx_dropped_runt = pport_stats->rx_dropped_runt;
	drvs->rx_ip_checksum_errs = pport_stats->rx_ip_checksum_errors;
	drvs->rx_tcp_checksum_errs = pport_stats->rx_tcp_checksum_errors;
	drvs->rx_udp_checksum_errs = pport_stats->rx_udp_checksum_errors;
	drvs->rx_dropped_tcp_length =
				pport_stats->rx_dropped_invalid_tcp_length;
	drvs->rx_dropped_too_small = pport_stats->rx_dropped_too_small;
	drvs->rx_dropped_too_short = pport_stats->rx_dropped_too_short;
	drvs->rx_out_range_errors = pport_stats->rx_out_of_range_errors;
	drvs->rx_dropped_header_too_small =
				pport_stats->rx_dropped_header_too_small;
	drvs->rx_input_fifo_overflow_drop = pport_stats->rx_fifo_overflow;
	/* addr and vlan mismatch drops are reported separately; fold them */
	drvs->rx_address_mismatch_drops =
					pport_stats->rx_address_mismatch_drops +
					pport_stats->rx_vlan_mismatch_drops;
	drvs->rx_alignment_symbol_errors = pport_stats->rx_symbol_errors_lo;
	drvs->rxpp_fifo_overflow_drop = pport_stats->rx_fifo_overflow;
	drvs->tx_pauseframes = pport_stats->tx_pause_frames_lo;
	drvs->tx_controlframes = pport_stats->tx_control_frames_lo;
	drvs->jabber_events = pport_stats->rx_jabbers;
	drvs->forwarded_packets = pport_stats->num_forwards_lo;
	drvs->rx_drops_mtu = pport_stats->rx_drops_mtu_lo;
	drvs->rx_drops_too_many_frags =
				pport_stats->rx_drops_too_many_frags_lo;
}
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000397
Sathya Perla09c1c682011-08-22 19:41:53 +0000398static void accumulate_16bit_val(u32 *acc, u16 val)
399{
400#define lo(x) (x & 0xFFFF)
401#define hi(x) (x & 0xFFFF0000)
402 bool wrapped = val < lo(*acc);
403 u32 newacc = hi(*acc) + val;
404
405 if (wrapped)
406 newacc += 65536;
407 ACCESS_ONCE(*acc) = newacc;
408}
409
/* Dispatch to the chip-specific stats-parsing routine and then fold the
 * per-RXQ no-fragment drop counters (16-bit in HW, non-Lancer only) into
 * the driver's 32-bit accumulators.
 */
void be_parse_stats(struct be_adapter *adapter)
{
	struct be_erx_stats_v1 *erx = be_erx_stats_from_cmd(adapter);
	struct be_rx_obj *rxo;
	int i;

	if (adapter->generation == BE_GEN3) {
		if (lancer_chip(adapter))
			populate_lancer_stats(adapter);
		else
			populate_be3_stats(adapter);
	} else {
		populate_be2_stats(adapter);
	}

	/* the erx accumulation below does not apply to Lancer */
	if (lancer_chip(adapter))
		goto done;

	/* as erx_v1 is longer than v0, ok to use v1 defn for v0 access */
	for_all_rx_queues(adapter, rxo, i) {
		/* below erx HW counter can actually wrap around after
		 * 65535. Driver accumulates a 32-bit value
		 */
		accumulate_16bit_val(&rx_stats(rxo)->rx_drops_no_frags,
				(u16)erx->rx_drops_no_fragments[rxo->q.id]);
	}
done:
	return;
}
439
/* ndo_get_stats64 handler: aggregate per-RXQ/per-TXQ packet and byte
 * counters (read consistently via the u64_stats seqcount retry loop) and
 * derive the rtnl error counters from the FW-populated drv_stats.
 * Returns @stats for the caller's convenience.
 */
static struct rtnl_link_stats64 *be_get_stats64(struct net_device *netdev,
					struct rtnl_link_stats64 *stats)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_drv_stats *drvs = &adapter->drv_stats;
	struct be_rx_obj *rxo;
	struct be_tx_obj *txo;
	u64 pkts, bytes;
	unsigned int start;
	int i;

	for_all_rx_queues(adapter, rxo, i) {
		const struct be_rx_stats *rx_stats = rx_stats(rxo);
		/* retry until a consistent snapshot of pkts/bytes is read */
		do {
			start = u64_stats_fetch_begin_bh(&rx_stats->sync);
			pkts = rx_stats(rxo)->rx_pkts;
			bytes = rx_stats(rxo)->rx_bytes;
		} while (u64_stats_fetch_retry_bh(&rx_stats->sync, start));
		stats->rx_packets += pkts;
		stats->rx_bytes += bytes;
		stats->multicast += rx_stats(rxo)->rx_mcast_pkts;
		stats->rx_dropped += rx_stats(rxo)->rx_drops_no_skbs +
					rx_stats(rxo)->rx_drops_no_frags;
	}

	for_all_tx_queues(adapter, txo, i) {
		const struct be_tx_stats *tx_stats = tx_stats(txo);
		/* same consistent-snapshot dance for the TX side */
		do {
			start = u64_stats_fetch_begin_bh(&tx_stats->sync);
			pkts = tx_stats(txo)->tx_pkts;
			bytes = tx_stats(txo)->tx_bytes;
		} while (u64_stats_fetch_retry_bh(&tx_stats->sync, start));
		stats->tx_packets += pkts;
		stats->tx_bytes += bytes;
	}

	/* bad pkts received */
	stats->rx_errors = drvs->rx_crc_errors +
		drvs->rx_alignment_symbol_errors +
		drvs->rx_in_range_errors +
		drvs->rx_out_range_errors +
		drvs->rx_frame_too_long +
		drvs->rx_dropped_too_small +
		drvs->rx_dropped_too_short +
		drvs->rx_dropped_header_too_small +
		drvs->rx_dropped_tcp_length +
		drvs->rx_dropped_runt;

	/* detailed rx errors */
	stats->rx_length_errors = drvs->rx_in_range_errors +
		drvs->rx_out_range_errors +
		drvs->rx_frame_too_long;

	stats->rx_crc_errors = drvs->rx_crc_errors;

	/* frame alignment errors */
	stats->rx_frame_errors = drvs->rx_alignment_symbol_errors;

	/* receiver fifo overrun */
	/* drops_no_pbuf is no per i/f, it's per BE card */
	stats->rx_fifo_errors = drvs->rxpp_fifo_overflow_drop +
				drvs->rx_input_fifo_overflow_drop +
				drvs->rx_drops_no_pbuf;
	return stats;
}
505
Ajit Khapardeb236916a2011-12-30 12:15:40 +0000506void be_link_status_update(struct be_adapter *adapter, u8 link_status)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700507{
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700508 struct net_device *netdev = adapter->netdev;
509
Ajit Khapardeb236916a2011-12-30 12:15:40 +0000510 if (!(adapter->flags & BE_FLAGS_LINK_STATUS_INIT)) {
Sathya Perlaea172a02011-08-02 19:57:42 +0000511 netif_carrier_off(netdev);
Ajit Khapardeb236916a2011-12-30 12:15:40 +0000512 adapter->flags |= BE_FLAGS_LINK_STATUS_INIT;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700513 }
Ajit Khapardeb236916a2011-12-30 12:15:40 +0000514
515 if ((link_status & LINK_STATUS_MASK) == LINK_UP)
516 netif_carrier_on(netdev);
517 else
518 netif_carrier_off(netdev);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700519}
520
/* Account one transmit request on @txo: @wrb_cnt descriptors used,
 * @copied bytes mapped, @gso_segs segments (0 => one plain packet), and
 * whether the queue was @stopped. Writers are serialized per-queue; the
 * u64_stats markers let 32-bit readers get a consistent snapshot.
 */
static void be_tx_stats_update(struct be_tx_obj *txo,
			u32 wrb_cnt, u32 copied, u32 gso_segs, bool stopped)
{
	struct be_tx_stats *stats = tx_stats(txo);

	u64_stats_update_begin(&stats->sync);
	stats->tx_reqs++;
	stats->tx_wrbs += wrb_cnt;
	stats->tx_bytes += copied;
	stats->tx_pkts += (gso_segs ? gso_segs : 1);
	if (stopped)
		stats->tx_stops++;
	u64_stats_update_end(&stats->sync);
}
535
536/* Determine number of WRB entries needed to xmit data in an skb */
Sathya Perlafe6d2a32010-11-21 23:25:50 +0000537static u32 wrb_cnt_for_skb(struct be_adapter *adapter, struct sk_buff *skb,
538 bool *dummy)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700539{
David S. Millerebc8d2a2009-06-09 01:01:31 -0700540 int cnt = (skb->len > skb->data_len);
541
542 cnt += skb_shinfo(skb)->nr_frags;
543
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700544 /* to account for hdr wrb */
545 cnt++;
Sathya Perlafe6d2a32010-11-21 23:25:50 +0000546 if (lancer_chip(adapter) || !(cnt & 1)) {
547 *dummy = false;
548 } else {
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700549 /* add a dummy to make it an even num */
550 cnt++;
551 *dummy = true;
Sathya Perlafe6d2a32010-11-21 23:25:50 +0000552 }
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700553 BUG_ON(cnt > BE_MAX_TX_FRAG_COUNT);
554 return cnt;
555}
556
/* Fill a TX WRB with the DMA address and length of one buffer fragment.
 * The 64-bit @addr is split into hi/lo halves; @len is masked to the
 * width the WRB's length field supports.
 */
static inline void wrb_fill(struct be_eth_wrb *wrb, u64 addr, int len)
{
	wrb->frag_pa_hi = upper_32_bits(addr);
	wrb->frag_pa_lo = addr & 0xFFFFFFFF;
	wrb->frag_len = len & ETH_WRB_FRAG_LEN_MASK;
	wrb->rsvd0 = 0;
}
564
/* Return the VLAN tag to place in the TX descriptor for @skb. If the
 * 802.1p priority requested by the stack is not in the priorities
 * granted to this function (vlan_prio_bmap), substitute the FW's
 * recommended priority while keeping the VLAN id.
 */
static inline u16 be_get_tx_vlan_tag(struct be_adapter *adapter,
					struct sk_buff *skb)
{
	u8 vlan_prio;
	u16 vlan_tag;

	vlan_tag = vlan_tx_tag_get(skb);
	vlan_prio = (vlan_tag & VLAN_PRIO_MASK) >> VLAN_PRIO_SHIFT;
	/* If vlan priority provided by OS is NOT in available bmap */
	if (!(adapter->vlan_prio_bmap & (1 << vlan_prio)))
		vlan_tag = (vlan_tag & ~VLAN_PRIO_MASK) |
				adapter->recommended_prio;

	return vlan_tag;
}
580
Somnath Kotur93040ae2012-06-26 22:32:10 +0000581static int be_vlan_tag_chk(struct be_adapter *adapter, struct sk_buff *skb)
582{
583 return vlan_tx_tag_present(skb) || adapter->pvid;
584}
585
/* Build the header WRB for a TX request: sets LSO/checksum-offload bits
 * based on the skb, the VLAN tag if present, and the total WRB count
 * (@wrb_cnt) and payload length (@len) of the request.
 */
static void wrb_fill_hdr(struct be_adapter *adapter, struct be_eth_hdr_wrb *hdr,
		struct sk_buff *skb, u32 wrb_cnt, u32 len)
{
	u16 vlan_tag;

	memset(hdr, 0, sizeof(*hdr));

	AMAP_SET_BITS(struct amap_eth_hdr_wrb, crc, hdr, 1);

	if (skb_is_gso(skb)) {
		AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso, hdr, 1);
		AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso_mss,
			hdr, skb_shinfo(skb)->gso_size);
		/* IPv6 LSO bit is only set for non-Lancer chips */
		if (skb_is_gso_v6(skb) && !lancer_chip(adapter))
			AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso6, hdr, 1);
		/* Lancer A0 additionally wants explicit csum bits with LSO */
		if (lancer_chip(adapter) && adapter->sli_family ==
			LANCER_A0_SLI_FAMILY) {
			AMAP_SET_BITS(struct amap_eth_hdr_wrb, ipcs, hdr, 1);
			if (is_tcp_pkt(skb))
				AMAP_SET_BITS(struct amap_eth_hdr_wrb,
						tcpcs, hdr, 1);
			else if (is_udp_pkt(skb))
				AMAP_SET_BITS(struct amap_eth_hdr_wrb,
						udpcs, hdr, 1);
		}
	} else if (skb->ip_summed == CHECKSUM_PARTIAL) {
		/* stack asked for HW checksum offload */
		if (is_tcp_pkt(skb))
			AMAP_SET_BITS(struct amap_eth_hdr_wrb, tcpcs, hdr, 1);
		else if (is_udp_pkt(skb))
			AMAP_SET_BITS(struct amap_eth_hdr_wrb, udpcs, hdr, 1);
	}

	if (vlan_tx_tag_present(skb)) {
		AMAP_SET_BITS(struct amap_eth_hdr_wrb, vlan, hdr, 1);
		vlan_tag = be_get_tx_vlan_tag(adapter, skb);
		AMAP_SET_BITS(struct amap_eth_hdr_wrb, vlan_tag, hdr, vlan_tag);
	}

	AMAP_SET_BITS(struct amap_eth_hdr_wrb, event, hdr, 1);
	AMAP_SET_BITS(struct amap_eth_hdr_wrb, complete, hdr, 1);
	AMAP_SET_BITS(struct amap_eth_hdr_wrb, num_wrb, hdr, wrb_cnt);
	AMAP_SET_BITS(struct amap_eth_hdr_wrb, len, hdr, len);
}
629
/* DMA-unmap the buffer described by a completed TX WRB. The WRB is first
 * converted back from little-endian so its address/length fields can be
 * read; @unmap_single selects dma_unmap_single vs dma_unmap_page to
 * mirror how the fragment was originally mapped. Zero-length WRBs
 * (header/dummy entries) carry no mapping and are skipped.
 */
static void unmap_tx_frag(struct device *dev, struct be_eth_wrb *wrb,
		bool unmap_single)
{
	dma_addr_t dma;

	be_dws_le_to_cpu(wrb, sizeof(*wrb));

	dma = (u64)wrb->frag_pa_hi << 32 | (u64)wrb->frag_pa_lo;
	if (wrb->frag_len) {
		if (unmap_single)
			dma_unmap_single(dev, dma, wrb->frag_len,
					DMA_TO_DEVICE);
		else
			dma_unmap_page(dev, dma, wrb->frag_len, DMA_TO_DEVICE);
	}
}
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700646
/* Map the skb's head and frags for DMA and post one WRB per mapping onto
 * the TX queue, preceded by a header WRB (filled last, once the total
 * copied length is known) and optionally followed by a dummy WRB.
 *
 * Returns the number of payload bytes queued, or 0 on a DMA mapping
 * failure, in which case all mappings made so far are rolled back and
 * the queue head is restored.
 */
static int make_tx_wrbs(struct be_adapter *adapter, struct be_queue_info *txq,
		struct sk_buff *skb, u32 wrb_cnt, bool dummy_wrb)
{
	dma_addr_t busaddr;
	int i, copied = 0;
	struct device *dev = &adapter->pdev->dev;
	struct sk_buff *first_skb = skb;
	struct be_eth_wrb *wrb;
	struct be_eth_hdr_wrb *hdr;
	bool map_single = false;
	u16 map_head;

	/* Reserve the first slot for the header WRB; it is filled at the
	 * end because it needs the final 'copied' byte count. */
	hdr = queue_head_node(txq);
	queue_head_inc(txq);
	map_head = txq->head;	/* first data-WRB slot, for error rollback */

	/* Linear (head) portion of the skb, if any */
	if (skb->len > skb->data_len) {
		int len = skb_headlen(skb);
		busaddr = dma_map_single(dev, skb->data, len, DMA_TO_DEVICE);
		if (dma_mapping_error(dev, busaddr))
			goto dma_err;
		map_single = true;	/* first mapping was dma_map_single() */
		wrb = queue_head_node(txq);
		wrb_fill(wrb, busaddr, len);
		be_dws_cpu_to_le(wrb, sizeof(*wrb));
		queue_head_inc(txq);
		copied += len;
	}

	/* One WRB per page fragment */
	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		const struct skb_frag_struct *frag =
			&skb_shinfo(skb)->frags[i];
		busaddr = skb_frag_dma_map(dev, frag, 0,
					skb_frag_size(frag), DMA_TO_DEVICE);
		if (dma_mapping_error(dev, busaddr))
			goto dma_err;
		wrb = queue_head_node(txq);
		wrb_fill(wrb, busaddr, skb_frag_size(frag));
		be_dws_cpu_to_le(wrb, sizeof(*wrb));
		queue_head_inc(txq);
		copied += skb_frag_size(frag);
	}

	/* Pad with an empty WRB when the caller requested an even count */
	if (dummy_wrb) {
		wrb = queue_head_node(txq);
		wrb_fill(wrb, 0, 0);
		be_dws_cpu_to_le(wrb, sizeof(*wrb));
		queue_head_inc(txq);
	}

	wrb_fill_hdr(adapter, hdr, first_skb, wrb_cnt, copied);
	be_dws_cpu_to_le(hdr, sizeof(*hdr));

	return copied;
dma_err:
	/* Unwind: unmap every WRB queued so far, walking forward from the
	 * first data slot. Only the very first mapping may have been a
	 * dma_map_single(); the rest are page mappings. */
	txq->head = map_head;
	while (copied) {
		wrb = queue_head_node(txq);
		unmap_tx_frag(dev, wrb, map_single);
		map_single = false;
		copied -= wrb->frag_len;
		queue_head_inc(txq);
	}
	return 0;
}
712
Somnath Kotur93040ae2012-06-26 22:32:10 +0000713static struct sk_buff *be_insert_vlan_in_pkt(struct be_adapter *adapter,
714 struct sk_buff *skb)
715{
716 u16 vlan_tag = 0;
717
718 skb = skb_share_check(skb, GFP_ATOMIC);
719 if (unlikely(!skb))
720 return skb;
721
722 if (vlan_tx_tag_present(skb)) {
723 vlan_tag = be_get_tx_vlan_tag(adapter, skb);
724 __vlan_put_tag(skb, vlan_tag);
725 skb->vlan_tci = 0;
726 }
727
728 return skb;
729}
730
/* ndo_start_xmit handler: applies two HW-bug workarounds (padding trim and
 * in-payload VLAN insertion), maps the skb into TX WRBs, and rings the TX
 * doorbell. Always returns NETDEV_TX_OK; undeliverable skbs are freed.
 */
static netdev_tx_t be_xmit(struct sk_buff *skb,
			struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_tx_obj *txo = &adapter->tx_obj[skb_get_queue_mapping(skb)];
	struct be_queue_info *txq = &txo->q;
	struct iphdr *ip = NULL;
	u32 wrb_cnt = 0, copied = 0;
	u32 start = txq->head, eth_hdr_len;	/* head saved for rollback */
	bool dummy_wrb, stopped = false;

	eth_hdr_len = ntohs(skb->protocol) == ETH_P_8021Q ?
		VLAN_ETH_HLEN : ETH_HLEN;

	/* HW has a bug which considers padding bytes as legal
	 * and modifies the IPv4 hdr's 'tot_len' field.
	 * Trim the frame to the length claimed by the IP header so the
	 * padding never reaches the HW.
	 */
	if (skb->len <= 60 && be_vlan_tag_chk(adapter, skb) &&
			is_ipv4_pkt(skb)) {
		ip = (struct iphdr *)ip_hdr(skb);
		pskb_trim(skb, eth_hdr_len + ntohs(ip->tot_len));
	}

	/* HW has a bug wherein it will calculate CSUM for VLAN
	 * pkts even though it is disabled.
	 * Manually insert VLAN in pkt.
	 */
	if (skb->ip_summed != CHECKSUM_PARTIAL &&
			be_vlan_tag_chk(adapter, skb)) {
		skb = be_insert_vlan_in_pkt(adapter, skb);
		if (unlikely(!skb))
			goto tx_drop;
	}

	wrb_cnt = wrb_cnt_for_skb(adapter, skb, &dummy_wrb);

	copied = make_tx_wrbs(adapter, txq, skb, wrb_cnt, dummy_wrb);
	if (copied) {
		/* gso_segs read before the doorbell: the skb may be freed
		 * by TX completion as soon as the HW is notified */
		int gso_segs = skb_shinfo(skb)->gso_segs;

		/* record the sent skb in the sent_skb table */
		BUG_ON(txo->sent_skb_list[start]);
		txo->sent_skb_list[start] = skb;

		/* Ensure txq has space for the next skb; Else stop the queue
		 * *BEFORE* ringing the tx doorbell, so that we serialze the
		 * tx compls of the current transmit which'll wake up the queue
		 */
		atomic_add(wrb_cnt, &txq->used);
		if ((BE_MAX_TX_FRAG_COUNT + atomic_read(&txq->used)) >=
								txq->len) {
			netif_stop_subqueue(netdev, skb_get_queue_mapping(skb));
			stopped = true;
		}

		be_txq_notify(adapter, txq->id, wrb_cnt);

		be_tx_stats_update(txo, wrb_cnt, copied, gso_segs, stopped);
	} else {
		/* DMA mapping failed: restore queue head and drop */
		txq->head = start;
		dev_kfree_skb_any(skb);
	}
tx_drop:
	return NETDEV_TX_OK;
}
796
797static int be_change_mtu(struct net_device *netdev, int new_mtu)
798{
799 struct be_adapter *adapter = netdev_priv(netdev);
800 if (new_mtu < BE_MIN_MTU ||
Ajit Khaparde34a89b82010-02-09 01:32:43 +0000801 new_mtu > (BE_MAX_JUMBO_FRAME_SIZE -
802 (ETH_HLEN + ETH_FCS_LEN))) {
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700803 dev_info(&adapter->pdev->dev,
804 "MTU must be between %d and %d bytes\n",
Ajit Khaparde34a89b82010-02-09 01:32:43 +0000805 BE_MIN_MTU,
806 (BE_MAX_JUMBO_FRAME_SIZE - (ETH_HLEN + ETH_FCS_LEN)));
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700807 return -EINVAL;
808 }
809 dev_info(&adapter->pdev->dev, "MTU changed from %d to %d bytes\n",
810 netdev->mtu, new_mtu);
811 netdev->mtu = new_mtu;
812 return 0;
813}
814
/*
 * A max of 64 (BE_NUM_VLANS_SUPPORTED) vlans can be configured in BE.
 * If the user configures more, place BE in vlan promiscuous mode.
 */
static int be_vid_config(struct be_adapter *adapter)
{
	u16 vids[BE_NUM_VLANS_SUPPORTED];
	u16 num = 0, i;
	int status = 0;

	/* No need to further configure vids if in promiscuous mode */
	if (adapter->promiscuous)
		return 0;

	if (adapter->vlans_added > adapter->max_vlans)
		goto set_vlan_promisc;

	/* Construct VLAN Table to give to HW */
	for (i = 0; i < VLAN_N_VID; i++)
		if (adapter->vlan_tag[i])
			vids[num++] = cpu_to_le16(i);

	status = be_cmd_vlan_config(adapter, adapter->if_handle,
				    vids, num, 1, 0);

	/* Set to VLAN promisc mode as setting VLAN filter failed */
	if (status) {
		dev_info(&adapter->pdev->dev, "Exhausted VLAN HW filters.\n");
		dev_info(&adapter->pdev->dev, "Disabling HW VLAN filtering.\n");
		goto set_vlan_promisc;
	}

	return status;

set_vlan_promisc:
	/* Fall back: ask FW for VLAN-promiscuous on this interface */
	status = be_cmd_vlan_config(adapter, adapter->if_handle,
				    NULL, 0, 1, 1);
	return status;
}
854
/* ndo_vlan_rx_add_vid handler: mark vid as in use and re-program the HW
 * VLAN table. On failure the local table entry is rolled back.
 * Only the physical function may program VLAN filters.
 */
static int be_vlan_add_vid(struct net_device *netdev, u16 vid)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	int status = 0;

	if (!be_physfn(adapter)) {
		status = -EINVAL;
		goto ret;
	}

	adapter->vlan_tag[vid] = 1;
	/* NOTE(review): guard uses max_vlans + 1 here while the remove path
	 * uses max_vlans; presumably intentional so the first over-limit add
	 * still triggers the promisc fallback in be_vid_config() — confirm.
	 */
	if (adapter->vlans_added <= (adapter->max_vlans + 1))
		status = be_vid_config(adapter);

	if (!status)
		adapter->vlans_added++;
	else
		adapter->vlan_tag[vid] = 0;	/* roll back on failure */
ret:
	return status;
}
876
Jiri Pirko8e586132011-12-08 19:52:37 -0500877static int be_vlan_rem_vid(struct net_device *netdev, u16 vid)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700878{
879 struct be_adapter *adapter = netdev_priv(netdev);
Ajit Khaparde80817cb2011-12-30 12:15:12 +0000880 int status = 0;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700881
Ajit Khaparde80817cb2011-12-30 12:15:12 +0000882 if (!be_physfn(adapter)) {
883 status = -EINVAL;
884 goto ret;
885 }
Sarveshwar Bandiba343c72010-03-31 02:56:12 +0000886
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700887 adapter->vlan_tag[vid] = 0;
Ajit Khaparde82903e42010-02-09 01:34:57 +0000888 if (adapter->vlans_added <= adapter->max_vlans)
Sathya Perla10329df2012-06-05 19:37:18 +0000889 status = be_vid_config(adapter);
Jiri Pirko8e586132011-12-08 19:52:37 -0500890
Ajit Khaparde80817cb2011-12-30 12:15:12 +0000891 if (!status)
892 adapter->vlans_added--;
893 else
894 adapter->vlan_tag[vid] = 1;
895ret:
896 return status;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700897}
898
/* ndo_set_rx_mode handler: programs promiscuous / multicast / unicast
 * RX filters to match the netdev's current flags and address lists,
 * falling back to (mcast-)promiscuous mode when HW filter slots run out.
 */
static void be_set_rx_mode(struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	int status;

	if (netdev->flags & IFF_PROMISC) {
		be_cmd_rx_filter(adapter, IFF_PROMISC, ON);
		adapter->promiscuous = true;
		goto done;
	}

	/* BE was previously in promiscuous mode; disable it */
	if (adapter->promiscuous) {
		adapter->promiscuous = false;
		be_cmd_rx_filter(adapter, IFF_PROMISC, OFF);

		/* VLAN filters were bypassed while promiscuous; restore */
		if (adapter->vlans_added)
			be_vid_config(adapter);
	}

	/* Enable multicast promisc if num configured exceeds what we support */
	if (netdev->flags & IFF_ALLMULTI ||
	    netdev_mc_count(netdev) > BE_MAX_MC) {
		be_cmd_rx_filter(adapter, IFF_ALLMULTI, ON);
		goto done;
	}

	/* Unicast list changed: delete all secondary MACs, then re-add */
	if (netdev_uc_count(netdev) != adapter->uc_macs) {
		struct netdev_hw_addr *ha;
		int i = 1; /* First slot is claimed by the Primary MAC */

		for (; adapter->uc_macs > 0; adapter->uc_macs--, i++) {
			be_cmd_pmac_del(adapter, adapter->if_handle,
					adapter->pmac_id[i], 0);
		}

		/* More unicast addresses than pmac slots: go promiscuous */
		if (netdev_uc_count(netdev) > adapter->max_pmac_cnt) {
			be_cmd_rx_filter(adapter, IFF_PROMISC, ON);
			adapter->promiscuous = true;
			goto done;
		}

		netdev_for_each_uc_addr(ha, adapter->netdev) {
			adapter->uc_macs++; /* First slot is for Primary MAC */
			be_cmd_pmac_add(adapter, (u8 *)ha->addr,
					adapter->if_handle,
					&adapter->pmac_id[adapter->uc_macs], 0);
		}
	}

	status = be_cmd_rx_filter(adapter, IFF_MULTICAST, ON);

	/* Set to MCAST promisc mode if setting MULTICAST address fails */
	if (status) {
		dev_info(&adapter->pdev->dev, "Exhausted multicast HW filters.\n");
		dev_info(&adapter->pdev->dev, "Disabling HW multicast filtering.\n");
		be_cmd_rx_filter(adapter, IFF_ALLMULTI, ON);
	}
done:
	return;
}
960
Sarveshwar Bandiba343c72010-03-31 02:56:12 +0000961static int be_set_vf_mac(struct net_device *netdev, int vf, u8 *mac)
962{
963 struct be_adapter *adapter = netdev_priv(netdev);
Sathya Perla11ac75e2011-12-13 00:58:50 +0000964 struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf];
Sarveshwar Bandiba343c72010-03-31 02:56:12 +0000965 int status;
966
Sathya Perla11ac75e2011-12-13 00:58:50 +0000967 if (!sriov_enabled(adapter))
Sarveshwar Bandiba343c72010-03-31 02:56:12 +0000968 return -EPERM;
969
Sathya Perla11ac75e2011-12-13 00:58:50 +0000970 if (!is_valid_ether_addr(mac) || vf >= adapter->num_vfs)
Sarveshwar Bandiba343c72010-03-31 02:56:12 +0000971 return -EINVAL;
972
Padmanabh Ratnakar590c3912011-11-25 05:47:26 +0000973 if (lancer_chip(adapter)) {
974 status = be_cmd_set_mac_list(adapter, mac, 1, vf + 1);
975 } else {
Sathya Perla11ac75e2011-12-13 00:58:50 +0000976 status = be_cmd_pmac_del(adapter, vf_cfg->if_handle,
977 vf_cfg->pmac_id, vf + 1);
Sarveshwar Bandiba343c72010-03-31 02:56:12 +0000978
Sathya Perla11ac75e2011-12-13 00:58:50 +0000979 status = be_cmd_pmac_add(adapter, mac, vf_cfg->if_handle,
980 &vf_cfg->pmac_id, vf + 1);
Padmanabh Ratnakar590c3912011-11-25 05:47:26 +0000981 }
982
Ajit Khaparde64600ea2010-07-23 01:50:34 +0000983 if (status)
Sarveshwar Bandiba343c72010-03-31 02:56:12 +0000984 dev_err(&adapter->pdev->dev, "MAC %pM set on VF %d Failed\n",
985 mac, vf);
Ajit Khaparde64600ea2010-07-23 01:50:34 +0000986 else
Sathya Perla11ac75e2011-12-13 00:58:50 +0000987 memcpy(vf_cfg->mac_addr, mac, ETH_ALEN);
Ajit Khaparde64600ea2010-07-23 01:50:34 +0000988
Sarveshwar Bandiba343c72010-03-31 02:56:12 +0000989 return status;
990}
991
Ajit Khaparde64600ea2010-07-23 01:50:34 +0000992static int be_get_vf_config(struct net_device *netdev, int vf,
993 struct ifla_vf_info *vi)
994{
995 struct be_adapter *adapter = netdev_priv(netdev);
Sathya Perla11ac75e2011-12-13 00:58:50 +0000996 struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf];
Ajit Khaparde64600ea2010-07-23 01:50:34 +0000997
Sathya Perla11ac75e2011-12-13 00:58:50 +0000998 if (!sriov_enabled(adapter))
Ajit Khaparde64600ea2010-07-23 01:50:34 +0000999 return -EPERM;
1000
Sathya Perla11ac75e2011-12-13 00:58:50 +00001001 if (vf >= adapter->num_vfs)
Ajit Khaparde64600ea2010-07-23 01:50:34 +00001002 return -EINVAL;
1003
1004 vi->vf = vf;
Sathya Perla11ac75e2011-12-13 00:58:50 +00001005 vi->tx_rate = vf_cfg->tx_rate;
1006 vi->vlan = vf_cfg->vlan_tag;
Ajit Khaparde64600ea2010-07-23 01:50:34 +00001007 vi->qos = 0;
Sathya Perla11ac75e2011-12-13 00:58:50 +00001008 memcpy(&vi->mac, vf_cfg->mac_addr, ETH_ALEN);
Ajit Khaparde64600ea2010-07-23 01:50:34 +00001009
1010 return 0;
1011}
1012
/* ndo_set_vf_vlan handler: program transparent VLAN tagging for VF 'vf'
 * via the hardware switch config. vlan == 0 resets tagging back to the
 * VF's default vid.
 * NOTE(review): the 'qos' argument is accepted but never used — presumably
 * QoS is unsupported on this HW; confirm before relying on it.
 */
static int be_set_vf_vlan(struct net_device *netdev,
			int vf, u16 vlan, u8 qos)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	int status = 0;

	if (!sriov_enabled(adapter))
		return -EPERM;

	if (vf >= adapter->num_vfs || vlan > 4095)
		return -EINVAL;

	if (vlan) {
		if (adapter->vf_cfg[vf].vlan_tag != vlan) {
			/* If this is new value, program it. Else skip. */
			adapter->vf_cfg[vf].vlan_tag = vlan;

			status = be_cmd_set_hsw_config(adapter, vlan,
				vf + 1, adapter->vf_cfg[vf].if_handle);
		}
	} else {
		/* Reset Transparent Vlan Tagging. */
		adapter->vf_cfg[vf].vlan_tag = 0;
		vlan = adapter->vf_cfg[vf].def_vid;
		status = be_cmd_set_hsw_config(adapter, vlan, vf + 1,
			adapter->vf_cfg[vf].if_handle);
	}


	if (status)
		dev_info(&adapter->pdev->dev,
				"VLAN %d config on VF %d failed\n", vlan, vf);
	return status;
}
1047
Ajit Khapardee1d18732010-07-23 01:52:13 +00001048static int be_set_vf_tx_rate(struct net_device *netdev,
1049 int vf, int rate)
1050{
1051 struct be_adapter *adapter = netdev_priv(netdev);
1052 int status = 0;
1053
Sathya Perla11ac75e2011-12-13 00:58:50 +00001054 if (!sriov_enabled(adapter))
Ajit Khapardee1d18732010-07-23 01:52:13 +00001055 return -EPERM;
1056
Ajit Khaparde94f434c2011-12-30 12:15:30 +00001057 if (vf >= adapter->num_vfs)
Ajit Khapardee1d18732010-07-23 01:52:13 +00001058 return -EINVAL;
1059
Ajit Khaparde94f434c2011-12-30 12:15:30 +00001060 if (rate < 100 || rate > 10000) {
1061 dev_err(&adapter->pdev->dev,
1062 "tx rate must be between 100 and 10000 Mbps\n");
1063 return -EINVAL;
1064 }
Ajit Khapardee1d18732010-07-23 01:52:13 +00001065
Ajit Khaparde856c4012011-02-11 13:32:32 +00001066 status = be_cmd_set_qos(adapter, rate / 10, vf + 1);
Ajit Khapardee1d18732010-07-23 01:52:13 +00001067
1068 if (status)
Ajit Khaparde94f434c2011-12-30 12:15:30 +00001069 dev_err(&adapter->pdev->dev,
Ajit Khapardee1d18732010-07-23 01:52:13 +00001070 "tx rate %d on VF %d failed\n", rate, vf);
Ajit Khaparde94f434c2011-12-30 12:15:30 +00001071 else
1072 adapter->vf_cfg[vf].tx_rate = rate;
Ajit Khapardee1d18732010-07-23 01:52:13 +00001073 return status;
1074}
1075
/* Walk the PCI bus counting this PF's virtual functions.
 * Returns the number of VFs in 'vf_state' (ASSIGNED → only VFs currently
 * assigned to a guest; otherwise all VFs), or 0 if the device has no
 * SR-IOV capability.
 */
static int be_find_vfs(struct be_adapter *adapter, int vf_state)
{
	struct pci_dev *dev, *pdev = adapter->pdev;
	int vfs = 0, assigned_vfs = 0, pos;
	u16 offset, stride;

	pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_SRIOV);
	if (!pos)
		return 0;
	/* NOTE(review): offset/stride are read but no longer used — the
	 * match below relies on is_virtfn/physfn instead; these reads look
	 * like dead code left from an earlier devfn-based match. */
	pci_read_config_word(pdev, pos + PCI_SRIOV_VF_OFFSET, &offset);
	pci_read_config_word(pdev, pos + PCI_SRIOV_VF_STRIDE, &stride);

	/* pci_get_device() drops the ref on the previous device and takes
	 * one on the returned device, so the loop is refcount-balanced. */
	dev = pci_get_device(pdev->vendor, PCI_ANY_ID, NULL);
	while (dev) {
		if (dev->is_virtfn && dev->physfn == pdev) {
			vfs++;
			if (dev->dev_flags & PCI_DEV_FLAGS_ASSIGNED)
				assigned_vfs++;
		}
		dev = pci_get_device(pdev->vendor, PCI_ANY_ID, dev);
	}
	return (vf_state == ASSIGNED) ? assigned_vfs : vfs;
}
1099
/* Adaptive interrupt coalescing: recompute the event-queue delay (eqd)
 * from the RX packet rate (sampled at most once per second) and program
 * it into the HW when it changes. With AIC disabled the static eqd from
 * the eq object is used instead.
 */
static void be_eqd_update(struct be_adapter *adapter, struct be_eq_obj *eqo)
{
	/* NOTE(review): this indexes rx_obj[] (and 'delta' below reads
	 * rx_jiffies) before the eqo->idx bounds check further down —
	 * presumably idx is always valid when enable_aic is set; confirm. */
	struct be_rx_stats *stats = rx_stats(&adapter->rx_obj[eqo->idx]);
	ulong now = jiffies;
	ulong delta = now - stats->rx_jiffies;
	u64 pkts;
	unsigned int start, eqd;

	if (!eqo->enable_aic) {
		eqd = eqo->eqd;	/* static, user-configured delay */
		goto modify_eqd;
	}

	if (eqo->idx >= adapter->num_rx_qs)
		return;

	stats = rx_stats(&adapter->rx_obj[eqo->idx]);

	/* Wrapped around */
	if (time_before(now, stats->rx_jiffies)) {
		stats->rx_jiffies = now;
		return;
	}

	/* Update once a second */
	if (delta < HZ)
		return;

	/* Read rx_pkts consistently against concurrent stat updates */
	do {
		start = u64_stats_fetch_begin_bh(&stats->sync);
		pkts = stats->rx_pkts;
	} while (u64_stats_fetch_retry_bh(&stats->sync, start));

	stats->rx_pps = (unsigned long)(pkts - stats->rx_pkts_prev) / (delta / HZ);
	stats->rx_pkts_prev = pkts;
	stats->rx_jiffies = now;
	/* Scale pps into an eqd value, clamped to the eq's limits; very
	 * low rates disable coalescing entirely. */
	eqd = (stats->rx_pps / 110000) << 3;
	eqd = min(eqd, eqo->max_eqd);
	eqd = max(eqd, eqo->min_eqd);
	if (eqd < 10)
		eqd = 0;

modify_eqd:
	if (eqd != eqo->cur_eqd) {
		be_cmd_modify_eqd(adapter, eqo->q.id, eqd);
		eqo->cur_eqd = eqd;
	}
}
1148
Sathya Perla3abcded2010-10-03 22:12:27 -07001149static void be_rx_stats_update(struct be_rx_obj *rxo,
Sathya Perla2e588f82011-03-11 02:49:26 +00001150 struct be_rx_compl_info *rxcp)
Sathya Perla4097f662009-03-24 16:40:13 -07001151{
Sathya Perlaac124ff2011-07-25 19:10:14 +00001152 struct be_rx_stats *stats = rx_stats(rxo);
Sathya Perla4097f662009-03-24 16:40:13 -07001153
Sathya Perlaab1594e2011-07-25 19:10:15 +00001154 u64_stats_update_begin(&stats->sync);
Sathya Perla3abcded2010-10-03 22:12:27 -07001155 stats->rx_compl++;
Sathya Perla2e588f82011-03-11 02:49:26 +00001156 stats->rx_bytes += rxcp->pkt_size;
Sathya Perla3abcded2010-10-03 22:12:27 -07001157 stats->rx_pkts++;
Sathya Perla2e588f82011-03-11 02:49:26 +00001158 if (rxcp->pkt_type == BE_MULTICAST_PACKET)
Sathya Perla3abcded2010-10-03 22:12:27 -07001159 stats->rx_mcast_pkts++;
Sathya Perla2e588f82011-03-11 02:49:26 +00001160 if (rxcp->err)
Sathya Perlaac124ff2011-07-25 19:10:14 +00001161 stats->rx_compl_err++;
Sathya Perlaab1594e2011-07-25 19:10:15 +00001162 u64_stats_update_end(&stats->sync);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001163}
1164
Sathya Perla2e588f82011-03-11 02:49:26 +00001165static inline bool csum_passed(struct be_rx_compl_info *rxcp)
Ajit Khaparde728a9972009-04-13 15:41:22 -07001166{
Padmanabh Ratnakar19fad862011-03-07 03:08:16 +00001167 /* L4 checksum is not reliable for non TCP/UDP packets.
1168 * Also ignore ipcksm for ipv6 pkts */
Sathya Perla2e588f82011-03-11 02:49:26 +00001169 return (rxcp->tcpf || rxcp->udpf) && rxcp->l4_csum &&
1170 (rxcp->ip_csum || rxcp->ipv6);
Ajit Khaparde728a9972009-04-13 15:41:22 -07001171}
1172
/* Retrieve (and consume) the page-info entry for RX queue slot
 * 'frag_idx'. If this entry was the last user of its DMA-mapped page,
 * the mapping is released. Decrements the queue's used count; the
 * caller takes over the page reference.
 */
static struct be_rx_page_info *get_rx_page_info(struct be_rx_obj *rxo,
		u16 frag_idx)
{
	struct be_adapter *adapter = rxo->adapter;
	struct be_rx_page_info *rx_page_info;
	struct be_queue_info *rxq = &rxo->q;

	rx_page_info = &rxo->page_info_tbl[frag_idx];
	BUG_ON(!rx_page_info->page);

	/* Pages are shared across slots; unmap only on the last user */
	if (rx_page_info->last_page_user) {
		dma_unmap_page(&adapter->pdev->dev,
			dma_unmap_addr(rx_page_info, bus),
			adapter->big_page_size, DMA_FROM_DEVICE);
		rx_page_info->last_page_user = false;
	}

	atomic_dec(&rxq->used);
	return rx_page_info;
}
1193
1194/* Throwaway the data in the Rx completion */
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001195static void be_rx_compl_discard(struct be_rx_obj *rxo,
1196 struct be_rx_compl_info *rxcp)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001197{
Sathya Perla3abcded2010-10-03 22:12:27 -07001198 struct be_queue_info *rxq = &rxo->q;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001199 struct be_rx_page_info *page_info;
Sathya Perla2e588f82011-03-11 02:49:26 +00001200 u16 i, num_rcvd = rxcp->num_rcvd;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001201
Padmanabh Ratnakare80d9da2011-03-07 03:07:58 +00001202 for (i = 0; i < num_rcvd; i++) {
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001203 page_info = get_rx_page_info(rxo, rxcp->rxq_idx);
Padmanabh Ratnakare80d9da2011-03-07 03:07:58 +00001204 put_page(page_info->page);
1205 memset(page_info, 0, sizeof(*page_info));
Sathya Perla2e588f82011-03-11 02:49:26 +00001206 index_inc(&rxcp->rxq_idx, rxq->len);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001207 }
1208}
1209
/*
 * skb_fill_rx_data forms a complete skb for an ether frame
 * indicated by rxcp.
 *
 * Tiny packets (<= BE_HDR_LEN) are copied entirely into the skb's linear
 * area; larger packets get only the Ethernet header copied and the rest
 * attached as page fragments, coalescing consecutive RX fragments that
 * live in the same physical page into a single skb frag slot.
 */
static void skb_fill_rx_data(struct be_rx_obj *rxo, struct sk_buff *skb,
			struct be_rx_compl_info *rxcp)
{
	struct be_queue_info *rxq = &rxo->q;
	struct be_rx_page_info *page_info;
	u16 i, j;
	u16 hdr_len, curr_frag_len, remaining;
	u8 *start;

	page_info = get_rx_page_info(rxo, rxcp->rxq_idx);
	start = page_address(page_info->page) + page_info->page_offset;
	prefetch(start);

	/* Copy data in the first descriptor of this completion */
	curr_frag_len = min(rxcp->pkt_size, rx_frag_size);

	skb->len = curr_frag_len;
	if (curr_frag_len <= BE_HDR_LEN) { /* tiny packet */
		memcpy(skb->data, start, curr_frag_len);
		/* Complete packet has now been moved to data */
		put_page(page_info->page);
		skb->data_len = 0;
		skb->tail += curr_frag_len;
	} else {
		/* Copy only the Ethernet header; hand the payload over as
		 * a page fragment (zero-copy for the bulk of the data). */
		hdr_len = ETH_HLEN;
		memcpy(skb->data, start, hdr_len);
		skb_shinfo(skb)->nr_frags = 1;
		skb_frag_set_page(skb, 0, page_info->page);
		skb_shinfo(skb)->frags[0].page_offset =
					page_info->page_offset + hdr_len;
		skb_frag_size_set(&skb_shinfo(skb)->frags[0], curr_frag_len - hdr_len);
		skb->data_len = curr_frag_len - hdr_len;
		skb->truesize += rx_frag_size;
		skb->tail += hdr_len;
	}
	page_info->page = NULL;	/* ownership moved to the skb (or dropped) */

	if (rxcp->pkt_size <= rx_frag_size) {
		BUG_ON(rxcp->num_rcvd != 1);
		return;
	}

	/* More frags present for this completion */
	index_inc(&rxcp->rxq_idx, rxq->len);
	remaining = rxcp->pkt_size - curr_frag_len;
	for (i = 1, j = 0; i < rxcp->num_rcvd; i++) {
		page_info = get_rx_page_info(rxo, rxcp->rxq_idx);
		curr_frag_len = min(remaining, rx_frag_size);

		/* Coalesce all frags from the same physical page in one slot */
		if (page_info->page_offset == 0) {
			/* Fresh page */
			j++;
			skb_frag_set_page(skb, j, page_info->page);
			skb_shinfo(skb)->frags[j].page_offset =
							page_info->page_offset;
			skb_frag_size_set(&skb_shinfo(skb)->frags[j], 0);
			skb_shinfo(skb)->nr_frags++;
		} else {
			/* Same page as previous frag: the skb already holds
			 * a reference, drop this duplicate one */
			put_page(page_info->page);
		}

		skb_frag_size_add(&skb_shinfo(skb)->frags[j], curr_frag_len);
		skb->len += curr_frag_len;
		skb->data_len += curr_frag_len;
		skb->truesize += rx_frag_size;
		remaining -= curr_frag_len;
		index_inc(&rxcp->rxq_idx, rxq->len);
		page_info->page = NULL;
	}
	BUG_ON(j > MAX_SKB_FRAGS);
}
1286
Ajit Khaparde5be93b92009-07-21 12:36:19 -07001287/* Process the RX completion indicated by rxcp when GRO is disabled */
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001288static void be_rx_compl_process(struct be_rx_obj *rxo,
1289 struct be_rx_compl_info *rxcp)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001290{
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001291 struct be_adapter *adapter = rxo->adapter;
Michał Mirosław6332c8d2011-04-07 02:43:48 +00001292 struct net_device *netdev = adapter->netdev;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001293 struct sk_buff *skb;
Sathya Perla89420422010-02-17 01:35:26 +00001294
Eric Dumazetbb349bb2012-01-25 03:56:30 +00001295 skb = netdev_alloc_skb_ip_align(netdev, BE_RX_SKB_ALLOC_SIZE);
Sathya Perlaa058a632010-02-17 01:34:22 +00001296 if (unlikely(!skb)) {
Sathya Perlaac124ff2011-07-25 19:10:14 +00001297 rx_stats(rxo)->rx_drops_no_skbs++;
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001298 be_rx_compl_discard(rxo, rxcp);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001299 return;
1300 }
1301
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001302 skb_fill_rx_data(rxo, skb, rxcp);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001303
Michał Mirosław6332c8d2011-04-07 02:43:48 +00001304 if (likely((netdev->features & NETIF_F_RXCSUM) && csum_passed(rxcp)))
Ajit Khaparde728a9972009-04-13 15:41:22 -07001305 skb->ip_summed = CHECKSUM_UNNECESSARY;
Somnath Koturc6ce2f42010-10-25 01:11:58 +00001306 else
1307 skb_checksum_none_assert(skb);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001308
Michał Mirosław6332c8d2011-04-07 02:43:48 +00001309 skb->protocol = eth_type_trans(skb, netdev);
Somnath Koturaaa6dae2012-05-02 03:40:49 +00001310 skb_record_rx_queue(skb, rxo - &adapter->rx_obj[0]);
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001311 if (netdev->features & NETIF_F_RXHASH)
Ajit Khaparde4b972912011-04-06 18:07:43 +00001312 skb->rxhash = rxcp->rss_hash;
1313
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001314
Jiri Pirko343e43c2011-08-25 02:50:51 +00001315 if (rxcp->vlanf)
Ajit Khaparde4c5102f2011-07-12 22:10:01 -07001316 __vlan_hwaccel_put_tag(skb, rxcp->vlan_tag);
1317
1318 netif_receive_skb(skb);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001319}
1320
Ajit Khaparde5be93b92009-07-21 12:36:19 -07001321/* Process the RX completion indicated by rxcp when GRO is enabled */
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001322void be_rx_compl_process_gro(struct be_rx_obj *rxo, struct napi_struct *napi,
1323 struct be_rx_compl_info *rxcp)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001324{
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001325 struct be_adapter *adapter = rxo->adapter;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001326 struct be_rx_page_info *page_info;
Ajit Khaparde5be93b92009-07-21 12:36:19 -07001327 struct sk_buff *skb = NULL;
Sathya Perla3abcded2010-10-03 22:12:27 -07001328 struct be_queue_info *rxq = &rxo->q;
Sathya Perla2e588f82011-03-11 02:49:26 +00001329 u16 remaining, curr_frag_len;
1330 u16 i, j;
Ajit Khaparde3968fa12011-02-20 11:41:53 +00001331
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001332 skb = napi_get_frags(napi);
Ajit Khaparde5be93b92009-07-21 12:36:19 -07001333 if (!skb) {
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001334 be_rx_compl_discard(rxo, rxcp);
Ajit Khaparde5be93b92009-07-21 12:36:19 -07001335 return;
1336 }
1337
Sathya Perla2e588f82011-03-11 02:49:26 +00001338 remaining = rxcp->pkt_size;
1339 for (i = 0, j = -1; i < rxcp->num_rcvd; i++) {
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001340 page_info = get_rx_page_info(rxo, rxcp->rxq_idx);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001341
1342 curr_frag_len = min(remaining, rx_frag_size);
1343
Ajit Khapardebd46cb62009-06-26 02:51:07 +00001344 /* Coalesce all frags from the same physical page in one slot */
1345 if (i == 0 || page_info->page_offset == 0) {
1346 /* First frag or Fresh page */
1347 j++;
Ian Campbellb061b392011-08-29 23:18:23 +00001348 skb_frag_set_page(skb, j, page_info->page);
Ajit Khaparde5be93b92009-07-21 12:36:19 -07001349 skb_shinfo(skb)->frags[j].page_offset =
1350 page_info->page_offset;
Eric Dumazet9e903e02011-10-18 21:00:24 +00001351 skb_frag_size_set(&skb_shinfo(skb)->frags[j], 0);
Ajit Khapardebd46cb62009-06-26 02:51:07 +00001352 } else {
1353 put_page(page_info->page);
1354 }
Eric Dumazet9e903e02011-10-18 21:00:24 +00001355 skb_frag_size_add(&skb_shinfo(skb)->frags[j], curr_frag_len);
Eric Dumazetbdb28a92011-10-13 06:31:02 +00001356 skb->truesize += rx_frag_size;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001357 remaining -= curr_frag_len;
Sathya Perla2e588f82011-03-11 02:49:26 +00001358 index_inc(&rxcp->rxq_idx, rxq->len);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001359 memset(page_info, 0, sizeof(*page_info));
1360 }
Ajit Khapardebd46cb62009-06-26 02:51:07 +00001361 BUG_ON(j > MAX_SKB_FRAGS);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001362
Ajit Khaparde5be93b92009-07-21 12:36:19 -07001363 skb_shinfo(skb)->nr_frags = j + 1;
Sathya Perla2e588f82011-03-11 02:49:26 +00001364 skb->len = rxcp->pkt_size;
1365 skb->data_len = rxcp->pkt_size;
Ajit Khaparde5be93b92009-07-21 12:36:19 -07001366 skb->ip_summed = CHECKSUM_UNNECESSARY;
Somnath Koturaaa6dae2012-05-02 03:40:49 +00001367 skb_record_rx_queue(skb, rxo - &adapter->rx_obj[0]);
Ajit Khaparde4b972912011-04-06 18:07:43 +00001368 if (adapter->netdev->features & NETIF_F_RXHASH)
1369 skb->rxhash = rxcp->rss_hash;
Ajit Khaparde5be93b92009-07-21 12:36:19 -07001370
Jiri Pirko343e43c2011-08-25 02:50:51 +00001371 if (rxcp->vlanf)
Ajit Khaparde4c5102f2011-07-12 22:10:01 -07001372 __vlan_hwaccel_put_tag(skb, rxcp->vlan_tag);
1373
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001374 napi_gro_frags(napi);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001375}
1376
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001377static void be_parse_rx_compl_v1(struct be_eth_rx_compl *compl,
1378 struct be_rx_compl_info *rxcp)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001379{
Sathya Perla2e588f82011-03-11 02:49:26 +00001380 rxcp->pkt_size =
1381 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, pktsize, compl);
1382 rxcp->vlanf = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, vtp, compl);
1383 rxcp->err = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, err, compl);
1384 rxcp->tcpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, tcpf, compl);
Padmanabh Ratnakar9ecb42f2011-03-15 14:57:09 -07001385 rxcp->udpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, udpf, compl);
Sathya Perla2e588f82011-03-11 02:49:26 +00001386 rxcp->ip_csum =
1387 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, ipcksm, compl);
1388 rxcp->l4_csum =
1389 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, l4_cksm, compl);
1390 rxcp->ipv6 =
1391 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, ip_version, compl);
1392 rxcp->rxq_idx =
1393 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, fragndx, compl);
1394 rxcp->num_rcvd =
1395 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, numfrags, compl);
1396 rxcp->pkt_type =
1397 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, cast_enc, compl);
Ajit Khaparde4b972912011-04-06 18:07:43 +00001398 rxcp->rss_hash =
Sarveshwar Bandic2979772012-07-25 21:29:50 +00001399 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, rsshash, compl);
Sathya Perla15d72182011-03-21 20:49:26 +00001400 if (rxcp->vlanf) {
1401 rxcp->vtm = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, vtm,
David S. Miller3c709f82011-05-11 14:26:15 -04001402 compl);
1403 rxcp->vlan_tag = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, vlan_tag,
1404 compl);
Sathya Perla15d72182011-03-21 20:49:26 +00001405 }
Sathya Perla12004ae2011-08-02 19:57:46 +00001406 rxcp->port = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, port, compl);
Sathya Perla2e588f82011-03-11 02:49:26 +00001407}
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001408
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001409static void be_parse_rx_compl_v0(struct be_eth_rx_compl *compl,
1410 struct be_rx_compl_info *rxcp)
Sathya Perla2e588f82011-03-11 02:49:26 +00001411{
1412 rxcp->pkt_size =
1413 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, pktsize, compl);
1414 rxcp->vlanf = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, vtp, compl);
1415 rxcp->err = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, err, compl);
1416 rxcp->tcpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, tcpf, compl);
Padmanabh Ratnakar9ecb42f2011-03-15 14:57:09 -07001417 rxcp->udpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, udpf, compl);
Sathya Perla2e588f82011-03-11 02:49:26 +00001418 rxcp->ip_csum =
1419 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, ipcksm, compl);
1420 rxcp->l4_csum =
1421 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, l4_cksm, compl);
1422 rxcp->ipv6 =
1423 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, ip_version, compl);
1424 rxcp->rxq_idx =
1425 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, fragndx, compl);
1426 rxcp->num_rcvd =
1427 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, numfrags, compl);
1428 rxcp->pkt_type =
1429 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, cast_enc, compl);
Ajit Khaparde4b972912011-04-06 18:07:43 +00001430 rxcp->rss_hash =
Sarveshwar Bandic2979772012-07-25 21:29:50 +00001431 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, rsshash, compl);
Sathya Perla15d72182011-03-21 20:49:26 +00001432 if (rxcp->vlanf) {
1433 rxcp->vtm = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, vtm,
David S. Miller3c709f82011-05-11 14:26:15 -04001434 compl);
1435 rxcp->vlan_tag = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, vlan_tag,
1436 compl);
Sathya Perla15d72182011-03-21 20:49:26 +00001437 }
Sathya Perla12004ae2011-08-02 19:57:46 +00001438 rxcp->port = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, port, compl);
Sathya Perla2e588f82011-03-11 02:49:26 +00001439}
1440
1441static struct be_rx_compl_info *be_rx_compl_get(struct be_rx_obj *rxo)
1442{
1443 struct be_eth_rx_compl *compl = queue_tail_node(&rxo->cq);
1444 struct be_rx_compl_info *rxcp = &rxo->rxcp;
1445 struct be_adapter *adapter = rxo->adapter;
1446
1447 /* For checking the valid bit it is Ok to use either definition as the
1448 * valid bit is at the same position in both v0 and v1 Rx compl */
1449 if (compl->dw[offsetof(struct amap_eth_rx_compl_v1, valid) / 32] == 0)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001450 return NULL;
1451
Sathya Perlaf3eb62d2010-06-29 00:11:17 +00001452 rmb();
Sathya Perla2e588f82011-03-11 02:49:26 +00001453 be_dws_le_to_cpu(compl, sizeof(*compl));
1454
1455 if (adapter->be3_native)
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001456 be_parse_rx_compl_v1(compl, rxcp);
Sathya Perla2e588f82011-03-11 02:49:26 +00001457 else
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001458 be_parse_rx_compl_v0(compl, rxcp);
Sathya Perla2e588f82011-03-11 02:49:26 +00001459
Sathya Perla15d72182011-03-21 20:49:26 +00001460 if (rxcp->vlanf) {
1461 /* vlanf could be wrongly set in some cards.
1462 * ignore if vtm is not set */
Sathya Perla752961a2011-10-24 02:45:03 +00001463 if ((adapter->function_mode & FLEX10_MODE) && !rxcp->vtm)
Sathya Perla15d72182011-03-21 20:49:26 +00001464 rxcp->vlanf = 0;
Sathya Perla2e588f82011-03-11 02:49:26 +00001465
Sathya Perla15d72182011-03-21 20:49:26 +00001466 if (!lancer_chip(adapter))
David S. Miller3c709f82011-05-11 14:26:15 -04001467 rxcp->vlan_tag = swab16(rxcp->vlan_tag);
Sathya Perla2e588f82011-03-11 02:49:26 +00001468
Somnath Kotur939cf302011-08-18 21:51:49 -07001469 if (adapter->pvid == (rxcp->vlan_tag & VLAN_VID_MASK) &&
David S. Miller3c709f82011-05-11 14:26:15 -04001470 !adapter->vlan_tag[rxcp->vlan_tag])
Sathya Perla15d72182011-03-21 20:49:26 +00001471 rxcp->vlanf = 0;
1472 }
Sathya Perla2e588f82011-03-11 02:49:26 +00001473
1474 /* As the compl has been parsed, reset it; we wont touch it again */
1475 compl->dw[offsetof(struct amap_eth_rx_compl_v1, valid) / 32] = 0;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001476
Sathya Perla3abcded2010-10-03 22:12:27 -07001477 queue_tail_inc(&rxo->cq);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001478 return rxcp;
1479}
1480
Eric Dumazet1829b082011-03-01 05:48:12 +00001481static inline struct page *be_alloc_pages(u32 size, gfp_t gfp)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001482{
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001483 u32 order = get_order(size);
Eric Dumazet1829b082011-03-01 05:48:12 +00001484
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001485 if (order > 0)
Eric Dumazet1829b082011-03-01 05:48:12 +00001486 gfp |= __GFP_COMP;
1487 return alloc_pages(gfp, order);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001488}
1489
1490/*
1491 * Allocate a page, split it to fragments of size rx_frag_size and post as
1492 * receive buffers to BE
1493 */
Eric Dumazet1829b082011-03-01 05:48:12 +00001494static void be_post_rx_frags(struct be_rx_obj *rxo, gfp_t gfp)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001495{
Sathya Perla3abcded2010-10-03 22:12:27 -07001496 struct be_adapter *adapter = rxo->adapter;
Sathya Perla26d92f92010-01-21 22:52:08 -08001497 struct be_rx_page_info *page_info = NULL, *prev_page_info = NULL;
Sathya Perla3abcded2010-10-03 22:12:27 -07001498 struct be_queue_info *rxq = &rxo->q;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001499 struct page *pagep = NULL;
1500 struct be_eth_rx_d *rxd;
1501 u64 page_dmaaddr = 0, frag_dmaaddr;
1502 u32 posted, page_offset = 0;
1503
Sathya Perla3abcded2010-10-03 22:12:27 -07001504 page_info = &rxo->page_info_tbl[rxq->head];
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001505 for (posted = 0; posted < MAX_RX_POST && !page_info->page; posted++) {
1506 if (!pagep) {
Eric Dumazet1829b082011-03-01 05:48:12 +00001507 pagep = be_alloc_pages(adapter->big_page_size, gfp);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001508 if (unlikely(!pagep)) {
Sathya Perlaac124ff2011-07-25 19:10:14 +00001509 rx_stats(rxo)->rx_post_fail++;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001510 break;
1511 }
Ivan Vecera2b7bceb2011-02-02 08:05:12 +00001512 page_dmaaddr = dma_map_page(&adapter->pdev->dev, pagep,
1513 0, adapter->big_page_size,
1514 DMA_FROM_DEVICE);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001515 page_info->page_offset = 0;
1516 } else {
1517 get_page(pagep);
1518 page_info->page_offset = page_offset + rx_frag_size;
1519 }
1520 page_offset = page_info->page_offset;
1521 page_info->page = pagep;
FUJITA Tomonorifac6da52010-04-01 16:53:22 +00001522 dma_unmap_addr_set(page_info, bus, page_dmaaddr);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001523 frag_dmaaddr = page_dmaaddr + page_info->page_offset;
1524
1525 rxd = queue_head_node(rxq);
1526 rxd->fragpa_lo = cpu_to_le32(frag_dmaaddr & 0xFFFFFFFF);
1527 rxd->fragpa_hi = cpu_to_le32(upper_32_bits(frag_dmaaddr));
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001528
1529 /* Any space left in the current big page for another frag? */
1530 if ((page_offset + rx_frag_size + rx_frag_size) >
1531 adapter->big_page_size) {
1532 pagep = NULL;
1533 page_info->last_page_user = true;
1534 }
Sathya Perla26d92f92010-01-21 22:52:08 -08001535
1536 prev_page_info = page_info;
1537 queue_head_inc(rxq);
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001538 page_info = &rxo->page_info_tbl[rxq->head];
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001539 }
1540 if (pagep)
Sathya Perla26d92f92010-01-21 22:52:08 -08001541 prev_page_info->last_page_user = true;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001542
1543 if (posted) {
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001544 atomic_add(posted, &rxq->used);
Sathya Perla8788fdc2009-07-27 22:52:03 +00001545 be_rxq_notify(adapter, rxq->id, posted);
Sathya Perlaea1dae12009-03-19 23:56:20 -07001546 } else if (atomic_read(&rxq->used) == 0) {
1547 /* Let be_worker replenish when memory is available */
Sathya Perla3abcded2010-10-03 22:12:27 -07001548 rxo->rx_post_starved = true;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001549 }
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001550}
1551
Sathya Perla5fb379e2009-06-18 00:02:59 +00001552static struct be_eth_tx_compl *be_tx_compl_get(struct be_queue_info *tx_cq)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001553{
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001554 struct be_eth_tx_compl *txcp = queue_tail_node(tx_cq);
1555
1556 if (txcp->dw[offsetof(struct amap_eth_tx_compl, valid) / 32] == 0)
1557 return NULL;
1558
Sathya Perlaf3eb62d2010-06-29 00:11:17 +00001559 rmb();
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001560 be_dws_le_to_cpu(txcp, sizeof(*txcp));
1561
1562 txcp->dw[offsetof(struct amap_eth_tx_compl, valid) / 32] = 0;
1563
1564 queue_tail_inc(tx_cq);
1565 return txcp;
1566}
1567
Sathya Perla3c8def92011-06-12 20:01:58 +00001568static u16 be_tx_compl_process(struct be_adapter *adapter,
1569 struct be_tx_obj *txo, u16 last_index)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001570{
Sathya Perla3c8def92011-06-12 20:01:58 +00001571 struct be_queue_info *txq = &txo->q;
Alexander Duycka73b7962009-12-02 16:48:18 +00001572 struct be_eth_wrb *wrb;
Sathya Perla3c8def92011-06-12 20:01:58 +00001573 struct sk_buff **sent_skbs = txo->sent_skb_list;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001574 struct sk_buff *sent_skb;
Sathya Perlaec43b1a2010-03-22 20:41:34 +00001575 u16 cur_index, num_wrbs = 1; /* account for hdr wrb */
1576 bool unmap_skb_hdr = true;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001577
Sathya Perlaec43b1a2010-03-22 20:41:34 +00001578 sent_skb = sent_skbs[txq->tail];
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001579 BUG_ON(!sent_skb);
Sathya Perlaec43b1a2010-03-22 20:41:34 +00001580 sent_skbs[txq->tail] = NULL;
1581
1582 /* skip header wrb */
Alexander Duycka73b7962009-12-02 16:48:18 +00001583 queue_tail_inc(txq);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001584
Sathya Perlaec43b1a2010-03-22 20:41:34 +00001585 do {
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001586 cur_index = txq->tail;
Alexander Duycka73b7962009-12-02 16:48:18 +00001587 wrb = queue_tail_node(txq);
Ivan Vecera2b7bceb2011-02-02 08:05:12 +00001588 unmap_tx_frag(&adapter->pdev->dev, wrb,
1589 (unmap_skb_hdr && skb_headlen(sent_skb)));
Sathya Perlaec43b1a2010-03-22 20:41:34 +00001590 unmap_skb_hdr = false;
1591
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001592 num_wrbs++;
1593 queue_tail_inc(txq);
Sathya Perlaec43b1a2010-03-22 20:41:34 +00001594 } while (cur_index != last_index);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001595
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001596 kfree_skb(sent_skb);
Padmanabh Ratnakar4d586b82011-05-10 05:13:57 +00001597 return num_wrbs;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001598}
1599
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001600/* Return the number of events in the event queue */
1601static inline int events_get(struct be_eq_obj *eqo)
Sathya Perla859b1e42009-08-10 03:43:51 +00001602{
1603 struct be_eq_entry *eqe;
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001604 int num = 0;
Sathya Perla859b1e42009-08-10 03:43:51 +00001605
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001606 do {
1607 eqe = queue_tail_node(&eqo->q);
1608 if (eqe->evt == 0)
1609 break;
1610
1611 rmb();
Sathya Perla859b1e42009-08-10 03:43:51 +00001612 eqe->evt = 0;
1613 num++;
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001614 queue_tail_inc(&eqo->q);
1615 } while (true);
Sathya Perla859b1e42009-08-10 03:43:51 +00001616
1617 return num;
1618}
1619
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001620static int event_handle(struct be_eq_obj *eqo)
Sathya Perla859b1e42009-08-10 03:43:51 +00001621{
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001622 bool rearm = false;
1623 int num = events_get(eqo);
Sathya Perla859b1e42009-08-10 03:43:51 +00001624
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001625 /* Deal with any spurious interrupts that come without events */
1626 if (!num)
1627 rearm = true;
Sathya Perla859b1e42009-08-10 03:43:51 +00001628
Padmanabh Ratnakaraf311fe2012-04-25 01:46:39 +00001629 if (num || msix_enabled(eqo->adapter))
1630 be_eq_notify(eqo->adapter, eqo->q.id, rearm, true, num);
1631
Sathya Perla859b1e42009-08-10 03:43:51 +00001632 if (num)
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001633 napi_schedule(&eqo->napi);
1634
1635 return num;
Sathya Perla859b1e42009-08-10 03:43:51 +00001636}
1637
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001638/* Leaves the EQ is disarmed state */
1639static void be_eq_clean(struct be_eq_obj *eqo)
1640{
1641 int num = events_get(eqo);
1642
1643 be_eq_notify(eqo->adapter, eqo->q.id, false, true, num);
1644}
1645
1646static void be_rx_cq_clean(struct be_rx_obj *rxo)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001647{
1648 struct be_rx_page_info *page_info;
Sathya Perla3abcded2010-10-03 22:12:27 -07001649 struct be_queue_info *rxq = &rxo->q;
1650 struct be_queue_info *rx_cq = &rxo->cq;
Sathya Perla2e588f82011-03-11 02:49:26 +00001651 struct be_rx_compl_info *rxcp;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001652 u16 tail;
1653
1654 /* First cleanup pending rx completions */
Sathya Perla3abcded2010-10-03 22:12:27 -07001655 while ((rxcp = be_rx_compl_get(rxo)) != NULL) {
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001656 be_rx_compl_discard(rxo, rxcp);
1657 be_cq_notify(rxo->adapter, rx_cq->id, false, 1);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001658 }
1659
1660 /* Then free posted rx buffer that were not used */
1661 tail = (rxq->head + rxq->len - atomic_read(&rxq->used)) % rxq->len;
Sathya Perlacdab23b2009-08-10 03:43:23 +00001662 for (; atomic_read(&rxq->used) > 0; index_inc(&tail, rxq->len)) {
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001663 page_info = get_rx_page_info(rxo, tail);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001664 put_page(page_info->page);
1665 memset(page_info, 0, sizeof(*page_info));
1666 }
1667 BUG_ON(atomic_read(&rxq->used));
Sathya Perla482c9e72011-06-29 23:33:17 +00001668 rxq->tail = rxq->head = 0;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001669}
1670
Sathya Perla0ae57bb2012-02-23 18:50:14 +00001671static void be_tx_compl_clean(struct be_adapter *adapter)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001672{
Sathya Perla0ae57bb2012-02-23 18:50:14 +00001673 struct be_tx_obj *txo;
1674 struct be_queue_info *txq;
Sathya Perlaa8e91792009-08-10 03:42:43 +00001675 struct be_eth_tx_compl *txcp;
Padmanabh Ratnakar4d586b82011-05-10 05:13:57 +00001676 u16 end_idx, cmpl = 0, timeo = 0, num_wrbs = 0;
Sathya Perlab03388d2010-02-18 00:37:17 +00001677 struct sk_buff *sent_skb;
1678 bool dummy_wrb;
Sathya Perla0ae57bb2012-02-23 18:50:14 +00001679 int i, pending_txqs;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001680
Sathya Perlaa8e91792009-08-10 03:42:43 +00001681 /* Wait for a max of 200ms for all the tx-completions to arrive. */
1682 do {
Sathya Perla0ae57bb2012-02-23 18:50:14 +00001683 pending_txqs = adapter->num_tx_qs;
1684
1685 for_all_tx_queues(adapter, txo, i) {
1686 txq = &txo->q;
1687 while ((txcp = be_tx_compl_get(&txo->cq))) {
1688 end_idx =
1689 AMAP_GET_BITS(struct amap_eth_tx_compl,
1690 wrb_index, txcp);
1691 num_wrbs += be_tx_compl_process(adapter, txo,
1692 end_idx);
1693 cmpl++;
1694 }
1695 if (cmpl) {
1696 be_cq_notify(adapter, txo->cq.id, false, cmpl);
1697 atomic_sub(num_wrbs, &txq->used);
1698 cmpl = 0;
1699 num_wrbs = 0;
1700 }
1701 if (atomic_read(&txq->used) == 0)
1702 pending_txqs--;
Sathya Perlaa8e91792009-08-10 03:42:43 +00001703 }
1704
Sathya Perla0ae57bb2012-02-23 18:50:14 +00001705 if (pending_txqs == 0 || ++timeo > 200)
Sathya Perlaa8e91792009-08-10 03:42:43 +00001706 break;
1707
1708 mdelay(1);
1709 } while (true);
1710
Sathya Perla0ae57bb2012-02-23 18:50:14 +00001711 for_all_tx_queues(adapter, txo, i) {
1712 txq = &txo->q;
1713 if (atomic_read(&txq->used))
1714 dev_err(&adapter->pdev->dev, "%d pending tx-compls\n",
1715 atomic_read(&txq->used));
Sathya Perlab03388d2010-02-18 00:37:17 +00001716
Sathya Perla0ae57bb2012-02-23 18:50:14 +00001717 /* free posted tx for which compls will never arrive */
1718 while (atomic_read(&txq->used)) {
1719 sent_skb = txo->sent_skb_list[txq->tail];
1720 end_idx = txq->tail;
1721 num_wrbs = wrb_cnt_for_skb(adapter, sent_skb,
1722 &dummy_wrb);
1723 index_adv(&end_idx, num_wrbs - 1, txq->len);
1724 num_wrbs = be_tx_compl_process(adapter, txo, end_idx);
1725 atomic_sub(num_wrbs, &txq->used);
1726 }
Sathya Perlab03388d2010-02-18 00:37:17 +00001727 }
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001728}
1729
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001730static void be_evt_queues_destroy(struct be_adapter *adapter)
1731{
1732 struct be_eq_obj *eqo;
1733 int i;
1734
1735 for_all_evt_queues(adapter, eqo, i) {
Padmanabh Ratnakar19d59aa2012-07-12 03:57:21 +00001736 if (eqo->q.created) {
1737 be_eq_clean(eqo);
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001738 be_cmd_q_destroy(adapter, &eqo->q, QTYPE_EQ);
Padmanabh Ratnakar19d59aa2012-07-12 03:57:21 +00001739 }
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001740 be_queue_free(adapter, &eqo->q);
1741 }
1742}
1743
1744static int be_evt_queues_create(struct be_adapter *adapter)
1745{
1746 struct be_queue_info *eq;
1747 struct be_eq_obj *eqo;
1748 int i, rc;
1749
1750 adapter->num_evt_qs = num_irqs(adapter);
1751
1752 for_all_evt_queues(adapter, eqo, i) {
1753 eqo->adapter = adapter;
1754 eqo->tx_budget = BE_TX_BUDGET;
1755 eqo->idx = i;
1756 eqo->max_eqd = BE_MAX_EQD;
1757 eqo->enable_aic = true;
1758
1759 eq = &eqo->q;
1760 rc = be_queue_alloc(adapter, eq, EVNT_Q_LEN,
1761 sizeof(struct be_eq_entry));
1762 if (rc)
1763 return rc;
1764
1765 rc = be_cmd_eq_create(adapter, eq, eqo->cur_eqd);
1766 if (rc)
1767 return rc;
1768 }
Sathya Perla1cfafab2012-02-23 18:50:15 +00001769 return 0;
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001770}
1771
Sathya Perla5fb379e2009-06-18 00:02:59 +00001772static void be_mcc_queues_destroy(struct be_adapter *adapter)
1773{
1774 struct be_queue_info *q;
Sathya Perla5fb379e2009-06-18 00:02:59 +00001775
Sathya Perla8788fdc2009-07-27 22:52:03 +00001776 q = &adapter->mcc_obj.q;
Sathya Perla5fb379e2009-06-18 00:02:59 +00001777 if (q->created)
Sathya Perla8788fdc2009-07-27 22:52:03 +00001778 be_cmd_q_destroy(adapter, q, QTYPE_MCCQ);
Sathya Perla5fb379e2009-06-18 00:02:59 +00001779 be_queue_free(adapter, q);
1780
Sathya Perla8788fdc2009-07-27 22:52:03 +00001781 q = &adapter->mcc_obj.cq;
Sathya Perla5fb379e2009-06-18 00:02:59 +00001782 if (q->created)
Sathya Perla8788fdc2009-07-27 22:52:03 +00001783 be_cmd_q_destroy(adapter, q, QTYPE_CQ);
Sathya Perla5fb379e2009-06-18 00:02:59 +00001784 be_queue_free(adapter, q);
1785}
1786
1787/* Must be called only after TX qs are created as MCC shares TX EQ */
1788static int be_mcc_queues_create(struct be_adapter *adapter)
1789{
1790 struct be_queue_info *q, *cq;
Sathya Perla5fb379e2009-06-18 00:02:59 +00001791
Sathya Perla8788fdc2009-07-27 22:52:03 +00001792 cq = &adapter->mcc_obj.cq;
Sathya Perla5fb379e2009-06-18 00:02:59 +00001793 if (be_queue_alloc(adapter, cq, MCC_CQ_LEN,
Sathya Perlaefd2e402009-07-27 22:53:10 +00001794 sizeof(struct be_mcc_compl)))
Sathya Perla5fb379e2009-06-18 00:02:59 +00001795 goto err;
1796
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001797 /* Use the default EQ for MCC completions */
1798 if (be_cmd_cq_create(adapter, cq, &mcc_eqo(adapter)->q, true, 0))
Sathya Perla5fb379e2009-06-18 00:02:59 +00001799 goto mcc_cq_free;
1800
Sathya Perla8788fdc2009-07-27 22:52:03 +00001801 q = &adapter->mcc_obj.q;
Sathya Perla5fb379e2009-06-18 00:02:59 +00001802 if (be_queue_alloc(adapter, q, MCC_Q_LEN, sizeof(struct be_mcc_wrb)))
1803 goto mcc_cq_destroy;
1804
Sathya Perla8788fdc2009-07-27 22:52:03 +00001805 if (be_cmd_mccq_create(adapter, q, cq))
Sathya Perla5fb379e2009-06-18 00:02:59 +00001806 goto mcc_q_free;
1807
1808 return 0;
1809
1810mcc_q_free:
1811 be_queue_free(adapter, q);
1812mcc_cq_destroy:
Sathya Perla8788fdc2009-07-27 22:52:03 +00001813 be_cmd_q_destroy(adapter, cq, QTYPE_CQ);
Sathya Perla5fb379e2009-06-18 00:02:59 +00001814mcc_cq_free:
1815 be_queue_free(adapter, cq);
1816err:
1817 return -1;
1818}
1819
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001820static void be_tx_queues_destroy(struct be_adapter *adapter)
1821{
1822 struct be_queue_info *q;
Sathya Perla3c8def92011-06-12 20:01:58 +00001823 struct be_tx_obj *txo;
1824 u8 i;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001825
Sathya Perla3c8def92011-06-12 20:01:58 +00001826 for_all_tx_queues(adapter, txo, i) {
1827 q = &txo->q;
1828 if (q->created)
1829 be_cmd_q_destroy(adapter, q, QTYPE_TXQ);
1830 be_queue_free(adapter, q);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001831
Sathya Perla3c8def92011-06-12 20:01:58 +00001832 q = &txo->cq;
1833 if (q->created)
1834 be_cmd_q_destroy(adapter, q, QTYPE_CQ);
1835 be_queue_free(adapter, q);
1836 }
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001837}
1838
Sathya Perladafc0fe2011-10-24 02:45:02 +00001839static int be_num_txqs_want(struct be_adapter *adapter)
1840{
Sathya Perla39f1d942012-05-08 19:41:24 +00001841 if (sriov_want(adapter) || be_is_mc(adapter) ||
1842 lancer_chip(adapter) || !be_physfn(adapter) ||
1843 adapter->generation == BE_GEN2)
Sathya Perladafc0fe2011-10-24 02:45:02 +00001844 return 1;
1845 else
1846 return MAX_TX_QS;
1847}
1848
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001849static int be_tx_cqs_create(struct be_adapter *adapter)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001850{
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001851 struct be_queue_info *cq, *eq;
1852 int status;
Sathya Perla3c8def92011-06-12 20:01:58 +00001853 struct be_tx_obj *txo;
1854 u8 i;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001855
Sathya Perladafc0fe2011-10-24 02:45:02 +00001856 adapter->num_tx_qs = be_num_txqs_want(adapter);
Padmanabh Ratnakar3bb62f42011-11-25 05:48:06 +00001857 if (adapter->num_tx_qs != MAX_TX_QS) {
1858 rtnl_lock();
Sathya Perladafc0fe2011-10-24 02:45:02 +00001859 netif_set_real_num_tx_queues(adapter->netdev,
1860 adapter->num_tx_qs);
Padmanabh Ratnakar3bb62f42011-11-25 05:48:06 +00001861 rtnl_unlock();
1862 }
Sathya Perladafc0fe2011-10-24 02:45:02 +00001863
Sathya Perla3c8def92011-06-12 20:01:58 +00001864 for_all_tx_queues(adapter, txo, i) {
1865 cq = &txo->cq;
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001866 status = be_queue_alloc(adapter, cq, TX_CQ_LEN,
1867 sizeof(struct be_eth_tx_compl));
1868 if (status)
1869 return status;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001870
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001871 /* If num_evt_qs is less than num_tx_qs, then more than
1872 * one txq share an eq
1873 */
1874 eq = &adapter->eq_obj[i % adapter->num_evt_qs].q;
1875 status = be_cmd_cq_create(adapter, cq, eq, false, 3);
1876 if (status)
1877 return status;
Sathya Perla3c8def92011-06-12 20:01:58 +00001878 }
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001879 return 0;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001880}
1881
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001882static int be_tx_qs_create(struct be_adapter *adapter)
1883{
1884 struct be_tx_obj *txo;
1885 int i, status;
1886
1887 for_all_tx_queues(adapter, txo, i) {
1888 status = be_queue_alloc(adapter, &txo->q, TX_Q_LEN,
1889 sizeof(struct be_eth_wrb));
1890 if (status)
1891 return status;
1892
1893 status = be_cmd_txq_create(adapter, &txo->q, &txo->cq);
1894 if (status)
1895 return status;
1896 }
1897
1898 return 0;
1899}
1900
1901static void be_rx_cqs_destroy(struct be_adapter *adapter)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001902{
1903 struct be_queue_info *q;
Sathya Perla3abcded2010-10-03 22:12:27 -07001904 struct be_rx_obj *rxo;
1905 int i;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001906
Sathya Perla3abcded2010-10-03 22:12:27 -07001907 for_all_rx_queues(adapter, rxo, i) {
Sathya Perla3abcded2010-10-03 22:12:27 -07001908 q = &rxo->cq;
1909 if (q->created)
1910 be_cmd_q_destroy(adapter, q, QTYPE_CQ);
1911 be_queue_free(adapter, q);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001912 }
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001913}
1914
/* Size the RX queue set and create a completion queue for each RX object.
 * CQs are distributed round-robin over the available event queues.
 * Returns 0 on success or a non-zero status from queue alloc/create.
 */
static int be_rx_cqs_create(struct be_adapter *adapter)
{
	struct be_queue_info *eq, *cq;
	struct be_rx_obj *rxo;
	int rc, i;

	/* We'll create as many RSS rings as there are irqs.
	 * But when there's only one irq there's no use creating RSS rings
	 */
	adapter->num_rx_qs = (num_irqs(adapter) > 1) ?
				num_irqs(adapter) + 1 : 1;
	/* Tell the stack the real RX queue count; rtnl lock required by API */
	if (adapter->num_rx_qs != MAX_RX_QS) {
		rtnl_lock();
		netif_set_real_num_rx_queues(adapter->netdev,
					     adapter->num_rx_qs);
		rtnl_unlock();
	}

	adapter->big_page_size = (1 << get_order(rx_frag_size)) * PAGE_SIZE;
	for_all_rx_queues(adapter, rxo, i) {
		rxo->adapter = adapter;
		cq = &rxo->cq;
		rc = be_queue_alloc(adapter, cq, RX_CQ_LEN,
				sizeof(struct be_eth_rx_compl));
		if (rc)
			return rc;

		/* Spread the RX CQs across the event queues round-robin */
		eq = &adapter->eq_obj[i % adapter->num_evt_qs].q;
		rc = be_cmd_cq_create(adapter, cq, eq, false, 3);
		if (rc)
			return rc;
	}

	if (adapter->num_rx_qs != MAX_RX_QS)
		dev_info(&adapter->pdev->dev,
			"Created only %d receive queues\n", adapter->num_rx_qs);

	return 0;
}
1954
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001955static irqreturn_t be_intx(int irq, void *dev)
1956{
1957 struct be_adapter *adapter = dev;
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001958 int num_evts;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001959
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001960 /* With INTx only one EQ is used */
1961 num_evts = event_handle(&adapter->eq_obj[0]);
1962 if (num_evts)
1963 return IRQ_HANDLED;
1964 else
1965 return IRQ_NONE;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001966}
1967
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001968static irqreturn_t be_msix(int irq, void *dev)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001969{
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001970 struct be_eq_obj *eqo = dev;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001971
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001972 event_handle(eqo);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001973 return IRQ_HANDLED;
1974}
1975
Sathya Perla2e588f82011-03-11 02:49:26 +00001976static inline bool do_gro(struct be_rx_compl_info *rxcp)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001977{
Sathya Perla2e588f82011-03-11 02:49:26 +00001978 return (rxcp->tcpf && !rxcp->err) ? true : false;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001979}
1980
/* NAPI RX work loop for one RX object: drain up to 'budget' completions,
 * dropping flush/partial/mis-filtered completions, and hand good frames
 * to GRO or the regular receive path. Notifies the CQ of consumed entries
 * and refills RX fragments when the queue runs low.
 * Returns the number of completions processed.
 */
static int be_process_rx(struct be_rx_obj *rxo, struct napi_struct *napi,
			int budget)
{
	struct be_adapter *adapter = rxo->adapter;
	struct be_queue_info *rx_cq = &rxo->cq;
	struct be_rx_compl_info *rxcp;
	u32 work_done;

	for (work_done = 0; work_done < budget; work_done++) {
		rxcp = be_rx_compl_get(rxo);
		if (!rxcp)
			break;

		/* Is it a flush compl that has no data */
		if (unlikely(rxcp->num_rcvd == 0))
			goto loop_continue;

		/* Discard compl with partial DMA Lancer B0 */
		if (unlikely(!rxcp->pkt_size)) {
			be_rx_compl_discard(rxo, rxcp);
			goto loop_continue;
		}

		/* On BE drop pkts that arrive due to imperfect filtering in
		 * promiscuous mode on some skews
		 */
		if (unlikely(rxcp->port != adapter->port_num &&
				!lancer_chip(adapter))) {
			be_rx_compl_discard(rxo, rxcp);
			goto loop_continue;
		}

		if (do_gro(rxcp))
			be_rx_compl_process_gro(rxo, napi, rxcp);
		else
			be_rx_compl_process(rxo, rxcp);
loop_continue:
		/* Stats are updated for every completion, even dropped ones */
		be_rx_stats_update(rxo, rxcp);
	}

	if (work_done) {
		be_cq_notify(adapter, rx_cq->id, true, work_done);

		/* Replenish posted RX fragments once below the watermark */
		if (atomic_read(&rxo->q.used) < RX_FRAGS_REFILL_WM)
			be_post_rx_frags(rxo, GFP_ATOMIC);
	}

	return work_done;
}
2030
/* Reap up to 'budget' TX completions for one TX object, free the
 * corresponding WRBs, and wake the netdev subqueue 'idx' if it was
 * stopped and enough WRB space has been reclaimed.
 * Returns true when the CQ was fully drained within budget.
 */
static bool be_process_tx(struct be_adapter *adapter, struct be_tx_obj *txo,
			int budget, int idx)
{
	struct be_eth_tx_compl *txcp;
	int num_wrbs = 0, work_done;

	for (work_done = 0; work_done < budget; work_done++) {
		txcp = be_tx_compl_get(&txo->cq);
		if (!txcp)
			break;
		/* Each completion may release several WRBs */
		num_wrbs += be_tx_compl_process(adapter, txo,
				AMAP_GET_BITS(struct amap_eth_tx_compl,
					wrb_index, txcp));
	}

	if (work_done) {
		be_cq_notify(adapter, txo->cq.id, true, work_done);
		atomic_sub(num_wrbs, &txo->q.used);

		/* As Tx wrbs have been freed up, wake up netdev queue
		 * if it was stopped due to lack of tx wrbs. */
		if (__netif_subqueue_stopped(adapter->netdev, idx) &&
			atomic_read(&txo->q.used) < txo->q.len / 2) {
			netif_wake_subqueue(adapter->netdev, idx);
		}

		/* u64 stats are protected by a per-queue seqcount */
		u64_stats_update_begin(&tx_stats(txo)->sync_compl);
		tx_stats(txo)->tx_compl += work_done;
		u64_stats_update_end(&tx_stats(txo)->sync_compl);
	}
	return (work_done < budget); /* Done */
}
Sathya Perla3c8def92011-06-12 20:01:58 +00002063
/* NAPI poll handler for one event queue: services all TX and RX queues
 * mapped to this EQ (queues are striped over EQs by num_evt_qs), runs
 * MCC processing on the MCC EQ, and either completes NAPI and re-arms
 * the EQ or stays in polling mode.
 * Returns the amount of work done (== budget to keep polling).
 */
int be_poll(struct napi_struct *napi, int budget)
{
	struct be_eq_obj *eqo = container_of(napi, struct be_eq_obj, napi);
	struct be_adapter *adapter = eqo->adapter;
	int max_work = 0, work, i;
	bool tx_done;

	/* Process all TXQs serviced by this EQ */
	for (i = eqo->idx; i < adapter->num_tx_qs; i += adapter->num_evt_qs) {
		tx_done = be_process_tx(adapter, &adapter->tx_obj[i],
					eqo->tx_budget, i);
		/* TX not fully drained: force another poll round */
		if (!tx_done)
			max_work = budget;
	}

	/* This loop will iterate twice for EQ0 in which
	 * completions of the last RXQ (default one) are also processed
	 * For other EQs the loop iterates only once
	 */
	for (i = eqo->idx; i < adapter->num_rx_qs; i += adapter->num_evt_qs) {
		work = be_process_rx(&adapter->rx_obj[i], napi, budget);
		max_work = max(work, max_work);
	}

	if (is_mcc_eqo(eqo))
		be_process_mcc(adapter);

	if (max_work < budget) {
		napi_complete(napi);
		/* Re-arm the EQ for interrupts */
		be_eq_notify(adapter, eqo->q.id, true, false, 0);
	} else {
		/* As we'll continue in polling mode, count and clear events */
		be_eq_notify(adapter, eqo->q.id, false, false, events_get(eqo));
	}
	return max_work;
}
2100
/* Poll the HW for unrecoverable errors: SLIPORT status registers on
 * Lancer chips, UE (unrecoverable error) status words via PCI config
 * space on BE chips. Sets adapter->hw_error and logs details when an
 * error is found. No-op if a critical error was already flagged.
 */
void be_detect_error(struct be_adapter *adapter)
{
	u32 ue_lo = 0, ue_hi = 0, ue_lo_mask = 0, ue_hi_mask = 0;
	u32 sliport_status = 0, sliport_err1 = 0, sliport_err2 = 0;
	u32 i;

	/* Already in error state; nothing more to detect */
	if (be_crit_error(adapter))
		return;

	if (lancer_chip(adapter)) {
		sliport_status = ioread32(adapter->db + SLIPORT_STATUS_OFFSET);
		if (sliport_status & SLIPORT_STATUS_ERR_MASK) {
			sliport_err1 = ioread32(adapter->db +
					SLIPORT_ERROR1_OFFSET);
			sliport_err2 = ioread32(adapter->db +
					SLIPORT_ERROR2_OFFSET);
		}
	} else {
		pci_read_config_dword(adapter->pdev,
				PCICFG_UE_STATUS_LOW, &ue_lo);
		pci_read_config_dword(adapter->pdev,
				PCICFG_UE_STATUS_HIGH, &ue_hi);
		pci_read_config_dword(adapter->pdev,
				PCICFG_UE_STATUS_LOW_MASK, &ue_lo_mask);
		pci_read_config_dword(adapter->pdev,
				PCICFG_UE_STATUS_HI_MASK, &ue_hi_mask);

		/* Masked-off bits are not real errors; clear them */
		ue_lo = (ue_lo & ~ue_lo_mask);
		ue_hi = (ue_hi & ~ue_hi_mask);
	}

	if (ue_lo || ue_hi ||
		sliport_status & SLIPORT_STATUS_ERR_MASK) {
		adapter->hw_error = true;
		dev_err(&adapter->pdev->dev,
			"Error detected in the card\n");
	}

	if (sliport_status & SLIPORT_STATUS_ERR_MASK) {
		dev_err(&adapter->pdev->dev,
			"ERR: sliport status 0x%x\n", sliport_status);
		dev_err(&adapter->pdev->dev,
			"ERR: sliport error1 0x%x\n", sliport_err1);
		dev_err(&adapter->pdev->dev,
			"ERR: sliport error2 0x%x\n", sliport_err2);
	}

	/* Log a description for every set UE bit */
	if (ue_lo) {
		for (i = 0; ue_lo; ue_lo >>= 1, i++) {
			if (ue_lo & 1)
				dev_err(&adapter->pdev->dev,
				"UE: %s bit set\n", ue_status_low_desc[i]);
		}
	}

	if (ue_hi) {
		for (i = 0; ue_hi; ue_hi >>= 1, i++) {
			if (ue_hi & 1)
				dev_err(&adapter->pdev->dev,
				"UE: %s bit set\n", ue_status_hi_desc[i]);
		}
	}

}
2165
Sathya Perla8d56ff12009-11-22 22:02:26 +00002166static void be_msix_disable(struct be_adapter *adapter)
2167{
Sathya Perlaac6a0c42011-03-21 20:49:25 +00002168 if (msix_enabled(adapter)) {
Sathya Perla8d56ff12009-11-22 22:02:26 +00002169 pci_disable_msix(adapter->pdev);
Sathya Perlaac6a0c42011-03-21 20:49:25 +00002170 adapter->num_msix_vec = 0;
Sathya Perla3abcded2010-10-03 22:12:27 -07002171 }
2172}
2173
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002174static uint be_num_rss_want(struct be_adapter *adapter)
2175{
Yuval Mintz30e80b52012-07-01 03:19:00 +00002176 u32 num = 0;
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002177 if ((adapter->function_caps & BE_FUNCTION_CAPS_RSS) &&
Sathya Perla4cbdaf62012-08-28 20:37:40 +00002178 !sriov_want(adapter) && be_physfn(adapter)) {
Yuval Mintz30e80b52012-07-01 03:19:00 +00002179 num = (adapter->be3_native) ? BE3_MAX_RSS_QS : BE2_MAX_RSS_QS;
2180 num = min_t(u32, num, (u32)netif_get_num_default_rss_queues());
2181 }
2182 return num;
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002183}
2184
/* Enable MSI-X with as many vectors as wanted for RSS (plus RoCE vectors
 * when supported), retrying once with the smaller count pci_enable_msix
 * reports as available. On success, splits the granted vectors between
 * NIC and RoCE; on failure leaves MSI-X disabled (caller falls back to
 * INTx).
 */
static void be_msix_enable(struct be_adapter *adapter)
{
#define BE_MIN_MSIX_VECTORS 1
	int i, status, num_vec, num_roce_vec = 0;

	/* If RSS queues are not used, need a vec for default RX Q */
	num_vec = min(be_num_rss_want(adapter), num_online_cpus());
	if (be_roce_supported(adapter)) {
		num_roce_vec = min_t(u32, MAX_ROCE_MSIX_VECTORS,
					(num_online_cpus() + 1));
		num_roce_vec = min(num_roce_vec, MAX_ROCE_EQS);
		num_vec += num_roce_vec;
		num_vec = min(num_vec, MAX_MSIX_VECTORS);
	}
	num_vec = max(num_vec, BE_MIN_MSIX_VECTORS);

	for (i = 0; i < num_vec; i++)
		adapter->msix_entries[i].entry = i;

	status = pci_enable_msix(adapter->pdev, adapter->msix_entries, num_vec);
	if (status == 0) {
		goto done;
	} else if (status >= BE_MIN_MSIX_VECTORS) {
		/* A positive status is the number of vectors available;
		 * retry once with that reduced count
		 */
		num_vec = status;
		if (pci_enable_msix(adapter->pdev, adapter->msix_entries,
				num_vec) == 0)
			goto done;
	}
	return;
done:
	/* Split granted vectors between NIC and RoCE usage */
	if (be_roce_supported(adapter)) {
		if (num_vec > num_roce_vec) {
			adapter->num_msix_vec = num_vec - num_roce_vec;
			adapter->num_msix_roce_vec =
				num_vec - adapter->num_msix_vec;
		} else {
			/* Too few vectors: give them all to the NIC */
			adapter->num_msix_vec = num_vec;
			adapter->num_msix_roce_vec = 0;
		}
	} else
		adapter->num_msix_vec = num_vec;
	return;
}
2228
/* Return the MSI-X vector number assigned to the given EQ object */
static inline int be_msix_vec_get(struct be_adapter *adapter,
				struct be_eq_obj *eqo)
{
	return adapter->msix_entries[eqo->idx].vector;
}
2234
/* Request one MSI-X IRQ per event queue. On any failure, frees the IRQs
 * already requested (walking backwards from the failing index), disables
 * MSI-X and returns the error.
 */
static int be_msix_register(struct be_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	struct be_eq_obj *eqo;
	int status, i, vec;

	for_all_evt_queues(adapter, eqo, i) {
		/* Per-vector name shown in /proc/interrupts */
		sprintf(eqo->desc, "%s-q%d", netdev->name, i);
		vec = be_msix_vec_get(adapter, eqo);
		status = request_irq(vec, be_msix, 0, eqo->desc, eqo);
		if (status)
			goto err_msix;
	}

	return 0;
err_msix:
	/* Unwind: free only the IRQs successfully requested so far */
	for (i--, eqo = &adapter->eq_obj[i]; i >= 0; i--, eqo--)
		free_irq(be_msix_vec_get(adapter, eqo), eqo);
	dev_warn(&adapter->pdev->dev, "MSIX Request IRQ failed - err %d\n",
		status);
	be_msix_disable(adapter);
	return status;
}
2258
/* Register interrupts: prefer MSI-X; fall back to shared INTx on the PF
 * (VFs cannot use INTx). Sets adapter->isr_registered on success.
 */
static int be_irq_register(struct be_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	int status;

	if (msix_enabled(adapter)) {
		status = be_msix_register(adapter);
		if (status == 0)
			goto done;
		/* INTx is not supported for VF */
		if (!be_physfn(adapter))
			return status;
	}

	/* INTx */
	netdev->irq = adapter->pdev->irq;
	status = request_irq(netdev->irq, be_intx, IRQF_SHARED, netdev->name,
			adapter);
	if (status) {
		dev_err(&adapter->pdev->dev,
			"INTx request IRQ failed - err %d\n", status);
		return status;
	}
done:
	adapter->isr_registered = true;
	return 0;
}
2286
2287static void be_irq_unregister(struct be_adapter *adapter)
2288{
2289 struct net_device *netdev = adapter->netdev;
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002290 struct be_eq_obj *eqo;
Sathya Perla3abcded2010-10-03 22:12:27 -07002291 int i;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002292
2293 if (!adapter->isr_registered)
2294 return;
2295
2296 /* INTx */
Sathya Perlaac6a0c42011-03-21 20:49:25 +00002297 if (!msix_enabled(adapter)) {
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002298 free_irq(netdev->irq, adapter);
2299 goto done;
2300 }
2301
2302 /* MSIx */
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002303 for_all_evt_queues(adapter, eqo, i)
2304 free_irq(be_msix_vec_get(adapter, eqo), eqo);
Sathya Perla3abcded2010-10-03 22:12:27 -07002305
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002306done:
2307 adapter->isr_registered = false;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002308}
2309
/* Destroy every RX WRB queue: tell the FW to destroy the queue, wait for
 * in-flight DMA and the flush completion, drain the CQ, then free the
 * queue memory.
 */
static void be_rx_qs_destroy(struct be_adapter *adapter)
{
	struct be_queue_info *q;
	struct be_rx_obj *rxo;
	int i;

	for_all_rx_queues(adapter, rxo, i) {
		q = &rxo->q;
		if (q->created) {
			be_cmd_rxq_destroy(adapter, q);
			/* After the rxq is invalidated, wait for a grace time
			 * of 1ms for all dma to end and the flush compl to
			 * arrive
			 */
			mdelay(1);
			be_rx_cq_clean(rxo);
		}
		be_queue_free(adapter, q);
	}
}
2330
/* ndo_stop handler: quiesce the device in a safe order — stop RoCE and
 * async MCC, mask interrupts, disable NAPI and sync each EQ's IRQ, free
 * IRQs, then drain TX completions and tear down the RX queues.
 * Always returns 0.
 */
static int be_close(struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_eq_obj *eqo;
	int i;

	be_roce_dev_close(adapter);

	be_async_mcc_disable(adapter);

	/* Lancer does not use this interrupt-set mechanism */
	if (!lancer_chip(adapter))
		be_intr_set(adapter, false);

	for_all_evt_queues(adapter, eqo, i) {
		napi_disable(&eqo->napi);
		/* Ensure no handler is still running for this EQ's IRQ */
		if (msix_enabled(adapter))
			synchronize_irq(be_msix_vec_get(adapter, eqo));
		else
			synchronize_irq(netdev->irq);
		be_eq_clean(eqo);
	}

	be_irq_unregister(adapter);

	/* Wait for all pending tx completions to arrive so that
	 * all tx skbs are freed.
	 */
	be_tx_compl_clean(adapter);

	be_rx_qs_destroy(adapter);
	return 0;
}
2363
/* Allocate and create all RX WRB queues (default queue first, as the FW
 * requires), program the 128-entry RSS indirection table over the RSS
 * rings when more than one RX queue exists, and post the initial RX
 * fragments. Returns 0 or the first failing status.
 */
static int be_rx_qs_create(struct be_adapter *adapter)
{
	struct be_rx_obj *rxo;
	int rc, i, j;
	u8 rsstable[128];	/* RSS indirection table has 128 entries */

	for_all_rx_queues(adapter, rxo, i) {
		rc = be_queue_alloc(adapter, &rxo->q, RX_Q_LEN,
				    sizeof(struct be_eth_rx_d));
		if (rc)
			return rc;
	}

	/* The FW would like the default RXQ to be created first */
	rxo = default_rxo(adapter);
	rc = be_cmd_rxq_create(adapter, &rxo->q, rxo->cq.id, rx_frag_size,
			       adapter->if_handle, false, &rxo->rss_id);
	if (rc)
		return rc;

	for_all_rss_queues(adapter, rxo, i) {
		rc = be_cmd_rxq_create(adapter, &rxo->q, rxo->cq.id,
				       rx_frag_size, adapter->if_handle,
				       true, &rxo->rss_id);
		if (rc)
			return rc;
	}

	if (be_multi_rxq(adapter)) {
		/* Fill the table by cycling through the RSS ring ids */
		for (j = 0; j < 128; j += adapter->num_rx_qs - 1) {
			for_all_rss_queues(adapter, rxo, i) {
				if ((j + i) >= 128)
					break;
				rsstable[j + i] = rxo->rss_id;
			}
		}
		rc = be_cmd_rss_config(adapter, rsstable, 128);
		if (rc)
			return rc;
	}

	/* First time posting */
	for_all_rx_queues(adapter, rxo, i)
		be_post_rx_frags(rxo, GFP_KERNEL);
	return 0;
}
2410
/* ndo_open handler: create RX queues, register IRQs, unmask interrupts,
 * arm all CQs, enable async MCC and NAPI, report the current link state
 * and start RoCE. On RX queue creation failure, unwinds via be_close()
 * and returns -EIO.
 */
static int be_open(struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_eq_obj *eqo;
	struct be_rx_obj *rxo;
	struct be_tx_obj *txo;
	u8 link_status;
	int status, i;

	status = be_rx_qs_create(adapter);
	if (status)
		goto err;

	be_irq_register(adapter);

	/* Lancer does not use this interrupt-set mechanism */
	if (!lancer_chip(adapter))
		be_intr_set(adapter, true);

	/* Arm the RX and TX completion queues */
	for_all_rx_queues(adapter, rxo, i)
		be_cq_notify(adapter, rxo->cq.id, true, 0);

	for_all_tx_queues(adapter, txo, i)
		be_cq_notify(adapter, txo->cq.id, true, 0);

	be_async_mcc_enable(adapter);

	for_all_evt_queues(adapter, eqo, i) {
		napi_enable(&eqo->napi);
		be_eq_notify(adapter, eqo->q.id, true, false, 0);
	}

	/* Link state is only propagated when the query succeeds */
	status = be_cmd_link_status_query(adapter, NULL, NULL,
					  &link_status, 0);
	if (!status)
		be_link_status_update(adapter, link_status);

	be_roce_dev_open(adapter);
	return 0;
err:
	be_close(adapter->netdev);
	return -EIO;
}
2453
Ajit Khaparde71d8d1b2009-12-03 06:16:59 +00002454static int be_setup_wol(struct be_adapter *adapter, bool enable)
2455{
2456 struct be_dma_mem cmd;
2457 int status = 0;
2458 u8 mac[ETH_ALEN];
2459
2460 memset(mac, 0, ETH_ALEN);
2461
2462 cmd.size = sizeof(struct be_cmd_req_acpi_wol_magic_config);
Ivan Vecera2b7bceb2011-02-02 08:05:12 +00002463 cmd.va = dma_alloc_coherent(&adapter->pdev->dev, cmd.size, &cmd.dma,
2464 GFP_KERNEL);
Ajit Khaparde71d8d1b2009-12-03 06:16:59 +00002465 if (cmd.va == NULL)
2466 return -1;
2467 memset(cmd.va, 0, cmd.size);
2468
2469 if (enable) {
2470 status = pci_write_config_dword(adapter->pdev,
2471 PCICFG_PM_CONTROL_OFFSET, PCICFG_PM_CONTROL_MASK);
2472 if (status) {
2473 dev_err(&adapter->pdev->dev,
Frans Pop2381a552010-03-24 07:57:36 +00002474 "Could not enable Wake-on-lan\n");
Ivan Vecera2b7bceb2011-02-02 08:05:12 +00002475 dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va,
2476 cmd.dma);
Ajit Khaparde71d8d1b2009-12-03 06:16:59 +00002477 return status;
2478 }
2479 status = be_cmd_enable_magic_wol(adapter,
2480 adapter->netdev->dev_addr, &cmd);
2481 pci_enable_wake(adapter->pdev, PCI_D3hot, 1);
2482 pci_enable_wake(adapter->pdev, PCI_D3cold, 1);
2483 } else {
2484 status = be_cmd_enable_magic_wol(adapter, mac, &cmd);
2485 pci_enable_wake(adapter->pdev, PCI_D3hot, 0);
2486 pci_enable_wake(adapter->pdev, PCI_D3cold, 0);
2487 }
2488
Ivan Vecera2b7bceb2011-02-02 08:05:12 +00002489 dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va, cmd.dma);
Ajit Khaparde71d8d1b2009-12-03 06:16:59 +00002490 return status;
2491}
2492
Ajit Khaparde6d87f5c2010-08-25 00:32:33 +00002493/*
2494 * Generate a seed MAC address from the PF MAC Address using jhash.
2495 * MAC Address for VFs are assigned incrementally starting from the seed.
2496 * These addresses are programmed in the ASIC by the PF and the VF driver
2497 * queries for the MAC address during its probe.
2498 */
/* Program a MAC address for every VF, incrementing from a generated seed.
 * On Lancer the MAC list command is used; otherwise a PMAC is added on
 * the VF's interface. Failures are logged per-VF but do not stop the
 * loop.
 * NOTE(review): 'status' is overwritten each iteration, so the return
 * value reflects only the LAST VF's result — earlier failures are only
 * visible in the log.
 */
static inline int be_vf_eth_addr_config(struct be_adapter *adapter)
{
	u32 vf;
	int status = 0;
	u8 mac[ETH_ALEN];
	struct be_vf_cfg *vf_cfg;

	be_vf_eth_addr_generate(adapter, mac);

	for_all_vfs(adapter, vf_cfg, vf) {
		if (lancer_chip(adapter)) {
			status = be_cmd_set_mac_list(adapter, mac, 1, vf + 1);
		} else {
			status = be_cmd_pmac_add(adapter, mac,
						 vf_cfg->if_handle,
						 &vf_cfg->pmac_id, vf + 1);
		}

		if (status)
			dev_err(&adapter->pdev->dev,
			"Mac address assignment failed for VF %d\n", vf);
		else
			memcpy(vf_cfg->mac_addr, mac, ETH_ALEN);

		/* Next VF gets the next consecutive address */
		mac[5] += 1;
	}
	return status;
}
2527
/* Tear down SR-IOV state: remove each VF's MAC and interface, then
 * disable SR-IOV. If any VF is still assigned to a VM, the per-VF
 * teardown and pci_disable_sriov are skipped (only the config array is
 * freed) to avoid yanking resources from a live guest.
 */
static void be_vf_clear(struct be_adapter *adapter)
{
	struct be_vf_cfg *vf_cfg;
	u32 vf;

	if (be_find_vfs(adapter, ASSIGNED)) {
		dev_warn(&adapter->pdev->dev, "VFs are assigned to VMs\n");
		goto done;
	}

	for_all_vfs(adapter, vf_cfg, vf) {
		if (lancer_chip(adapter))
			be_cmd_set_mac_list(adapter, NULL, 0, vf + 1);
		else
			be_cmd_pmac_del(adapter, vf_cfg->if_handle,
					vf_cfg->pmac_id, vf + 1);

		be_cmd_if_destroy(adapter, vf_cfg->if_handle, vf + 1);
	}
	pci_disable_sriov(adapter->pdev);
done:
	kfree(adapter->vf_cfg);
	adapter->num_vfs = 0;
}
2552
/* Undo be_setup(): stop the worker, clear VFs, delete extra unicast
 * PMACs, destroy the interface and all queue sets, and disable MSI-X.
 * Always returns 0.
 */
static int be_clear(struct be_adapter *adapter)
{
	/* pmac_id[0] is the primary MAC; extra uc MACs start at index 1 */
	int i = 1;

	if (adapter->flags & BE_FLAGS_WORKER_SCHEDULED) {
		cancel_delayed_work_sync(&adapter->work);
		adapter->flags &= ~BE_FLAGS_WORKER_SCHEDULED;
	}

	if (sriov_enabled(adapter))
		be_vf_clear(adapter);

	for (; adapter->uc_macs > 0; adapter->uc_macs--, i++)
		be_cmd_pmac_del(adapter, adapter->if_handle,
			adapter->pmac_id[i], 0);

	be_cmd_if_destroy(adapter, adapter->if_handle, 0);

	be_mcc_queues_destroy(adapter);
	be_rx_cqs_destroy(adapter);
	be_tx_queues_destroy(adapter);
	be_evt_queues_destroy(adapter);

	be_msix_disable(adapter);
	return 0;
}
2579
Sathya Perla39f1d942012-05-08 19:41:24 +00002580static int be_vf_setup_init(struct be_adapter *adapter)
Sathya Perla30128032011-11-10 19:17:57 +00002581{
Sathya Perla11ac75e2011-12-13 00:58:50 +00002582 struct be_vf_cfg *vf_cfg;
Sathya Perla30128032011-11-10 19:17:57 +00002583 int vf;
2584
Sathya Perla39f1d942012-05-08 19:41:24 +00002585 adapter->vf_cfg = kcalloc(adapter->num_vfs, sizeof(*vf_cfg),
2586 GFP_KERNEL);
2587 if (!adapter->vf_cfg)
2588 return -ENOMEM;
2589
Sathya Perla11ac75e2011-12-13 00:58:50 +00002590 for_all_vfs(adapter, vf_cfg, vf) {
2591 vf_cfg->if_handle = -1;
2592 vf_cfg->pmac_id = -1;
Sathya Perla30128032011-11-10 19:17:57 +00002593 }
Sathya Perla39f1d942012-05-08 19:41:24 +00002594 return 0;
Sathya Perla30128032011-11-10 19:17:57 +00002595}
2596
/* Bring up SR-IOV VFs: enable SR-IOV for (a possibly clamped) num_vfs,
 * allocate per-VF state, create an interface per VF, assign MAC
 * addresses (unless VFs were already enabled), cap each VF's TX rate
 * and record its default VLAN from the HW switch config.
 * Returns 0 when VFs were already enabled or the platform rejects
 * SR-IOV; otherwise 0 on success or the first failing status.
 */
static int be_vf_setup(struct be_adapter *adapter)
{
	struct be_vf_cfg *vf_cfg;
	struct device *dev = &adapter->pdev->dev;
	u32 cap_flags, en_flags, vf;
	u16 def_vlan, lnk_speed;
	int status, enabled_vfs;

	/* VFs already enabled (e.g. by a previous driver load): keep them */
	enabled_vfs = be_find_vfs(adapter, ENABLED);
	if (enabled_vfs) {
		dev_warn(dev, "%d VFs are already enabled\n", enabled_vfs);
		dev_warn(dev, "Ignoring num_vfs=%d setting\n", num_vfs);
		return 0;
	}

	/* Clamp the module parameter to what the device reports */
	if (num_vfs > adapter->dev_num_vfs) {
		dev_warn(dev, "Device supports %d VFs and not %d\n",
			adapter->dev_num_vfs, num_vfs);
		num_vfs = adapter->dev_num_vfs;
	}

	status = pci_enable_sriov(adapter->pdev, num_vfs);
	if (!status) {
		adapter->num_vfs = num_vfs;
	} else {
		/* Platform doesn't support SRIOV though device supports it */
		dev_warn(dev, "SRIOV enable failed\n");
		return 0;
	}

	status = be_vf_setup_init(adapter);
	if (status)
		goto err;

	cap_flags = en_flags = BE_IF_FLAGS_UNTAGGED | BE_IF_FLAGS_BROADCAST |
				BE_IF_FLAGS_MULTICAST;
	for_all_vfs(adapter, vf_cfg, vf) {
		status = be_cmd_if_create(adapter, cap_flags, en_flags,
					  &vf_cfg->if_handle, vf + 1);
		if (status)
			goto err;
	}

	if (!enabled_vfs) {
		status = be_vf_eth_addr_config(adapter);
		if (status)
			goto err;
	}

	for_all_vfs(adapter, vf_cfg, vf) {
		/* Cap each VF at 1Gbps; tx_rate is stored in 100kbps units */
		lnk_speed = 1000;
		status = be_cmd_set_qos(adapter, lnk_speed, vf + 1);
		if (status)
			goto err;
		vf_cfg->tx_rate = lnk_speed * 10;

		status = be_cmd_get_hsw_config(adapter, &def_vlan,
				vf + 1, vf_cfg->if_handle);
		if (status)
			goto err;
		vf_cfg->def_vid = def_vlan;
	}
	return 0;
err:
	return status;
}
2663
Sathya Perla30128032011-11-10 19:17:57 +00002664static void be_setup_init(struct be_adapter *adapter)
2665{
2666 adapter->vlan_prio_bmap = 0xff;
Ajit Khaparde42f11cf2012-04-21 18:53:22 +00002667 adapter->phy.link_speed = -1;
Sathya Perla30128032011-11-10 19:17:57 +00002668 adapter->if_handle = -1;
2669 adapter->be3_native = false;
2670 adapter->promiscuous = false;
2671 adapter->eq_next_idx = 0;
Ajit Khaparde42f11cf2012-04-21 18:53:22 +00002672 adapter->phy.forced_port_speed = -1;
Sathya Perla30128032011-11-10 19:17:57 +00002673}
2674
Padmanabh Ratnakar1578e772012-06-07 04:37:08 +00002675static int be_get_mac_addr(struct be_adapter *adapter, u8 *mac, u32 if_handle,
2676 bool *active_mac, u32 *pmac_id)
Padmanabh Ratnakar590c3912011-11-25 05:47:26 +00002677{
Padmanabh Ratnakar1578e772012-06-07 04:37:08 +00002678 int status = 0;
Padmanabh Ratnakare5e1ee82012-02-03 09:50:17 +00002679
Padmanabh Ratnakar1578e772012-06-07 04:37:08 +00002680 if (!is_zero_ether_addr(adapter->netdev->perm_addr)) {
2681 memcpy(mac, adapter->netdev->dev_addr, ETH_ALEN);
2682 if (!lancer_chip(adapter) && !be_physfn(adapter))
2683 *active_mac = true;
2684 else
2685 *active_mac = false;
Padmanabh Ratnakare5e1ee82012-02-03 09:50:17 +00002686
Padmanabh Ratnakar1578e772012-06-07 04:37:08 +00002687 return status;
Padmanabh Ratnakare5e1ee82012-02-03 09:50:17 +00002688 }
Padmanabh Ratnakar1578e772012-06-07 04:37:08 +00002689
2690 if (lancer_chip(adapter)) {
2691 status = be_cmd_get_mac_from_list(adapter, mac,
2692 active_mac, pmac_id, 0);
2693 if (*active_mac) {
2694 status = be_cmd_mac_addr_query(adapter, mac,
2695 MAC_ADDRESS_TYPE_NETWORK,
2696 false, if_handle,
2697 *pmac_id);
2698 }
2699 } else if (be_physfn(adapter)) {
2700 /* For BE3, for PF get permanent MAC */
2701 status = be_cmd_mac_addr_query(adapter, mac,
2702 MAC_ADDRESS_TYPE_NETWORK, true,
2703 0, 0);
2704 *active_mac = false;
2705 } else {
2706 /* For BE3, for VF get soft MAC assigned by PF*/
2707 status = be_cmd_mac_addr_query(adapter, mac,
2708 MAC_ADDRESS_TYPE_NETWORK, false,
2709 if_handle, 0);
2710 *active_mac = true;
2711 }
Padmanabh Ratnakar590c3912011-11-25 05:47:26 +00002712 return status;
2713}
2714
Sathya Perla39f1d942012-05-08 19:41:24 +00002715/* Routine to query per function resource limits */
2716static int be_get_config(struct be_adapter *adapter)
2717{
2718 int pos;
2719 u16 dev_num_vfs;
2720
2721 pos = pci_find_ext_capability(adapter->pdev, PCI_EXT_CAP_ID_SRIOV);
2722 if (pos) {
2723 pci_read_config_word(adapter->pdev, pos + PCI_SRIOV_TOTAL_VF,
2724 &dev_num_vfs);
Vasundhara Volam7c5a5242012-08-28 20:37:41 +00002725 if (!lancer_chip(adapter))
2726 dev_num_vfs = min_t(u16, dev_num_vfs, MAX_VFS);
Sathya Perla39f1d942012-05-08 19:41:24 +00002727 adapter->dev_num_vfs = dev_num_vfs;
2728 }
2729 return 0;
2730}
2731
/* Full function-level bring-up, called from probe/resume paths:
 * create EQs/CQs/MCC queues, create the interface, resolve and program
 * the MAC address, create TX queues, restore VLAN/RX-mode/flow-control
 * settings, optionally set up SR-IOV VFs, and kick off the worker.
 * On any failure everything created so far is torn down via be_clear().
 */
static int be_setup(struct be_adapter *adapter)
{
	struct device *dev = &adapter->pdev->dev;
	u32 cap_flags, en_flags;
	u32 tx_fc, rx_fc;
	int status;
	u8 mac[ETH_ALEN];
	bool active_mac;

	be_setup_init(adapter);

	/* always returns 0; fills adapter->dev_num_vfs when SR-IOV exists */
	be_get_config(adapter);

	be_cmd_req_native_mode(adapter);

	be_msix_enable(adapter);

	/* queue creation order: EQs first, then the CQs that attach to
	 * them, then the MCC queues used for async FW commands
	 */
	status = be_evt_queues_create(adapter);
	if (status)
		goto err;

	status = be_tx_cqs_create(adapter);
	if (status)
		goto err;

	status = be_rx_cqs_create(adapter);
	if (status)
		goto err;

	status = be_mcc_queues_create(adapter);
	if (status)
		goto err;

	en_flags = BE_IF_FLAGS_UNTAGGED | BE_IF_FLAGS_BROADCAST |
			BE_IF_FLAGS_MULTICAST | BE_IF_FLAGS_PASS_L3L4_ERRORS;
	cap_flags = en_flags | BE_IF_FLAGS_MCAST_PROMISCUOUS |
			BE_IF_FLAGS_VLAN_PROMISCUOUS | BE_IF_FLAGS_PROMISCUOUS;

	if (adapter->function_caps & BE_FUNCTION_CAPS_RSS) {
		cap_flags |= BE_IF_FLAGS_RSS;
		en_flags |= BE_IF_FLAGS_RSS;
	}

	/* Lancer VFs get a reduced capability i/f */
	if (lancer_chip(adapter) && !be_physfn(adapter)) {
		en_flags = BE_IF_FLAGS_UNTAGGED |
			    BE_IF_FLAGS_BROADCAST |
			    BE_IF_FLAGS_MULTICAST;
		cap_flags = en_flags;
	}

	status = be_cmd_if_create(adapter, cap_flags, en_flags,
				  &adapter->if_handle, 0);
	if (status != 0)
		goto err;

	/* resolve the MAC to use; add it to the i/f unless it is
	 * already active (see be_get_mac_addr)
	 */
	memset(mac, 0, ETH_ALEN);
	active_mac = false;
	status = be_get_mac_addr(adapter, mac, adapter->if_handle,
				 &active_mac, &adapter->pmac_id[0]);
	if (status != 0)
		goto err;

	if (!active_mac) {
		status = be_cmd_pmac_add(adapter, mac, adapter->if_handle,
					 &adapter->pmac_id[0], 0);
		if (status != 0)
			goto err;
	}

	if (is_zero_ether_addr(adapter->netdev->dev_addr)) {
		memcpy(adapter->netdev->dev_addr, mac, ETH_ALEN);
		memcpy(adapter->netdev->perm_addr, mac, ETH_ALEN);
	}

	status = be_tx_qs_create(adapter);
	if (status)
		goto err;

	be_cmd_get_fw_ver(adapter, adapter->fw_ver, NULL);

	/* replay VLAN filters that were configured before a reset */
	if (adapter->vlans_added)
		be_vid_config(adapter);

	be_set_rx_mode(adapter->netdev);

	/* sync HW flow-control settings with the driver's saved ones */
	be_cmd_get_flow_control(adapter, &tx_fc, &rx_fc);

	if (rx_fc != adapter->rx_fc || tx_fc != adapter->tx_fc)
		be_cmd_set_flow_control(adapter, adapter->tx_fc,
					adapter->rx_fc);

	/* VF setup failures are non-fatal: the PF still comes up */
	if (be_physfn(adapter) && num_vfs) {
		if (adapter->dev_num_vfs)
			be_vf_setup(adapter);
		else
			dev_warn(dev, "device doesn't support SRIOV\n");
	}

	be_cmd_get_phy_info(adapter);
	if (be_pause_supported(adapter))
		adapter->phy.fc_autoneg = 1;

	schedule_delayed_work(&adapter->work, msecs_to_jiffies(1000));
	adapter->flags |= BE_FLAGS_WORKER_SCHEDULED;
	return 0;
err:
	be_clear(adapter);
	return status;
}
2841
#ifdef CONFIG_NET_POLL_CONTROLLER
/* netpoll entry point: service every event queue */
static void be_netpoll(struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_eq_obj *eqo;
	int i;

	for_all_evt_queues(adapter, eqo, i)
		event_handle(eqo);
}
#endif
2855
#define FW_FILE_HDR_SIGN "ServerEngines Corp. "
/* 32-byte cookie marking the start of a flash section directory inside a
 * UFI firmware image; matched in get_fsec_info().
 * NOTE(review): not referenced outside this file as far as visible here;
 * could likely be static const — confirm before changing linkage.
 */
char flash_cookie[2][16] = {"*** SE FLAS", "H DIRECTORY *** "};
2858
Sarveshwar Bandifa9a6fe2009-11-20 14:23:47 -08002859static bool be_flash_redboot(struct be_adapter *adapter,
Ajit Khaparde3f0d4562010-02-09 01:30:35 +00002860 const u8 *p, u32 img_start, int image_size,
2861 int hdr_size)
Sarveshwar Bandifa9a6fe2009-11-20 14:23:47 -08002862{
2863 u32 crc_offset;
2864 u8 flashed_crc[4];
2865 int status;
Ajit Khaparde3f0d4562010-02-09 01:30:35 +00002866
2867 crc_offset = hdr_size + img_start + image_size - 4;
2868
Sarveshwar Bandifa9a6fe2009-11-20 14:23:47 -08002869 p += crc_offset;
Ajit Khaparde3f0d4562010-02-09 01:30:35 +00002870
2871 status = be_cmd_get_flash_crc(adapter, flashed_crc,
Ajit Khapardef510fc62010-03-31 01:47:45 +00002872 (image_size - 4));
Sarveshwar Bandifa9a6fe2009-11-20 14:23:47 -08002873 if (status) {
2874 dev_err(&adapter->pdev->dev,
2875 "could not get crc from flash, not flashing redboot\n");
2876 return false;
2877 }
2878
2879 /*update redboot only if crc does not match*/
2880 if (!memcmp(flashed_crc, p, 4))
2881 return false;
2882 else
2883 return true;
Sarveshwar Bandifa9a6fe2009-11-20 14:23:47 -08002884}
2885
Sathya Perla306f1342011-08-02 19:57:45 +00002886static bool phy_flashing_required(struct be_adapter *adapter)
2887{
Ajit Khaparde42f11cf2012-04-21 18:53:22 +00002888 return (adapter->phy.phy_type == TN_8022 &&
2889 adapter->phy.interface_type == PHY_TYPE_BASET_10GB);
Sathya Perla306f1342011-08-02 19:57:45 +00002890}
2891
Padmanabh Ratnakarc165541e2012-04-25 01:47:15 +00002892static bool is_comp_in_ufi(struct be_adapter *adapter,
2893 struct flash_section_info *fsec, int type)
2894{
2895 int i = 0, img_type = 0;
2896 struct flash_section_info_g2 *fsec_g2 = NULL;
2897
2898 if (adapter->generation != BE_GEN3)
2899 fsec_g2 = (struct flash_section_info_g2 *)fsec;
2900
2901 for (i = 0; i < MAX_FLASH_COMP; i++) {
2902 if (fsec_g2)
2903 img_type = le32_to_cpu(fsec_g2->fsec_entry[i].type);
2904 else
2905 img_type = le32_to_cpu(fsec->fsec_entry[i].type);
2906
2907 if (img_type == type)
2908 return true;
2909 }
2910 return false;
2911
2912}
2913
2914struct flash_section_info *get_fsec_info(struct be_adapter *adapter,
2915 int header_size,
2916 const struct firmware *fw)
2917{
2918 struct flash_section_info *fsec = NULL;
2919 const u8 *p = fw->data;
2920
2921 p += header_size;
2922 while (p < (fw->data + fw->size)) {
2923 fsec = (struct flash_section_info *)p;
2924 if (!memcmp(flash_cookie, fsec->cookie, sizeof(flash_cookie)))
2925 return fsec;
2926 p += 32;
2927 }
2928 return NULL;
2929}
2930
/* Walk the generation-specific component table and flash every component
 * that is present in the UFI's section directory, subject to skip rules:
 *  - NCSI firmware only when the running FW version is >= 3.102.148.0,
 *  - PHY firmware only when phy_flashing_required(),
 *  - redboot only when its CRC differs from what is already in flash.
 * Each component is written in 32KB chunks through the DMA-able
 * @flash_cmd buffer: intermediate chunks use a SAVE op, the final chunk a
 * FLASH op (PHY components use the corresponding PHY_* ops).
 * Returns 0 on success, -1 on a corrupt UFI or a failed flash command.
 */
static int be_flash_data(struct be_adapter *adapter,
			const struct firmware *fw,
			struct be_dma_mem *flash_cmd,
			int num_of_images)

{
	int status = 0, i, filehdr_size = 0;
	int img_hdrs_size = (num_of_images * sizeof(struct image_hdr));
	u32 total_bytes = 0, flash_op;
	int num_bytes;
	const u8 *p = fw->data;
	struct be_cmd_write_flashrom *req = flash_cmd->va;
	const struct flash_comp *pflashcomp;
	int num_comp, hdr_size;
	struct flash_section_info *fsec = NULL;

	/* per-component table for GEN3: {offset, optype, max size, type} */
	struct flash_comp gen3_flash_types[] = {
		{ FLASH_iSCSI_PRIMARY_IMAGE_START_g3, OPTYPE_ISCSI_ACTIVE,
			FLASH_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_iSCSI},
		{ FLASH_REDBOOT_START_g3, OPTYPE_REDBOOT,
			FLASH_REDBOOT_IMAGE_MAX_SIZE_g3, IMAGE_BOOT_CODE},
		{ FLASH_iSCSI_BIOS_START_g3, OPTYPE_BIOS,
			FLASH_BIOS_IMAGE_MAX_SIZE_g3, IMAGE_OPTION_ROM_ISCSI},
		{ FLASH_PXE_BIOS_START_g3, OPTYPE_PXE_BIOS,
			FLASH_BIOS_IMAGE_MAX_SIZE_g3, IMAGE_OPTION_ROM_PXE},
		{ FLASH_FCoE_BIOS_START_g3, OPTYPE_FCOE_BIOS,
			FLASH_BIOS_IMAGE_MAX_SIZE_g3, IMAGE_OPTION_ROM_FCoE},
		{ FLASH_iSCSI_BACKUP_IMAGE_START_g3, OPTYPE_ISCSI_BACKUP,
			FLASH_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_BACKUP_iSCSI},
		{ FLASH_FCoE_PRIMARY_IMAGE_START_g3, OPTYPE_FCOE_FW_ACTIVE,
			FLASH_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_FCoE},
		{ FLASH_FCoE_BACKUP_IMAGE_START_g3, OPTYPE_FCOE_FW_BACKUP,
			FLASH_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_BACKUP_FCoE},
		{ FLASH_NCSI_START_g3, OPTYPE_NCSI_FW,
			FLASH_NCSI_IMAGE_MAX_SIZE_g3, IMAGE_NCSI},
		{ FLASH_PHY_FW_START_g3, OPTYPE_PHY_FW,
			FLASH_PHY_FW_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_PHY}
	};

	/* GEN2 table: no NCSI or PHY firmware components */
	struct flash_comp gen2_flash_types[] = {
		{ FLASH_iSCSI_PRIMARY_IMAGE_START_g2, OPTYPE_ISCSI_ACTIVE,
			FLASH_IMAGE_MAX_SIZE_g2, IMAGE_FIRMWARE_iSCSI},
		{ FLASH_REDBOOT_START_g2, OPTYPE_REDBOOT,
			FLASH_REDBOOT_IMAGE_MAX_SIZE_g2, IMAGE_BOOT_CODE},
		{ FLASH_iSCSI_BIOS_START_g2, OPTYPE_BIOS,
			FLASH_BIOS_IMAGE_MAX_SIZE_g2, IMAGE_OPTION_ROM_ISCSI},
		{ FLASH_PXE_BIOS_START_g2, OPTYPE_PXE_BIOS,
			FLASH_BIOS_IMAGE_MAX_SIZE_g2, IMAGE_OPTION_ROM_PXE},
		{ FLASH_FCoE_BIOS_START_g2, OPTYPE_FCOE_BIOS,
			FLASH_BIOS_IMAGE_MAX_SIZE_g2, IMAGE_OPTION_ROM_FCoE},
		{ FLASH_iSCSI_BACKUP_IMAGE_START_g2, OPTYPE_ISCSI_BACKUP,
			FLASH_IMAGE_MAX_SIZE_g2, IMAGE_FIRMWARE_BACKUP_iSCSI},
		{ FLASH_FCoE_PRIMARY_IMAGE_START_g2, OPTYPE_FCOE_FW_ACTIVE,
			FLASH_IMAGE_MAX_SIZE_g2, IMAGE_FIRMWARE_FCoE},
		{ FLASH_FCoE_BACKUP_IMAGE_START_g2, OPTYPE_FCOE_FW_BACKUP,
			FLASH_IMAGE_MAX_SIZE_g2, IMAGE_FIRMWARE_BACKUP_FCoE}
	};

	if (adapter->generation == BE_GEN3) {
		pflashcomp = gen3_flash_types;
		filehdr_size = sizeof(struct flash_file_hdr_g3);
		num_comp = ARRAY_SIZE(gen3_flash_types);
	} else {
		pflashcomp = gen2_flash_types;
		filehdr_size = sizeof(struct flash_file_hdr_g2);
		num_comp = ARRAY_SIZE(gen2_flash_types);
	}
	/* Get flash section info*/
	fsec = get_fsec_info(adapter, filehdr_size + img_hdrs_size, fw);
	if (!fsec) {
		dev_err(&adapter->pdev->dev,
			"Invalid Cookie. UFI corrupted ?\n");
		return -1;
	}
	for (i = 0; i < num_comp; i++) {
		if (!is_comp_in_ufi(adapter, fsec, pflashcomp[i].img_type))
			continue;

		/* NCSI FW needs running firmware >= 3.102.148.0 */
		if ((pflashcomp[i].optype == OPTYPE_NCSI_FW) &&
		    memcmp(adapter->fw_ver, "3.102.148.0", 11) < 0)
			continue;

		if (pflashcomp[i].optype == OPTYPE_PHY_FW) {
			if (!phy_flashing_required(adapter))
				continue;
		}

		hdr_size = filehdr_size +
			   (num_of_images * sizeof(struct image_hdr));

		/* skip redboot when its CRC already matches flash */
		if ((pflashcomp[i].optype == OPTYPE_REDBOOT) &&
		    (!be_flash_redboot(adapter, fw->data, pflashcomp[i].offset,
			pflashcomp[i].size, hdr_size)))
			continue;

		/* Flash the component */
		p = fw->data;
		p += filehdr_size + pflashcomp[i].offset + img_hdrs_size;
		/* bounds-check the component against the file size */
		if (p + pflashcomp[i].size > fw->data + fw->size)
			return -1;
		total_bytes = pflashcomp[i].size;
		while (total_bytes) {
			if (total_bytes > 32*1024)
				num_bytes = 32*1024;
			else
				num_bytes = total_bytes;
			total_bytes -= num_bytes;
			/* last chunk commits (FLASH); earlier chunks
			 * accumulate (SAVE)
			 */
			if (!total_bytes) {
				if (pflashcomp[i].optype == OPTYPE_PHY_FW)
					flash_op = FLASHROM_OPER_PHY_FLASH;
				else
					flash_op = FLASHROM_OPER_FLASH;
			} else {
				if (pflashcomp[i].optype == OPTYPE_PHY_FW)
					flash_op = FLASHROM_OPER_PHY_SAVE;
				else
					flash_op = FLASHROM_OPER_SAVE;
			}
			memcpy(req->params.data_buf, p, num_bytes);
			p += num_bytes;
			status = be_cmd_write_flashrom(adapter, flash_cmd,
				pflashcomp[i].optype, flash_op, num_bytes);
			if (status) {
				/* a FW that rejects PHY flashing is not
				 * fatal; move on to the next component
				 */
				if ((status == ILLEGAL_IOCTL_REQ) &&
					(pflashcomp[i].optype ==
						OPTYPE_PHY_FW))
					break;
				dev_err(&adapter->pdev->dev,
					"cmd to write to flash rom failed.\n");
				return -1;
			}
		}
	}
	return 0;
}
3066
Ajit Khaparde3f0d4562010-02-09 01:30:35 +00003067static int get_ufigen_type(struct flash_file_hdr_g2 *fhdr)
3068{
3069 if (fhdr == NULL)
3070 return 0;
3071 if (fhdr->build[0] == '3')
3072 return BE_GEN3;
3073 else if (fhdr->build[0] == '2')
3074 return BE_GEN2;
3075 else
3076 return 0;
3077}
3078
Padmanabh Ratnakarf67ef7b2012-07-12 03:57:09 +00003079static int lancer_wait_idle(struct be_adapter *adapter)
3080{
3081#define SLIPORT_IDLE_TIMEOUT 30
3082 u32 reg_val;
3083 int status = 0, i;
3084
3085 for (i = 0; i < SLIPORT_IDLE_TIMEOUT; i++) {
3086 reg_val = ioread32(adapter->db + PHYSDEV_CONTROL_OFFSET);
3087 if ((reg_val & PHYSDEV_CONTROL_INP_MASK) == 0)
3088 break;
3089
3090 ssleep(1);
3091 }
3092
3093 if (i == SLIPORT_IDLE_TIMEOUT)
3094 status = -1;
3095
3096 return status;
3097}
3098
3099static int lancer_fw_reset(struct be_adapter *adapter)
3100{
3101 int status = 0;
3102
3103 status = lancer_wait_idle(adapter);
3104 if (status)
3105 return status;
3106
3107 iowrite32(PHYSDEV_CONTROL_FW_RESET_MASK, adapter->db +
3108 PHYSDEV_CONTROL_OFFSET);
3109
3110 return status;
3111}
3112
/* Flash a Lancer firmware image: stream the 4-byte-aligned image to the
 * "/prg" object in 32KB chunks through a DMA-coherent buffer, then issue
 * a zero-length write to commit it. Depending on the change status the
 * FW reports, either trigger an in-band FW reset or tell the user a
 * reboot is needed for the new image to take effect.
 * Returns 0 on success or a negative/command error status.
 */
static int lancer_fw_download(struct be_adapter *adapter,
				const struct firmware *fw)
{
#define LANCER_FW_DOWNLOAD_CHUNK (32 * 1024)
#define LANCER_FW_DOWNLOAD_LOCATION "/prg"
	struct be_dma_mem flash_cmd;
	const u8 *data_ptr = NULL;
	u8 *dest_image_ptr = NULL;
	size_t image_size = 0;
	u32 chunk_size = 0;
	u32 data_written = 0;
	u32 offset = 0;
	int status = 0;
	u8 add_status = 0;
	u8 change_status;

	/* the write-object command requires 4-byte-aligned lengths */
	if (!IS_ALIGNED(fw->size, sizeof(u32))) {
		dev_err(&adapter->pdev->dev,
			"FW Image not properly aligned. "
			"Length must be 4 byte aligned.\n");
		status = -EINVAL;
		goto lancer_fw_exit;
	}

	/* one buffer holds the command header plus one image chunk */
	flash_cmd.size = sizeof(struct lancer_cmd_req_write_object)
				+ LANCER_FW_DOWNLOAD_CHUNK;
	flash_cmd.va = dma_alloc_coherent(&adapter->pdev->dev, flash_cmd.size,
						&flash_cmd.dma, GFP_KERNEL);
	if (!flash_cmd.va) {
		status = -ENOMEM;
		dev_err(&adapter->pdev->dev,
			"Memory allocation failure while flashing\n");
		goto lancer_fw_exit;
	}

	dest_image_ptr = flash_cmd.va +
				sizeof(struct lancer_cmd_req_write_object);
	image_size = fw->size;
	data_ptr = fw->data;

	/* advance by data_written (not chunk_size): the FW may accept
	 * less than requested per command
	 */
	while (image_size) {
		chunk_size = min_t(u32, image_size, LANCER_FW_DOWNLOAD_CHUNK);

		/* Copy the image chunk content. */
		memcpy(dest_image_ptr, data_ptr, chunk_size);

		status = lancer_cmd_write_object(adapter, &flash_cmd,
						 chunk_size, offset,
						 LANCER_FW_DOWNLOAD_LOCATION,
						 &data_written, &change_status,
						 &add_status);
		if (status)
			break;

		offset += data_written;
		data_ptr += data_written;
		image_size -= data_written;
	}

	if (!status) {
		/* Commit the FW written */
		status = lancer_cmd_write_object(adapter, &flash_cmd,
						 0, offset,
						 LANCER_FW_DOWNLOAD_LOCATION,
						 &data_written, &change_status,
						 &add_status);
	}

	dma_free_coherent(&adapter->pdev->dev, flash_cmd.size, flash_cmd.va,
				flash_cmd.dma);
	if (status) {
		dev_err(&adapter->pdev->dev,
			"Firmware load error. "
			"Status code: 0x%x Additional Status: 0x%x\n",
			status, add_status);
		goto lancer_fw_exit;
	}

	if (change_status == LANCER_FW_RESET_NEEDED) {
		status = lancer_fw_reset(adapter);
		if (status) {
			dev_err(&adapter->pdev->dev,
				"Adapter busy for FW reset.\n"
				"New FW will not be active.\n");
			goto lancer_fw_exit;
		}
	} else if (change_status != LANCER_NO_RESET_NEEDED) {
		dev_err(&adapter->pdev->dev,
			"System reboot required for new FW"
			" to be active\n");
	}

	dev_info(&adapter->pdev->dev, "Firmware flashed successfully\n");
lancer_fw_exit:
	return status;
}
3209
3210static int be_fw_download(struct be_adapter *adapter, const struct firmware* fw)
3211{
Ajit Khaparde3f0d4562010-02-09 01:30:35 +00003212 struct flash_file_hdr_g2 *fhdr;
3213 struct flash_file_hdr_g3 *fhdr3;
3214 struct image_hdr *img_hdr_ptr = NULL;
Ajit Khaparde84517482009-09-04 03:12:16 +00003215 struct be_dma_mem flash_cmd;
Ajit Khaparde84517482009-09-04 03:12:16 +00003216 const u8 *p;
Shripad Nunjundarao485bf562011-05-16 07:36:59 +00003217 int status = 0, i = 0, num_imgs = 0;
Ajit Khaparde84517482009-09-04 03:12:16 +00003218
3219 p = fw->data;
Ajit Khaparde3f0d4562010-02-09 01:30:35 +00003220 fhdr = (struct flash_file_hdr_g2 *) p;
Ajit Khaparde84517482009-09-04 03:12:16 +00003221
Ajit Khaparde84517482009-09-04 03:12:16 +00003222 flash_cmd.size = sizeof(struct be_cmd_write_flashrom) + 32*1024;
Ivan Vecera2b7bceb2011-02-02 08:05:12 +00003223 flash_cmd.va = dma_alloc_coherent(&adapter->pdev->dev, flash_cmd.size,
3224 &flash_cmd.dma, GFP_KERNEL);
Ajit Khaparde84517482009-09-04 03:12:16 +00003225 if (!flash_cmd.va) {
3226 status = -ENOMEM;
3227 dev_err(&adapter->pdev->dev,
3228 "Memory allocation failure while flashing\n");
Shripad Nunjundarao485bf562011-05-16 07:36:59 +00003229 goto be_fw_exit;
Ajit Khaparde84517482009-09-04 03:12:16 +00003230 }
3231
Ajit Khaparde3f0d4562010-02-09 01:30:35 +00003232 if ((adapter->generation == BE_GEN3) &&
3233 (get_ufigen_type(fhdr) == BE_GEN3)) {
3234 fhdr3 = (struct flash_file_hdr_g3 *) fw->data;
Ajit Khaparde8b93b712010-03-31 01:57:10 +00003235 num_imgs = le32_to_cpu(fhdr3->num_imgs);
3236 for (i = 0; i < num_imgs; i++) {
Ajit Khaparde3f0d4562010-02-09 01:30:35 +00003237 img_hdr_ptr = (struct image_hdr *) (fw->data +
3238 (sizeof(struct flash_file_hdr_g3) +
Ajit Khaparde8b93b712010-03-31 01:57:10 +00003239 i * sizeof(struct image_hdr)));
3240 if (le32_to_cpu(img_hdr_ptr->imageid) == 1)
3241 status = be_flash_data(adapter, fw, &flash_cmd,
3242 num_imgs);
Ajit Khaparde3f0d4562010-02-09 01:30:35 +00003243 }
3244 } else if ((adapter->generation == BE_GEN2) &&
3245 (get_ufigen_type(fhdr) == BE_GEN2)) {
3246 status = be_flash_data(adapter, fw, &flash_cmd, 0);
3247 } else {
3248 dev_err(&adapter->pdev->dev,
3249 "UFI and Interface are not compatible for flashing\n");
3250 status = -1;
Ajit Khaparde84517482009-09-04 03:12:16 +00003251 }
3252
Ivan Vecera2b7bceb2011-02-02 08:05:12 +00003253 dma_free_coherent(&adapter->pdev->dev, flash_cmd.size, flash_cmd.va,
3254 flash_cmd.dma);
Ajit Khaparde84517482009-09-04 03:12:16 +00003255 if (status) {
3256 dev_err(&adapter->pdev->dev, "Firmware load error\n");
Shripad Nunjundarao485bf562011-05-16 07:36:59 +00003257 goto be_fw_exit;
Ajit Khaparde84517482009-09-04 03:12:16 +00003258 }
3259
André Goddard Rosaaf901ca2009-11-14 13:09:05 -02003260 dev_info(&adapter->pdev->dev, "Firmware flashed successfully\n");
Ajit Khaparde84517482009-09-04 03:12:16 +00003261
Shripad Nunjundarao485bf562011-05-16 07:36:59 +00003262be_fw_exit:
3263 return status;
3264}
3265
3266int be_load_fw(struct be_adapter *adapter, u8 *fw_file)
3267{
3268 const struct firmware *fw;
3269 int status;
3270
3271 if (!netif_running(adapter->netdev)) {
3272 dev_err(&adapter->pdev->dev,
3273 "Firmware load not allowed (interface is down)\n");
3274 return -1;
3275 }
3276
3277 status = request_firmware(&fw, fw_file, &adapter->pdev->dev);
3278 if (status)
3279 goto fw_exit;
3280
3281 dev_info(&adapter->pdev->dev, "Flashing firmware file %s\n", fw_file);
3282
3283 if (lancer_chip(adapter))
3284 status = lancer_fw_download(adapter, fw);
3285 else
3286 status = be_fw_download(adapter, fw);
3287
Ajit Khaparde84517482009-09-04 03:12:16 +00003288fw_exit:
3289 release_firmware(fw);
3290 return status;
3291}
3292
/* net_device callbacks for a be2net interface; the ndo_set_vf_* SR-IOV
 * management ops are wired for all functions but act on VF state owned
 * by the PF.
 */
static const struct net_device_ops be_netdev_ops = {
	.ndo_open = be_open,
	.ndo_stop = be_close,
	.ndo_start_xmit = be_xmit,
	.ndo_set_rx_mode = be_set_rx_mode,
	.ndo_set_mac_address = be_mac_addr_set,
	.ndo_change_mtu = be_change_mtu,
	.ndo_get_stats64 = be_get_stats64,
	.ndo_validate_addr = eth_validate_addr,
	.ndo_vlan_rx_add_vid = be_vlan_add_vid,
	.ndo_vlan_rx_kill_vid = be_vlan_rem_vid,
	.ndo_set_vf_mac = be_set_vf_mac,
	.ndo_set_vf_vlan = be_set_vf_vlan,
	.ndo_set_vf_tx_rate = be_set_vf_tx_rate,
	.ndo_get_vf_config = be_get_vf_config,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller = be_netpoll,
#endif
};
3312
3313static void be_netdev_init(struct net_device *netdev)
3314{
3315 struct be_adapter *adapter = netdev_priv(netdev);
Sathya Perla10ef9ab2012-02-09 18:05:27 +00003316 struct be_eq_obj *eqo;
Sathya Perla3abcded2010-10-03 22:12:27 -07003317 int i;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003318
Michał Mirosław6332c8d2011-04-07 02:43:48 +00003319 netdev->hw_features |= NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6 |
Michał Mirosław8b8ddc62011-04-08 02:38:47 +00003320 NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_RXCSUM |
3321 NETIF_F_HW_VLAN_TX;
3322 if (be_multi_rxq(adapter))
3323 netdev->hw_features |= NETIF_F_RXHASH;
Michał Mirosław6332c8d2011-04-07 02:43:48 +00003324
3325 netdev->features |= netdev->hw_features |
Michał Mirosław8b8ddc62011-04-08 02:38:47 +00003326 NETIF_F_HW_VLAN_RX | NETIF_F_HW_VLAN_FILTER;
Ajit Khaparde4b972912011-04-06 18:07:43 +00003327
Padmanabh Ratnakareb8a50d2011-06-11 15:58:46 -07003328 netdev->vlan_features |= NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6 |
Michał Mirosław79032642010-11-30 06:38:00 +00003329 NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;
Ajit Khaparde51c59872009-11-29 17:54:54 +00003330
Ajit Khapardefbc13f02012-03-18 06:23:21 +00003331 netdev->priv_flags |= IFF_UNICAST_FLT;
3332
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003333 netdev->flags |= IFF_MULTICAST;
3334
Sarveshwar Bandib7e58872012-06-13 19:51:43 +00003335 netif_set_gso_max_size(netdev, 65535 - ETH_HLEN);
Ajit Khapardec190e3c2009-09-04 03:12:29 +00003336
Sathya Perla10ef9ab2012-02-09 18:05:27 +00003337 netdev->netdev_ops = &be_netdev_ops;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003338
3339 SET_ETHTOOL_OPS(netdev, &be_ethtool_ops);
3340
Sathya Perla10ef9ab2012-02-09 18:05:27 +00003341 for_all_evt_queues(adapter, eqo, i)
3342 netif_napi_add(netdev, &eqo->napi, be_poll, BE_NAPI_WEIGHT);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003343}
3344
3345static void be_unmap_pci_bars(struct be_adapter *adapter)
3346{
Sathya Perla8788fdc2009-07-27 22:52:03 +00003347 if (adapter->csr)
3348 iounmap(adapter->csr);
3349 if (adapter->db)
3350 iounmap(adapter->db);
Parav Pandit045508a2012-03-26 14:27:13 +00003351 if (adapter->roce_db.base)
3352 pci_iounmap(adapter->pdev, adapter->roce_db.base);
3353}
3354
3355static int lancer_roce_map_pci_bars(struct be_adapter *adapter)
3356{
3357 struct pci_dev *pdev = adapter->pdev;
3358 u8 __iomem *addr;
3359
3360 addr = pci_iomap(pdev, 2, 0);
3361 if (addr == NULL)
3362 return -ENOMEM;
3363
3364 adapter->roce_db.base = addr;
3365 adapter->roce_db.io_addr = pci_resource_start(pdev, 2);
3366 adapter->roce_db.size = 8192;
3367 adapter->roce_db.total_size = pci_resource_len(pdev, 2);
3368 return 0;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003369}
3370
3371static int be_map_pci_bars(struct be_adapter *adapter)
3372{
3373 u8 __iomem *addr;
Sathya Perladb3ea782011-08-22 19:41:52 +00003374 int db_reg;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003375
Sathya Perlafe6d2a32010-11-21 23:25:50 +00003376 if (lancer_chip(adapter)) {
Parav Pandit045508a2012-03-26 14:27:13 +00003377 if (be_type_2_3(adapter)) {
3378 addr = ioremap_nocache(
3379 pci_resource_start(adapter->pdev, 0),
3380 pci_resource_len(adapter->pdev, 0));
3381 if (addr == NULL)
3382 return -ENOMEM;
3383 adapter->db = addr;
3384 }
3385 if (adapter->if_type == SLI_INTF_TYPE_3) {
3386 if (lancer_roce_map_pci_bars(adapter))
3387 goto pci_map_err;
3388 }
Sathya Perlafe6d2a32010-11-21 23:25:50 +00003389 return 0;
3390 }
3391
Sarveshwar Bandiba343c72010-03-31 02:56:12 +00003392 if (be_physfn(adapter)) {
3393 addr = ioremap_nocache(pci_resource_start(adapter->pdev, 2),
3394 pci_resource_len(adapter->pdev, 2));
3395 if (addr == NULL)
3396 return -ENOMEM;
3397 adapter->csr = addr;
3398 }
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003399
Sarveshwar Bandiba343c72010-03-31 02:56:12 +00003400 if (adapter->generation == BE_GEN2) {
Sarveshwar Bandiba343c72010-03-31 02:56:12 +00003401 db_reg = 4;
3402 } else {
Sarveshwar Bandiba343c72010-03-31 02:56:12 +00003403 if (be_physfn(adapter))
3404 db_reg = 4;
3405 else
3406 db_reg = 0;
3407 }
3408 addr = ioremap_nocache(pci_resource_start(adapter->pdev, db_reg),
3409 pci_resource_len(adapter->pdev, db_reg));
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003410 if (addr == NULL)
3411 goto pci_map_err;
Sathya Perla8788fdc2009-07-27 22:52:03 +00003412 adapter->db = addr;
Parav Pandit045508a2012-03-26 14:27:13 +00003413 if (adapter->sli_family == SKYHAWK_SLI_FAMILY) {
3414 adapter->roce_db.size = 4096;
3415 adapter->roce_db.io_addr =
3416 pci_resource_start(adapter->pdev, db_reg);
3417 adapter->roce_db.total_size =
3418 pci_resource_len(adapter->pdev, db_reg);
3419 }
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003420 return 0;
3421pci_map_err:
3422 be_unmap_pci_bars(adapter);
3423 return -ENOMEM;
3424}
3425
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003426static void be_ctrl_cleanup(struct be_adapter *adapter)
3427{
Sathya Perla8788fdc2009-07-27 22:52:03 +00003428 struct be_dma_mem *mem = &adapter->mbox_mem_alloced;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003429
3430 be_unmap_pci_bars(adapter);
3431
3432 if (mem->va)
Ivan Vecera2b7bceb2011-02-02 08:05:12 +00003433 dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
3434 mem->dma);
Sathya Perlae7b909a2009-11-22 22:01:10 +00003435
Sathya Perla5b8821b2011-08-02 19:57:44 +00003436 mem = &adapter->rx_filter;
Sathya Perlae7b909a2009-11-22 22:01:10 +00003437 if (mem->va)
Ivan Vecera2b7bceb2011-02-02 08:05:12 +00003438 dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
3439 mem->dma);
Sathya Perlacc7d7232012-08-28 20:37:43 +00003440 kfree(adapter->pmac_id);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003441}
3442
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003443static int be_ctrl_init(struct be_adapter *adapter)
3444{
Sathya Perla8788fdc2009-07-27 22:52:03 +00003445 struct be_dma_mem *mbox_mem_alloc = &adapter->mbox_mem_alloced;
3446 struct be_dma_mem *mbox_mem_align = &adapter->mbox_mem;
Sathya Perla5b8821b2011-08-02 19:57:44 +00003447 struct be_dma_mem *rx_filter = &adapter->rx_filter;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003448 int status;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003449
3450 status = be_map_pci_bars(adapter);
3451 if (status)
Sathya Perlae7b909a2009-11-22 22:01:10 +00003452 goto done;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003453
3454 mbox_mem_alloc->size = sizeof(struct be_mcc_mailbox) + 16;
Ivan Vecera2b7bceb2011-02-02 08:05:12 +00003455 mbox_mem_alloc->va = dma_alloc_coherent(&adapter->pdev->dev,
3456 mbox_mem_alloc->size,
3457 &mbox_mem_alloc->dma,
3458 GFP_KERNEL);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003459 if (!mbox_mem_alloc->va) {
Sathya Perlae7b909a2009-11-22 22:01:10 +00003460 status = -ENOMEM;
3461 goto unmap_pci_bars;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003462 }
3463 mbox_mem_align->size = sizeof(struct be_mcc_mailbox);
3464 mbox_mem_align->va = PTR_ALIGN(mbox_mem_alloc->va, 16);
3465 mbox_mem_align->dma = PTR_ALIGN(mbox_mem_alloc->dma, 16);
3466 memset(mbox_mem_align->va, 0, sizeof(struct be_mcc_mailbox));
Sathya Perlae7b909a2009-11-22 22:01:10 +00003467
Sathya Perla5b8821b2011-08-02 19:57:44 +00003468 rx_filter->size = sizeof(struct be_cmd_req_rx_filter);
3469 rx_filter->va = dma_alloc_coherent(&adapter->pdev->dev, rx_filter->size,
3470 &rx_filter->dma, GFP_KERNEL);
3471 if (rx_filter->va == NULL) {
Sathya Perlae7b909a2009-11-22 22:01:10 +00003472 status = -ENOMEM;
3473 goto free_mbox;
3474 }
Sathya Perla5b8821b2011-08-02 19:57:44 +00003475 memset(rx_filter->va, 0, rx_filter->size);
Sathya Perlae7b909a2009-11-22 22:01:10 +00003476
Sathya Perlacc7d7232012-08-28 20:37:43 +00003477 /* primary mac needs 1 pmac entry */
3478 adapter->pmac_id = kcalloc(adapter->max_pmac_cnt + 1,
3479 sizeof(*adapter->pmac_id), GFP_KERNEL);
3480 if (!adapter->pmac_id)
3481 return -ENOMEM;
3482
Ivan Vecera29849612010-12-14 05:43:19 +00003483 mutex_init(&adapter->mbox_lock);
Sathya Perla8788fdc2009-07-27 22:52:03 +00003484 spin_lock_init(&adapter->mcc_lock);
3485 spin_lock_init(&adapter->mcc_cq_lock);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003486
Sarveshwar Bandidd131e72010-05-25 16:16:32 -07003487 init_completion(&adapter->flash_compl);
Sathya Perlacf588472010-02-14 21:22:01 +00003488 pci_save_state(adapter->pdev);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003489 return 0;
Sathya Perlae7b909a2009-11-22 22:01:10 +00003490
3491free_mbox:
Ivan Vecera2b7bceb2011-02-02 08:05:12 +00003492 dma_free_coherent(&adapter->pdev->dev, mbox_mem_alloc->size,
3493 mbox_mem_alloc->va, mbox_mem_alloc->dma);
Sathya Perlae7b909a2009-11-22 22:01:10 +00003494
3495unmap_pci_bars:
3496 be_unmap_pci_bars(adapter);
3497
3498done:
3499 return status;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003500}
3501
3502static void be_stats_cleanup(struct be_adapter *adapter)
3503{
Sathya Perla3abcded2010-10-03 22:12:27 -07003504 struct be_dma_mem *cmd = &adapter->stats_cmd;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003505
3506 if (cmd->va)
Ivan Vecera2b7bceb2011-02-02 08:05:12 +00003507 dma_free_coherent(&adapter->pdev->dev, cmd->size,
3508 cmd->va, cmd->dma);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003509}
3510
3511static int be_stats_init(struct be_adapter *adapter)
3512{
Sathya Perla3abcded2010-10-03 22:12:27 -07003513 struct be_dma_mem *cmd = &adapter->stats_cmd;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003514
Selvin Xavier005d5692011-05-16 07:36:35 +00003515 if (adapter->generation == BE_GEN2) {
Ajit Khaparde89a88ab2011-05-16 07:36:18 +00003516 cmd->size = sizeof(struct be_cmd_req_get_stats_v0);
Selvin Xavier005d5692011-05-16 07:36:35 +00003517 } else {
3518 if (lancer_chip(adapter))
3519 cmd->size = sizeof(struct lancer_cmd_req_pport_stats);
3520 else
3521 cmd->size = sizeof(struct be_cmd_req_get_stats_v1);
3522 }
Ivan Vecera2b7bceb2011-02-02 08:05:12 +00003523 cmd->va = dma_alloc_coherent(&adapter->pdev->dev, cmd->size, &cmd->dma,
3524 GFP_KERNEL);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003525 if (cmd->va == NULL)
3526 return -1;
David S. Millerd291b9a2010-01-28 21:36:21 -08003527 memset(cmd->va, 0, cmd->size);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003528 return 0;
3529}
3530
/* PCI remove callback: tear down the adapter in (roughly) the reverse
 * order of be_probe(). Ordering notes visible below: the recovery
 * worker is cancelled before the netdev is unregistered, and the FW is
 * told we are done with cmds only after be_clear() has run.
 */
static void __devexit be_remove(struct pci_dev *pdev)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);

	if (!adapter)
		return;

	be_roce_dev_remove(adapter);

	cancel_delayed_work_sync(&adapter->func_recovery_work);

	unregister_netdev(adapter->netdev);

	be_clear(adapter);

	/* tell fw we're done with firing cmds */
	be_cmd_fw_clean(adapter);

	be_stats_cleanup(adapter);

	be_ctrl_cleanup(adapter);

	pci_disable_pcie_error_reporting(pdev);

	pci_set_drvdata(pdev, NULL);
	pci_release_regions(pdev);
	pci_disable_device(pdev);

	/* netdev freed last: adapter is embedded in it (netdev_priv) */
	free_netdev(adapter->netdev);
}
3561
Ajit Khaparde4762f6c2012-03-18 06:23:11 +00003562bool be_is_wol_supported(struct be_adapter *adapter)
3563{
3564 return ((adapter->wol_cap & BE_WOL_CAP) &&
3565 !be_is_wol_excluded(adapter)) ? true : false;
3566}
3567
Somnath Kotur941a77d2012-05-17 22:59:03 +00003568u32 be_get_fw_log_level(struct be_adapter *adapter)
3569{
3570 struct be_dma_mem extfat_cmd;
3571 struct be_fat_conf_params *cfgs;
3572 int status;
3573 u32 level = 0;
3574 int j;
3575
3576 memset(&extfat_cmd, 0, sizeof(struct be_dma_mem));
3577 extfat_cmd.size = sizeof(struct be_cmd_resp_get_ext_fat_caps);
3578 extfat_cmd.va = pci_alloc_consistent(adapter->pdev, extfat_cmd.size,
3579 &extfat_cmd.dma);
3580
3581 if (!extfat_cmd.va) {
3582 dev_err(&adapter->pdev->dev, "%s: Memory allocation failure\n",
3583 __func__);
3584 goto err;
3585 }
3586
3587 status = be_cmd_get_ext_fat_capabilites(adapter, &extfat_cmd);
3588 if (!status) {
3589 cfgs = (struct be_fat_conf_params *)(extfat_cmd.va +
3590 sizeof(struct be_cmd_resp_hdr));
Anton Blanchardac46a462012-07-24 15:05:25 +00003591 for (j = 0; j < le32_to_cpu(cfgs->module[0].num_modes); j++) {
Somnath Kotur941a77d2012-05-17 22:59:03 +00003592 if (cfgs->module[0].trace_lvl[j].mode == MODE_UART)
3593 level = cfgs->module[0].trace_lvl[j].dbg_lvl;
3594 }
3595 }
3596 pci_free_consistent(adapter->pdev, extfat_cmd.size, extfat_cmd.va,
3597 extfat_cmd.dma);
3598err:
3599 return level;
3600}
Sathya Perla39f1d942012-05-08 19:41:24 +00003601static int be_get_initial_config(struct be_adapter *adapter)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003602{
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003603 int status;
Somnath Kotur941a77d2012-05-17 22:59:03 +00003604 u32 level;
Sathya Perla43a04fdc2009-10-14 20:21:17 +00003605
Sathya Perla3abcded2010-10-03 22:12:27 -07003606 status = be_cmd_query_fw_cfg(adapter, &adapter->port_num,
3607 &adapter->function_mode, &adapter->function_caps);
Sathya Perla2243e2e2009-11-22 22:02:03 +00003608 if (status)
3609 return status;
3610
Sathya Perla752961a2011-10-24 02:45:03 +00003611 if (adapter->function_mode & FLEX10_MODE)
Ajit Khaparde456d9c92012-03-18 06:23:31 +00003612 adapter->max_vlans = BE_NUM_VLANS_SUPPORTED/8;
Ajit Khaparde82903e42010-02-09 01:34:57 +00003613 else
3614 adapter->max_vlans = BE_NUM_VLANS_SUPPORTED;
3615
Ajit Khapardefbc13f02012-03-18 06:23:21 +00003616 if (be_physfn(adapter))
3617 adapter->max_pmac_cnt = BE_UC_PMAC_COUNT;
3618 else
3619 adapter->max_pmac_cnt = BE_VF_UC_PMAC_COUNT;
3620
Ajit Khaparde9e1453c2011-02-20 11:42:22 +00003621 status = be_cmd_get_cntl_attributes(adapter);
3622 if (status)
3623 return status;
3624
Ajit Khaparde4762f6c2012-03-18 06:23:11 +00003625 status = be_cmd_get_acpi_wol_cap(adapter);
3626 if (status) {
3627 /* in case of a failure to get wol capabillities
3628 * check the exclusion list to determine WOL capability */
3629 if (!be_is_wol_excluded(adapter))
3630 adapter->wol_cap |= BE_WOL_CAP;
3631 }
3632
3633 if (be_is_wol_supported(adapter))
3634 adapter->wol = true;
3635
Padmanabh Ratnakar7aeb2152012-07-12 03:56:46 +00003636 /* Must be a power of 2 or else MODULO will BUG_ON */
3637 adapter->be_get_temp_freq = 64;
3638
Somnath Kotur941a77d2012-05-17 22:59:03 +00003639 level = be_get_fw_log_level(adapter);
3640 adapter->msg_enable = level <= FW_LOG_LEVEL_DEFAULT ? NETIF_MSG_HW : 0;
3641
Sathya Perla2243e2e2009-11-22 22:02:03 +00003642 return 0;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003643}
3644
Sathya Perla39f1d942012-05-08 19:41:24 +00003645static int be_dev_type_check(struct be_adapter *adapter)
Sathya Perlafe6d2a32010-11-21 23:25:50 +00003646{
3647 struct pci_dev *pdev = adapter->pdev;
3648 u32 sli_intf = 0, if_type;
3649
3650 switch (pdev->device) {
3651 case BE_DEVICE_ID1:
3652 case OC_DEVICE_ID1:
3653 adapter->generation = BE_GEN2;
3654 break;
3655 case BE_DEVICE_ID2:
3656 case OC_DEVICE_ID2:
3657 adapter->generation = BE_GEN3;
3658 break;
3659 case OC_DEVICE_ID3:
Mammatha Edhala12f4d0a2011-05-18 03:26:22 +00003660 case OC_DEVICE_ID4:
Sathya Perlafe6d2a32010-11-21 23:25:50 +00003661 pci_read_config_dword(pdev, SLI_INTF_REG_OFFSET, &sli_intf);
Parav Pandit045508a2012-03-26 14:27:13 +00003662 adapter->if_type = (sli_intf & SLI_INTF_IF_TYPE_MASK) >>
3663 SLI_INTF_IF_TYPE_SHIFT;
Sathya Perlafe6d2a32010-11-21 23:25:50 +00003664 if_type = (sli_intf & SLI_INTF_IF_TYPE_MASK) >>
3665 SLI_INTF_IF_TYPE_SHIFT;
Sathya Perlafe6d2a32010-11-21 23:25:50 +00003666 if (((sli_intf & SLI_INTF_VALID_MASK) != SLI_INTF_VALID) ||
Parav Pandit045508a2012-03-26 14:27:13 +00003667 !be_type_2_3(adapter)) {
3668 dev_err(&pdev->dev, "SLI_INTF reg val is not valid\n");
3669 return -EINVAL;
3670 }
3671 adapter->sli_family = ((sli_intf & SLI_INTF_FAMILY_MASK) >>
3672 SLI_INTF_FAMILY_SHIFT);
3673 adapter->generation = BE_GEN3;
3674 break;
3675 case OC_DEVICE_ID5:
3676 pci_read_config_dword(pdev, SLI_INTF_REG_OFFSET, &sli_intf);
3677 if ((sli_intf & SLI_INTF_VALID_MASK) != SLI_INTF_VALID) {
Sathya Perlafe6d2a32010-11-21 23:25:50 +00003678 dev_err(&pdev->dev, "SLI_INTF reg val is not valid\n");
3679 return -EINVAL;
3680 }
Sathya Perlafe6d2a32010-11-21 23:25:50 +00003681 adapter->sli_family = ((sli_intf & SLI_INTF_FAMILY_MASK) >>
3682 SLI_INTF_FAMILY_SHIFT);
3683 adapter->generation = BE_GEN3;
3684 break;
3685 default:
3686 adapter->generation = 0;
3687 }
Sathya Perla39f1d942012-05-08 19:41:24 +00003688
3689 pci_read_config_dword(adapter->pdev, SLI_INTF_REG_OFFSET, &sli_intf);
3690 adapter->virtfn = (sli_intf & SLI_INTF_FT_MASK) ? 1 : 0;
Sathya Perlafe6d2a32010-11-21 23:25:50 +00003691 return 0;
3692}
3693
/* Attempt to recover a Lancer function after a SLIPORT error: wait for
 * the chip to report ready again, tear down and rebuild the adapter
 * state, and re-open the interface if it was running.
 * Returns 0 on successful recovery, else the failing step's status.
 */
static int lancer_recover_func(struct be_adapter *adapter)
{
	int status;

	status = lancer_test_and_set_rdy_state(adapter);
	if (status)
		goto err;

	if (netif_running(adapter->netdev))
		be_close(adapter->netdev);

	be_clear(adapter);

	/* clear latched error state before re-initializing */
	adapter->hw_error = false;
	adapter->fw_timeout = false;

	status = be_setup(adapter);
	if (status)
		goto err;

	if (netif_running(adapter->netdev)) {
		status = be_open(adapter->netdev);
		if (status)
			goto err;
	}

	dev_err(&adapter->pdev->dev,
		"Adapter SLIPORT recovery succeeded\n");
	return 0;
err:
	dev_err(&adapter->pdev->dev,
		"Adapter SLIPORT recovery failed\n");

	return status;
}
3729
/* Periodic (1s) worker that polls for HW errors and, on Lancer chips,
 * attempts function recovery. The netdev is detached (under rtnl)
 * around the attempt and re-attached only if recovery succeeds.
 * Always reschedules itself.
 */
static void be_func_recovery_task(struct work_struct *work)
{
	struct be_adapter *adapter =
		container_of(work, struct be_adapter, func_recovery_work.work);
	int status;

	be_detect_error(adapter);

	if (adapter->hw_error && lancer_chip(adapter)) {

		/* skip recovery while an EEH error is outstanding */
		if (adapter->eeh_error)
			goto out;

		rtnl_lock();
		netif_device_detach(adapter->netdev);
		rtnl_unlock();

		status = lancer_recover_func(adapter);

		if (!status)
			netif_device_attach(adapter->netdev);
	}

out:
	schedule_delayed_work(&adapter->func_recovery_work,
			      msecs_to_jiffies(1000));
}
3757
/* Periodic (1s) housekeeping worker: reaps MCC completions while the
 * interface is down, fires the async stats cmd, polls die temperature
 * every be_get_temp_freq ticks, replenishes starved RX queues and
 * updates per-EQ state via be_eqd_update().
 */
static void be_worker(struct work_struct *work)
{
	struct be_adapter *adapter =
		container_of(work, struct be_adapter, work.work);
	struct be_rx_obj *rxo;
	struct be_eq_obj *eqo;
	int i;

	/* when interrupts are not yet enabled, just reap any pending
	 * mcc completions */
	if (!netif_running(adapter->netdev)) {
		local_bh_disable();
		be_process_mcc(adapter);
		local_bh_enable();
		goto reschedule;
	}

	/* only one stats cmd outstanding at a time */
	if (!adapter->stats_cmd_sent) {
		if (lancer_chip(adapter))
			lancer_cmd_get_pport_stats(adapter,
						   &adapter->stats_cmd);
		else
			be_cmd_get_stats(adapter, &adapter->stats_cmd);
	}

	/* be_get_temp_freq is a power of 2 (set in be_get_initial_config) */
	if (MODULO(adapter->work_counter, adapter->be_get_temp_freq) == 0)
		be_cmd_get_die_temperature(adapter);

	for_all_rx_queues(adapter, rxo, i) {
		if (rxo->rx_post_starved) {
			rxo->rx_post_starved = false;
			be_post_rx_frags(rxo, GFP_KERNEL);
		}
	}

	for_all_evt_queues(adapter, eqo, i)
		be_eqd_update(adapter, eqo);

reschedule:
	adapter->work_counter++;
	schedule_delayed_work(&adapter->work, msecs_to_jiffies(1000));
}
3800
Sathya Perla39f1d942012-05-08 19:41:24 +00003801static bool be_reset_required(struct be_adapter *adapter)
3802{
Sathya Perlad79c0a22012-06-05 19:37:22 +00003803 return be_find_vfs(adapter, ENABLED) > 0 ? false : true;
Sathya Perla39f1d942012-05-08 19:41:24 +00003804}
3805
/* PCI probe callback: bring up one adapter end-to-end.
 * Sequence: enable PCI device and claim regions -> alloc the netdev
 * (the adapter struct lives in its private area) -> classify the chip
 * -> set the DMA mask -> map BARs and allocate control structures
 * (be_ctrl_init) -> sync with FW and optionally reset the function ->
 * allocate stats buffers and read initial config -> be_setup() ->
 * register the netdev and kick off the recovery worker.
 * Every failure unwinds through the goto ladder at the bottom, which
 * mirrors the setup order in reverse.
 */
static int __devinit be_probe(struct pci_dev *pdev,
			const struct pci_device_id *pdev_id)
{
	int status = 0;
	struct be_adapter *adapter;
	struct net_device *netdev;
	char port_name;

	status = pci_enable_device(pdev);
	if (status)
		goto do_none;

	status = pci_request_regions(pdev, DRV_NAME);
	if (status)
		goto disable_dev;
	pci_set_master(pdev);

	netdev = alloc_etherdev_mqs(sizeof(*adapter), MAX_TX_QS, MAX_RX_QS);
	if (netdev == NULL) {
		status = -ENOMEM;
		goto rel_reg;
	}
	adapter = netdev_priv(netdev);
	adapter->pdev = pdev;
	pci_set_drvdata(pdev, adapter);

	status = be_dev_type_check(adapter);
	if (status)
		goto free_netdev;

	adapter->netdev = netdev;
	SET_NETDEV_DEV(netdev, &pdev->dev);

	/* prefer a 64-bit DMA mask; fall back to 32-bit */
	status = dma_set_mask(&pdev->dev, DMA_BIT_MASK(64));
	if (!status) {
		netdev->features |= NETIF_F_HIGHDMA;
	} else {
		status = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
		if (status) {
			dev_err(&pdev->dev, "Could not set PCI DMA Mask\n");
			goto free_netdev;
		}
	}

	/* AER is best-effort: failure is logged but not fatal */
	status = pci_enable_pcie_error_reporting(pdev);
	if (status)
		dev_err(&pdev->dev, "Could not use PCIe error reporting\n");

	status = be_ctrl_init(adapter);
	if (status)
		goto free_netdev;

	/* sync up with fw's ready state */
	if (be_physfn(adapter)) {
		status = be_fw_wait_ready(adapter);
		if (status)
			goto ctrl_clean;
	}

	/* tell fw we're ready to fire cmds */
	status = be_cmd_fw_init(adapter);
	if (status)
		goto ctrl_clean;

	if (be_reset_required(adapter)) {
		status = be_cmd_reset_function(adapter);
		if (status)
			goto ctrl_clean;
	}

	/* The INTR bit may be set in the card when probed by a kdump kernel
	 * after a crash.
	 */
	if (!lancer_chip(adapter))
		be_intr_set(adapter, false);

	status = be_stats_init(adapter);
	if (status)
		goto ctrl_clean;

	status = be_get_initial_config(adapter);
	if (status)
		goto stats_clean;

	INIT_DELAYED_WORK(&adapter->work, be_worker);
	INIT_DELAYED_WORK(&adapter->func_recovery_work, be_func_recovery_task);
	adapter->rx_fc = adapter->tx_fc = true;

	status = be_setup(adapter);
	if (status)
		goto msix_disable;

	be_netdev_init(netdev);
	status = register_netdev(netdev);
	if (status != 0)
		goto unsetup;

	be_roce_dev_add(adapter);

	schedule_delayed_work(&adapter->func_recovery_work,
			      msecs_to_jiffies(1000));

	/* NOTE(review): return value ignored; port_name may be stale on
	 * cmd failure — confirm be_cmd_query_port_name's contract
	 */
	be_cmd_query_port_name(adapter, &port_name);

	dev_info(&pdev->dev, "%s: %s port %c\n", netdev->name, nic_name(pdev),
		 port_name);

	return 0;

unsetup:
	be_clear(adapter);
msix_disable:
	be_msix_disable(adapter);
stats_clean:
	be_stats_cleanup(adapter);
ctrl_clean:
	be_ctrl_cleanup(adapter);
free_netdev:
	free_netdev(netdev);
	pci_set_drvdata(pdev, NULL);
rel_reg:
	pci_release_regions(pdev);
disable_dev:
	pci_disable_device(pdev);
do_none:
	dev_err(&pdev->dev, "%s initialization failed\n", nic_name(pdev));
	return status;
}
3934
/* PM suspend callback: arm WOL if enabled, stop the recovery worker,
 * close the interface (under rtnl) and release adapter resources
 * before saving state and powering the device down.
 */
static int be_suspend(struct pci_dev *pdev, pm_message_t state)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);
	struct net_device *netdev = adapter->netdev;

	if (adapter->wol)
		be_setup_wol(adapter, true);

	cancel_delayed_work_sync(&adapter->func_recovery_work);

	netif_device_detach(netdev);
	if (netif_running(netdev)) {
		rtnl_lock();
		be_close(netdev);
		rtnl_unlock();
	}
	be_clear(adapter);

	pci_save_state(pdev);
	pci_disable_device(pdev);
	pci_set_power_state(pdev, pci_choose_state(pdev, state));
	return 0;
}
3958
/* PM resume callback: re-enable the PCI device, restore its state,
 * re-init FW cmd support, rebuild the adapter and re-open the
 * interface, then restart the recovery worker and disarm WOL.
 */
static int be_resume(struct pci_dev *pdev)
{
	int status = 0;
	struct be_adapter *adapter = pci_get_drvdata(pdev);
	struct net_device *netdev = adapter->netdev;

	netif_device_detach(netdev);

	status = pci_enable_device(pdev);
	if (status)
		return status;

	pci_set_power_state(pdev, 0);
	pci_restore_state(pdev);

	/* tell fw we're ready to fire cmds */
	status = be_cmd_fw_init(adapter);
	if (status)
		return status;

	/* NOTE(review): be_setup()/be_open() return values are ignored
	 * here — on failure the device is re-attached anyway; confirm
	 * whether that is intentional best-effort behavior
	 */
	be_setup(adapter);
	if (netif_running(netdev)) {
		rtnl_lock();
		be_open(netdev);
		rtnl_unlock();
	}

	schedule_delayed_work(&adapter->func_recovery_work,
			      msecs_to_jiffies(1000));
	netif_device_attach(netdev);

	if (adapter->wol)
		be_setup_wol(adapter, false);

	return 0;
}
3995
/*
 * Shutdown callback: stop the workers, detach the netdev, arm WOL if
 * configured, and reset the function.
 * An FLR will stop BE from DMAing any data.
 */
static void be_shutdown(struct pci_dev *pdev)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);

	if (!adapter)
		return;

	cancel_delayed_work_sync(&adapter->work);
	cancel_delayed_work_sync(&adapter->func_recovery_work);

	netif_device_detach(adapter->netdev);

	if (adapter->wol)
		be_setup_wol(adapter, true);

	be_cmd_reset_function(adapter);

	pci_disable_device(pdev);
}
4018
/* EEH/AER error-detected callback: latch the error, stop the recovery
 * worker, detach and close the netdev and tear down adapter state.
 * Returns DISCONNECT for a permanent failure, otherwise disables the
 * device and requests a slot reset.
 */
static pci_ers_result_t be_eeh_err_detected(struct pci_dev *pdev,
				pci_channel_state_t state)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);
	struct net_device *netdev = adapter->netdev;

	dev_err(&adapter->pdev->dev, "EEH error detected\n");

	/* eeh_error also tells be_func_recovery_task to stand down */
	adapter->eeh_error = true;

	cancel_delayed_work_sync(&adapter->func_recovery_work);

	rtnl_lock();
	netif_device_detach(netdev);
	rtnl_unlock();

	if (netif_running(netdev)) {
		rtnl_lock();
		be_close(netdev);
		rtnl_unlock();
	}
	be_clear(adapter);

	if (state == pci_channel_io_perm_failure)
		return PCI_ERS_RESULT_DISCONNECT;

	pci_disable_device(pdev);

	/* The error could cause the FW to trigger a flash debug dump.
	 * Resetting the card while flash dump is in progress
	 * can cause it not to recover; wait for it to finish
	 */
	ssleep(30);
	return PCI_ERS_RESULT_NEED_RESET;
}
4054
/* EEH slot-reset callback: clear latched error state, re-enable and
 * restore the PCI device, and wait for the FW to become ready.
 * Returns RECOVERED on success (recovery then continues in
 * be_eeh_resume()), otherwise DISCONNECT.
 */
static pci_ers_result_t be_eeh_reset(struct pci_dev *pdev)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);
	int status;

	dev_info(&adapter->pdev->dev, "EEH reset\n");
	be_clear_all_error(adapter);

	status = pci_enable_device(pdev);
	if (status)
		return PCI_ERS_RESULT_DISCONNECT;

	pci_set_master(pdev);
	pci_set_power_state(pdev, 0);
	pci_restore_state(pdev);

	/* Check if card is ok and fw is ready */
	status = be_fw_wait_ready(adapter);
	if (status)
		return PCI_ERS_RESULT_DISCONNECT;

	pci_cleanup_aer_uncorrect_error_status(pdev);
	return PCI_ERS_RESULT_RECOVERED;
}
4079
/* EEH resume callback: re-init FW cmds, reset the function, rebuild
 * adapter state, re-open the interface if it was running, then restart
 * the recovery worker and re-attach the netdev. Any failure just logs
 * an error and leaves the device detached.
 */
static void be_eeh_resume(struct pci_dev *pdev)
{
	int status = 0;
	struct be_adapter *adapter = pci_get_drvdata(pdev);
	struct net_device *netdev = adapter->netdev;

	dev_info(&adapter->pdev->dev, "EEH resume\n");

	pci_save_state(pdev);

	/* tell fw we're ready to fire cmds */
	status = be_cmd_fw_init(adapter);
	if (status)
		goto err;

	status = be_cmd_reset_function(adapter);
	if (status)
		goto err;

	status = be_setup(adapter);
	if (status)
		goto err;

	if (netif_running(netdev)) {
		status = be_open(netdev);
		if (status)
			goto err;
	}

	schedule_delayed_work(&adapter->func_recovery_work,
			      msecs_to_jiffies(1000));
	netif_device_attach(netdev);
	return;
err:
	dev_err(&adapter->pdev->dev, "EEH resume failed\n");
}
4116
/* PCI error-recovery (EEH/AER) callbacks registered via be_driver */
static struct pci_error_handlers be_eeh_handlers = {
	.error_detected = be_eeh_err_detected,
	.slot_reset = be_eeh_reset,
	.resume = be_eeh_resume,
};
4122
/* PCI driver descriptor: ties the device-id table to the probe/remove,
 * power-management, shutdown and error-recovery entry points above.
 */
static struct pci_driver be_driver = {
	.name = DRV_NAME,
	.id_table = be_dev_ids,
	.probe = be_probe,
	.remove = be_remove,
	.suspend = be_suspend,
	.resume = be_resume,
	.shutdown = be_shutdown,
	.err_handler = &be_eeh_handlers
};
4133
4134static int __init be_init_module(void)
4135{
Joe Perches8e95a202009-12-03 07:58:21 +00004136 if (rx_frag_size != 8192 && rx_frag_size != 4096 &&
4137 rx_frag_size != 2048) {
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004138 printk(KERN_WARNING DRV_NAME
4139 " : Module param rx_frag_size must be 2048/4096/8192."
4140 " Using 2048\n");
4141 rx_frag_size = 2048;
4142 }
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004143
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004144 return pci_register_driver(&be_driver);
4145}
4146module_init(be_init_module);
4147
/* Module exit: unregister the PCI driver; per-device teardown happens
 * through be_remove().
 */
static void __exit be_exit_module(void)
{
	pci_unregister_driver(&be_driver);
}
module_exit(be_exit_module);