blob: 136ed64ae1528fe69417f7b49520759e1f83021c [file] [log] [blame]
/*
 * Copyright (C) 2005 - 2011 Emulex
 * All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version 2
 * as published by the Free Software Foundation.  The full GNU General
 * Public License is included in this distribution in the file called COPYING.
 *
 * Contact Information:
 * linux-drivers@emulex.com
 *
 * Emulex
 * 3333 Susan Street
 * Costa Mesa, CA 92626
 */
17
Paul Gortmaker70c71602011-05-22 16:47:17 -040018#include <linux/prefetch.h>
Paul Gortmaker9d9779e2011-07-03 15:21:01 -040019#include <linux/module.h>
Sathya Perla6b7c5b92009-03-11 23:32:03 -070020#include "be.h"
Sathya Perla8788fdc2009-07-27 22:52:03 +000021#include "be_cmds.h"
Stephen Hemminger65f71b82009-03-27 00:25:24 -070022#include <asm/div64.h>
Sathya Perlad6b6d982012-09-05 01:56:48 +000023#include <linux/aer.h>
Sathya Perla6b7c5b92009-03-11 23:32:03 -070024
25MODULE_VERSION(DRV_VER);
26MODULE_DEVICE_TABLE(pci, be_dev_ids);
27MODULE_DESCRIPTION(DRV_DESC " " DRV_VER);
28MODULE_AUTHOR("ServerEngines Corporation");
29MODULE_LICENSE("GPL");
30
Sarveshwar Bandiba343c72010-03-31 02:56:12 +000031static unsigned int num_vfs;
Sarveshwar Bandiba343c72010-03-31 02:56:12 +000032module_param(num_vfs, uint, S_IRUGO);
Sarveshwar Bandiba343c72010-03-31 02:56:12 +000033MODULE_PARM_DESC(num_vfs, "Number of PCI VFs to initialize");
Sathya Perla6b7c5b92009-03-11 23:32:03 -070034
Sathya Perla11ac75e2011-12-13 00:58:50 +000035static ushort rx_frag_size = 2048;
36module_param(rx_frag_size, ushort, S_IRUGO);
37MODULE_PARM_DESC(rx_frag_size, "Size of a fragment that holds rcvd data.");
38
/* PCI device IDs claimed by this driver; the { 0 } entry terminates the
 * list.  Exported to userspace/modprobe via MODULE_DEVICE_TABLE below.
 */
static DEFINE_PCI_DEVICE_TABLE(be_dev_ids) = {
	{ PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID1) },
	{ PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID2) },
	{ PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID1) },
	{ PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID2) },
	{ PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID3)},
	{ PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID4)},
	{ PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID5)},
	{ 0 }
};
MODULE_DEVICE_TABLE(pci, be_dev_ids);
/* UE Status Low CSR */
/* Names of the HW blocks, one per bit of the low 32-bit unrecoverable-error
 * status register; indexed by bit position when decoding UE reports.
 * Trailing spaces in some entries are preserved as-is (used verbatim in
 * log output).
 */
static const char * const ue_status_low_desc[] = {
	"CEV",
	"CTX",
	"DBUF",
	"ERX",
	"Host",
	"MPU",
	"NDMA",
	"PTC ",
	"RDMA ",
	"RXF ",
	"RXIPS ",
	"RXULP0 ",
	"RXULP1 ",
	"RXULP2 ",
	"TIM ",
	"TPOST ",
	"TPRE ",
	"TXIPS ",
	"TXULP0 ",
	"TXULP1 ",
	"UC ",
	"WDMA ",
	"TXULP2 ",
	"HOST1 ",
	"P0_OB_LINK ",
	"P1_OB_LINK ",
	"HOST_GPIO ",
	"MBOX ",
	"AXGMAC0",
	"AXGMAC1",
	"JTAG",
	"MPU_INTPEND"
};
/* UE Status High CSR */
/* Names of the HW blocks, one per bit of the high 32-bit unrecoverable-error
 * status register; "Unknown" entries pad the table to 32 bits.
 */
static const char * const ue_status_hi_desc[] = {
	"LPCMEMHOST",
	"MGMT_MAC",
	"PCS0ONLINE",
	"MPU_IRAM",
	"PCS1ONLINE",
	"PCTL0",
	"PCTL1",
	"PMEM",
	"RR",
	"TXPB",
	"RXPP",
	"XAUI",
	"TXP",
	"ARM",
	"IPC",
	"HOST2",
	"HOST3",
	"HOST4",
	"HOST5",
	"HOST6",
	"HOST7",
	"HOST8",
	"HOST9",
	"NETC",
	"Unknown",
	"Unknown",
	"Unknown",
	"Unknown",
	"Unknown",
	"Unknown",
	"Unknown",
	"Unknown"
};
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700120
Sathya Perla752961a2011-10-24 02:45:03 +0000121/* Is BE in a multi-channel mode */
122static inline bool be_is_mc(struct be_adapter *adapter) {
123 return (adapter->function_mode & FLEX10_MODE ||
124 adapter->function_mode & VNIC_MODE ||
125 adapter->function_mode & UMC_ENABLED);
126}
127
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700128static void be_queue_free(struct be_adapter *adapter, struct be_queue_info *q)
129{
130 struct be_dma_mem *mem = &q->dma_mem;
Sathya Perla1cfafab2012-02-23 18:50:15 +0000131 if (mem->va) {
Ivan Vecera2b7bceb2011-02-02 08:05:12 +0000132 dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
133 mem->dma);
Sathya Perla1cfafab2012-02-23 18:50:15 +0000134 mem->va = NULL;
135 }
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700136}
137
/* Allocate a DMA-coherent ring of 'len' entries of 'entry_size' bytes for
 * queue 'q' and initialize the queue bookkeeping.
 * Returns 0 on success, -ENOMEM if the DMA allocation fails.
 */
static int be_queue_alloc(struct be_adapter *adapter, struct be_queue_info *q,
		u16 len, u16 entry_size)
{
	struct be_dma_mem *mem = &q->dma_mem;

	memset(q, 0, sizeof(*q));
	q->len = len;
	q->entry_size = entry_size;
	mem->size = len * entry_size;
	mem->va = dma_alloc_coherent(&adapter->pdev->dev, mem->size, &mem->dma,
				GFP_KERNEL);
	if (!mem->va)
		return -ENOMEM;
	/* zero the ring so HW never sees stale descriptors */
	memset(mem->va, 0, mem->size);
	return 0;
}
154
/* Enable/disable host interrupts via a read-modify-write of the
 * MEMBAR_CTRL interrupt-control register in PCI config space.
 * Writes only when the requested state differs from the current one;
 * skipped entirely while an EEH (PCI) error is pending.
 */
static void be_intr_set(struct be_adapter *adapter, bool enable)
{
	u32 reg, enabled;

	if (adapter->eeh_error)
		return;

	pci_read_config_dword(adapter->pdev, PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET,
				&reg);
	enabled = reg & MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;

	if (!enabled && enable)
		reg |= MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
	else if (enabled && !enable)
		reg &= ~MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
	else
		return;	/* already in the requested state; avoid the write */

	pci_write_config_dword(adapter->pdev,
			PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET, reg);
}
176
/* Ring the RX-queue doorbell: tell HW that 'posted' new buffers are
 * available on ring 'qid'.  The wmb() orders the descriptor writes
 * before the doorbell so HW never sees a stale ring.
 */
static void be_rxq_notify(struct be_adapter *adapter, u16 qid, u16 posted)
{
	u32 val = 0;
	val |= qid & DB_RQ_RING_ID_MASK;
	val |= posted << DB_RQ_NUM_POSTED_SHIFT;

	wmb();
	iowrite32(val, adapter->db + DB_RQ_OFFSET);
}
186
/* Ring the TX-queue doorbell: tell HW that 'posted' new wrbs were placed
 * on ring 'qid'.  The wmb() orders wrb writes before the doorbell.
 */
static void be_txq_notify(struct be_adapter *adapter, u16 qid, u16 posted)
{
	u32 val = 0;
	val |= qid & DB_TXULP_RING_ID_MASK;
	val |= (posted & DB_TXULP_NUM_POSTED_MASK) << DB_TXULP_NUM_POSTED_SHIFT;

	wmb();
	iowrite32(val, adapter->db + DB_TXULP1_OFFSET);
}
196
/* Notify HW about processed event-queue entries on EQ 'qid':
 * acknowledge 'num_popped' events and optionally re-arm the EQ
 * and/or clear the interrupt.  No-op while an EEH error is pending.
 */
static void be_eq_notify(struct be_adapter *adapter, u16 qid,
		bool arm, bool clear_int, u16 num_popped)
{
	u32 val = 0;
	val |= qid & DB_EQ_RING_ID_MASK;
	/* high-order ring-id bits live in a separate field of the doorbell */
	val |= ((qid & DB_EQ_RING_ID_EXT_MASK) <<
			DB_EQ_RING_ID_EXT_MASK_SHIFT);

	if (adapter->eeh_error)
		return;

	if (arm)
		val |= 1 << DB_EQ_REARM_SHIFT;
	if (clear_int)
		val |= 1 << DB_EQ_CLR_SHIFT;
	val |= 1 << DB_EQ_EVNT_SHIFT;
	val |= num_popped << DB_EQ_NUM_POPPED_SHIFT;
	iowrite32(val, adapter->db + DB_EQ_OFFSET);
}
216
/* Notify HW about processed completion-queue entries on CQ 'qid':
 * acknowledge 'num_popped' completions and optionally re-arm the CQ.
 * No-op while an EEH error is pending.
 */
void be_cq_notify(struct be_adapter *adapter, u16 qid, bool arm, u16 num_popped)
{
	u32 val = 0;
	val |= qid & DB_CQ_RING_ID_MASK;
	/* high-order ring-id bits live in a separate field of the doorbell */
	val |= ((qid & DB_CQ_RING_ID_EXT_MASK) <<
			DB_CQ_RING_ID_EXT_MASK_SHIFT);

	if (adapter->eeh_error)
		return;

	if (arm)
		val |= 1 << DB_CQ_REARM_SHIFT;
	val |= num_popped << DB_CQ_NUM_POPPED_SHIFT;
	iowrite32(val, adapter->db + DB_CQ_OFFSET);
}
232
/* ndo_set_mac_address handler.
 * Validates the new address, programs it into the adapter (pmac add,
 * then delete of the previously-active pmac) and finally updates
 * netdev->dev_addr.  BE VFs cannot change the MAC (it is owned by the
 * PF), so for them the call only succeeds if the requested MAC matches
 * the one already provisioned.
 * Returns 0 on success or a negative errno / FW status on failure.
 */
static int be_mac_addr_set(struct net_device *netdev, void *p)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct sockaddr *addr = p;
	int status = 0;
	u8 current_mac[ETH_ALEN];
	u32 pmac_id = adapter->pmac_id[0];
	bool active_mac = true;

	if (!is_valid_ether_addr(addr->sa_data))
		return -EADDRNOTAVAIL;

	/* For BE VF, MAC address is already activated by PF.
	 * Hence only operation left is updating netdev->devaddr.
	 * Update it if user is passing the same MAC which was used
	 * during configuring VF MAC from PF(Hypervisor).
	 */
	if (!lancer_chip(adapter) && !be_physfn(adapter)) {
		status = be_cmd_mac_addr_query(adapter, current_mac,
					       false, adapter->if_handle, 0);
		if (!status && !memcmp(current_mac, addr->sa_data, ETH_ALEN))
			goto done;
		else
			goto err;
	}

	/* nothing to do if the requested MAC is already in use */
	if (!memcmp(addr->sa_data, netdev->dev_addr, ETH_ALEN))
		goto done;

	/* For Lancer check if any MAC is active.
	 * If active, get its mac id.
	 */
	if (lancer_chip(adapter) && !be_physfn(adapter))
		be_cmd_get_mac_from_list(adapter, current_mac, &active_mac,
					 &pmac_id, 0);

	/* program the new MAC before removing the old one */
	status = be_cmd_pmac_add(adapter, (u8 *)addr->sa_data,
				 adapter->if_handle,
				 &adapter->pmac_id[0], 0);

	if (status)
		goto err;

	if (active_mac)
		be_cmd_pmac_del(adapter, adapter->if_handle,
				pmac_id, 0);
done:
	memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
	return 0;
err:
	dev_err(&adapter->pdev->dev, "MAC %pM set Failed\n", addr->sa_data);
	return status;
}
286
/* Copy the v0 (BE2) HW stats, DMA'ed by FW in little-endian layout, into
 * the driver's cumulative drv_stats after byte-swapping to CPU order.
 */
static void populate_be2_stats(struct be_adapter *adapter)
{
	struct be_hw_stats_v0 *hw_stats = hw_stats_from_cmd(adapter);
	struct be_pmem_stats *pmem_sts = &hw_stats->pmem;
	struct be_rxf_stats_v0 *rxf_stats = &hw_stats->rxf;
	struct be_port_rxf_stats_v0 *port_stats =
					&rxf_stats->port[adapter->port_num];
	struct be_drv_stats *drvs = &adapter->drv_stats;

	be_dws_le_to_cpu(hw_stats, sizeof(*hw_stats));
	drvs->rx_pause_frames = port_stats->rx_pause_frames;
	drvs->rx_crc_errors = port_stats->rx_crc_errors;
	drvs->rx_control_frames = port_stats->rx_control_frames;
	drvs->rx_in_range_errors = port_stats->rx_in_range_errors;
	drvs->rx_frame_too_long = port_stats->rx_frame_too_long;
	drvs->rx_dropped_runt = port_stats->rx_dropped_runt;
	drvs->rx_ip_checksum_errs = port_stats->rx_ip_checksum_errs;
	drvs->rx_tcp_checksum_errs = port_stats->rx_tcp_checksum_errs;
	drvs->rx_udp_checksum_errs = port_stats->rx_udp_checksum_errs;
	drvs->rxpp_fifo_overflow_drop = port_stats->rx_fifo_overflow;
	drvs->rx_dropped_tcp_length = port_stats->rx_dropped_tcp_length;
	drvs->rx_dropped_too_small = port_stats->rx_dropped_too_small;
	drvs->rx_dropped_too_short = port_stats->rx_dropped_too_short;
	drvs->rx_out_range_errors = port_stats->rx_out_range_errors;
	drvs->rx_input_fifo_overflow_drop = port_stats->rx_input_fifo_overflow;
	drvs->rx_dropped_header_too_small =
		port_stats->rx_dropped_header_too_small;
	/* v0 counts address and vlan mismatches separately; fold them */
	drvs->rx_address_mismatch_drops =
					port_stats->rx_address_mismatch_drops +
					port_stats->rx_vlan_mismatch_drops;
	drvs->rx_alignment_symbol_errors =
		port_stats->rx_alignment_symbol_errors;

	drvs->tx_pauseframes = port_stats->tx_pauseframes;
	drvs->tx_controlframes = port_stats->tx_controlframes;

	/* jabber events are tracked per physical port in the rxf block */
	if (adapter->port_num)
		drvs->jabber_events = rxf_stats->port1_jabber_events;
	else
		drvs->jabber_events = rxf_stats->port0_jabber_events;
	drvs->rx_drops_no_pbuf = rxf_stats->rx_drops_no_pbuf;
	drvs->rx_drops_no_erx_descr = rxf_stats->rx_drops_no_erx_descr;
	drvs->forwarded_packets = rxf_stats->forwarded_packets;
	drvs->rx_drops_mtu = rxf_stats->rx_drops_mtu;
	drvs->rx_drops_no_tpre_descr = rxf_stats->rx_drops_no_tpre_descr;
	drvs->rx_drops_too_many_frags = rxf_stats->rx_drops_too_many_frags;
	adapter->drv_stats.eth_red_drops = pmem_sts->eth_red_drops;
}
335
/* Copy the v1 (BE3) HW stats, DMA'ed by FW in little-endian layout, into
 * the driver's cumulative drv_stats after byte-swapping to CPU order.
 */
static void populate_be3_stats(struct be_adapter *adapter)
{
	struct be_hw_stats_v1 *hw_stats = hw_stats_from_cmd(adapter);
	struct be_pmem_stats *pmem_sts = &hw_stats->pmem;
	struct be_rxf_stats_v1 *rxf_stats = &hw_stats->rxf;
	struct be_port_rxf_stats_v1 *port_stats =
					&rxf_stats->port[adapter->port_num];
	struct be_drv_stats *drvs = &adapter->drv_stats;

	be_dws_le_to_cpu(hw_stats, sizeof(*hw_stats));
	drvs->pmem_fifo_overflow_drop = port_stats->pmem_fifo_overflow_drop;
	drvs->rx_priority_pause_frames = port_stats->rx_priority_pause_frames;
	drvs->rx_pause_frames = port_stats->rx_pause_frames;
	drvs->rx_crc_errors = port_stats->rx_crc_errors;
	drvs->rx_control_frames = port_stats->rx_control_frames;
	drvs->rx_in_range_errors = port_stats->rx_in_range_errors;
	drvs->rx_frame_too_long = port_stats->rx_frame_too_long;
	drvs->rx_dropped_runt = port_stats->rx_dropped_runt;
	drvs->rx_ip_checksum_errs = port_stats->rx_ip_checksum_errs;
	drvs->rx_tcp_checksum_errs = port_stats->rx_tcp_checksum_errs;
	drvs->rx_udp_checksum_errs = port_stats->rx_udp_checksum_errs;
	drvs->rx_dropped_tcp_length = port_stats->rx_dropped_tcp_length;
	drvs->rx_dropped_too_small = port_stats->rx_dropped_too_small;
	drvs->rx_dropped_too_short = port_stats->rx_dropped_too_short;
	drvs->rx_out_range_errors = port_stats->rx_out_range_errors;
	drvs->rx_dropped_header_too_small =
		port_stats->rx_dropped_header_too_small;
	drvs->rx_input_fifo_overflow_drop =
		port_stats->rx_input_fifo_overflow_drop;
	/* v1 already reports a single combined mismatch counter */
	drvs->rx_address_mismatch_drops = port_stats->rx_address_mismatch_drops;
	drvs->rx_alignment_symbol_errors =
		port_stats->rx_alignment_symbol_errors;
	drvs->rxpp_fifo_overflow_drop = port_stats->rxpp_fifo_overflow_drop;
	drvs->tx_pauseframes = port_stats->tx_pauseframes;
	drvs->tx_controlframes = port_stats->tx_controlframes;
	drvs->jabber_events = port_stats->jabber_events;
	drvs->rx_drops_no_pbuf = rxf_stats->rx_drops_no_pbuf;
	drvs->rx_drops_no_erx_descr = rxf_stats->rx_drops_no_erx_descr;
	drvs->forwarded_packets = rxf_stats->forwarded_packets;
	drvs->rx_drops_mtu = rxf_stats->rx_drops_mtu;
	drvs->rx_drops_no_tpre_descr = rxf_stats->rx_drops_no_tpre_descr;
	drvs->rx_drops_too_many_frags = rxf_stats->rx_drops_too_many_frags;
	adapter->drv_stats.eth_red_drops = pmem_sts->eth_red_drops;
}
380
/* Copy the Lancer physical-port (pport) stats, DMA'ed by FW in
 * little-endian layout, into the driver's cumulative drv_stats.
 * Lancer exposes 64-bit counters; only the low 32 bits (_lo) are used
 * for fields that drv_stats keeps as 32-bit.
 */
static void populate_lancer_stats(struct be_adapter *adapter)
{

	struct be_drv_stats *drvs = &adapter->drv_stats;
	struct lancer_pport_stats *pport_stats =
					pport_stats_from_cmd(adapter);

	be_dws_le_to_cpu(pport_stats, sizeof(*pport_stats));
	drvs->rx_pause_frames = pport_stats->rx_pause_frames_lo;
	drvs->rx_crc_errors = pport_stats->rx_crc_errors_lo;
	drvs->rx_control_frames = pport_stats->rx_control_frames_lo;
	drvs->rx_in_range_errors = pport_stats->rx_in_range_errors;
	drvs->rx_frame_too_long = pport_stats->rx_frames_too_long_lo;
	drvs->rx_dropped_runt = pport_stats->rx_dropped_runt;
	drvs->rx_ip_checksum_errs = pport_stats->rx_ip_checksum_errors;
	drvs->rx_tcp_checksum_errs = pport_stats->rx_tcp_checksum_errors;
	drvs->rx_udp_checksum_errs = pport_stats->rx_udp_checksum_errors;
	drvs->rx_dropped_tcp_length =
				pport_stats->rx_dropped_invalid_tcp_length;
	drvs->rx_dropped_too_small = pport_stats->rx_dropped_too_small;
	drvs->rx_dropped_too_short = pport_stats->rx_dropped_too_short;
	drvs->rx_out_range_errors = pport_stats->rx_out_of_range_errors;
	drvs->rx_dropped_header_too_small =
				pport_stats->rx_dropped_header_too_small;
	drvs->rx_input_fifo_overflow_drop = pport_stats->rx_fifo_overflow;
	/* fold address and vlan mismatch drops into one counter */
	drvs->rx_address_mismatch_drops =
					pport_stats->rx_address_mismatch_drops +
					pport_stats->rx_vlan_mismatch_drops;
	drvs->rx_alignment_symbol_errors = pport_stats->rx_symbol_errors_lo;
	drvs->rxpp_fifo_overflow_drop = pport_stats->rx_fifo_overflow;
	drvs->tx_pauseframes = pport_stats->tx_pause_frames_lo;
	drvs->tx_controlframes = pport_stats->tx_control_frames_lo;
	drvs->jabber_events = pport_stats->rx_jabbers;
	drvs->forwarded_packets = pport_stats->num_forwards_lo;
	drvs->rx_drops_mtu = pport_stats->rx_drops_mtu_lo;
	drvs->rx_drops_too_many_frags =
				pport_stats->rx_drops_too_many_frags_lo;
}
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000419
Sathya Perla09c1c682011-08-22 19:41:53 +0000420static void accumulate_16bit_val(u32 *acc, u16 val)
421{
422#define lo(x) (x & 0xFFFF)
423#define hi(x) (x & 0xFFFF0000)
424 bool wrapped = val < lo(*acc);
425 u32 newacc = hi(*acc) + val;
426
427 if (wrapped)
428 newacc += 65536;
429 ACCESS_ONCE(*acc) = newacc;
430}
431
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000432void be_parse_stats(struct be_adapter *adapter)
433{
Sathya Perlaac124ff2011-07-25 19:10:14 +0000434 struct be_erx_stats_v1 *erx = be_erx_stats_from_cmd(adapter);
435 struct be_rx_obj *rxo;
436 int i;
437
Selvin Xavier005d5692011-05-16 07:36:35 +0000438 if (adapter->generation == BE_GEN3) {
439 if (lancer_chip(adapter))
440 populate_lancer_stats(adapter);
441 else
442 populate_be3_stats(adapter);
443 } else {
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000444 populate_be2_stats(adapter);
Selvin Xavier005d5692011-05-16 07:36:35 +0000445 }
Sathya Perlaac124ff2011-07-25 19:10:14 +0000446
Padmanabh Ratnakard51ebd32012-04-25 01:46:52 +0000447 if (lancer_chip(adapter))
448 goto done;
449
Sathya Perlaac124ff2011-07-25 19:10:14 +0000450 /* as erx_v1 is longer than v0, ok to use v1 defn for v0 access */
Sathya Perla09c1c682011-08-22 19:41:53 +0000451 for_all_rx_queues(adapter, rxo, i) {
452 /* below erx HW counter can actually wrap around after
453 * 65535. Driver accumulates a 32-bit value
454 */
455 accumulate_16bit_val(&rx_stats(rxo)->rx_drops_no_frags,
456 (u16)erx->rx_drops_no_fragments[rxo->q.id]);
457 }
Padmanabh Ratnakard51ebd32012-04-25 01:46:52 +0000458done:
459 return;
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000460}
461
/* ndo_get_stats64 handler: aggregate per-queue packet/byte counters
 * (read consistently via u64_stats seqcount retry loops) and derive the
 * standard rtnl error counters from drv_stats.  Returns 'stats'.
 */
static struct rtnl_link_stats64 *be_get_stats64(struct net_device *netdev,
					struct rtnl_link_stats64 *stats)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_drv_stats *drvs = &adapter->drv_stats;
	struct be_rx_obj *rxo;
	struct be_tx_obj *txo;
	u64 pkts, bytes;
	unsigned int start;
	int i;

	for_all_rx_queues(adapter, rxo, i) {
		const struct be_rx_stats *rx_stats = rx_stats(rxo);
		/* retry until a consistent pkts/bytes snapshot is read */
		do {
			start = u64_stats_fetch_begin_bh(&rx_stats->sync);
			pkts = rx_stats(rxo)->rx_pkts;
			bytes = rx_stats(rxo)->rx_bytes;
		} while (u64_stats_fetch_retry_bh(&rx_stats->sync, start));
		stats->rx_packets += pkts;
		stats->rx_bytes += bytes;
		stats->multicast += rx_stats(rxo)->rx_mcast_pkts;
		stats->rx_dropped += rx_stats(rxo)->rx_drops_no_skbs +
					rx_stats(rxo)->rx_drops_no_frags;
	}

	for_all_tx_queues(adapter, txo, i) {
		const struct be_tx_stats *tx_stats = tx_stats(txo);
		/* retry until a consistent pkts/bytes snapshot is read */
		do {
			start = u64_stats_fetch_begin_bh(&tx_stats->sync);
			pkts = tx_stats(txo)->tx_pkts;
			bytes = tx_stats(txo)->tx_bytes;
		} while (u64_stats_fetch_retry_bh(&tx_stats->sync, start));
		stats->tx_packets += pkts;
		stats->tx_bytes += bytes;
	}

	/* bad pkts received */
	stats->rx_errors = drvs->rx_crc_errors +
		drvs->rx_alignment_symbol_errors +
		drvs->rx_in_range_errors +
		drvs->rx_out_range_errors +
		drvs->rx_frame_too_long +
		drvs->rx_dropped_too_small +
		drvs->rx_dropped_too_short +
		drvs->rx_dropped_header_too_small +
		drvs->rx_dropped_tcp_length +
		drvs->rx_dropped_runt;

	/* detailed rx errors */
	stats->rx_length_errors = drvs->rx_in_range_errors +
		drvs->rx_out_range_errors +
		drvs->rx_frame_too_long;

	stats->rx_crc_errors = drvs->rx_crc_errors;

	/* frame alignment errors */
	stats->rx_frame_errors = drvs->rx_alignment_symbol_errors;

	/* receiver fifo overrun */
	/* drops_no_pbuf is no per i/f, it's per BE card */
	stats->rx_fifo_errors = drvs->rxpp_fifo_overflow_drop +
				drvs->rx_input_fifo_overflow_drop +
				drvs->rx_drops_no_pbuf;
	return stats;
}
527
Ajit Khapardeb236916a2011-12-30 12:15:40 +0000528void be_link_status_update(struct be_adapter *adapter, u8 link_status)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700529{
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700530 struct net_device *netdev = adapter->netdev;
531
Ajit Khapardeb236916a2011-12-30 12:15:40 +0000532 if (!(adapter->flags & BE_FLAGS_LINK_STATUS_INIT)) {
Sathya Perlaea172a02011-08-02 19:57:42 +0000533 netif_carrier_off(netdev);
Ajit Khapardeb236916a2011-12-30 12:15:40 +0000534 adapter->flags |= BE_FLAGS_LINK_STATUS_INIT;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700535 }
Ajit Khapardeb236916a2011-12-30 12:15:40 +0000536
537 if ((link_status & LINK_STATUS_MASK) == LINK_UP)
538 netif_carrier_on(netdev);
539 else
540 netif_carrier_off(netdev);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700541}
542
/* Account one transmitted request on 'txo' inside a u64_stats write
 * section: wrb count, bytes, packet count (GSO segments count as
 * individual packets) and queue-stop events.
 */
static void be_tx_stats_update(struct be_tx_obj *txo,
			u32 wrb_cnt, u32 copied, u32 gso_segs, bool stopped)
{
	struct be_tx_stats *stats = tx_stats(txo);

	u64_stats_update_begin(&stats->sync);
	stats->tx_reqs++;
	stats->tx_wrbs += wrb_cnt;
	stats->tx_bytes += copied;
	/* a non-GSO skb counts as a single packet */
	stats->tx_pkts += (gso_segs ? gso_segs : 1);
	if (stopped)
		stats->tx_stops++;
	u64_stats_update_end(&stats->sync);
}
557
558/* Determine number of WRB entries needed to xmit data in an skb */
Sathya Perlafe6d2a32010-11-21 23:25:50 +0000559static u32 wrb_cnt_for_skb(struct be_adapter *adapter, struct sk_buff *skb,
560 bool *dummy)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700561{
David S. Millerebc8d2a2009-06-09 01:01:31 -0700562 int cnt = (skb->len > skb->data_len);
563
564 cnt += skb_shinfo(skb)->nr_frags;
565
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700566 /* to account for hdr wrb */
567 cnt++;
Sathya Perlafe6d2a32010-11-21 23:25:50 +0000568 if (lancer_chip(adapter) || !(cnt & 1)) {
569 *dummy = false;
570 } else {
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700571 /* add a dummy to make it an even num */
572 cnt++;
573 *dummy = true;
Sathya Perlafe6d2a32010-11-21 23:25:50 +0000574 }
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700575 BUG_ON(cnt > BE_MAX_TX_FRAG_COUNT);
576 return cnt;
577}
578
579static inline void wrb_fill(struct be_eth_wrb *wrb, u64 addr, int len)
580{
581 wrb->frag_pa_hi = upper_32_bits(addr);
582 wrb->frag_pa_lo = addr & 0xFFFFFFFF;
583 wrb->frag_len = len & ETH_WRB_FRAG_LEN_MASK;
Somnath Kotur89b1f492012-06-24 19:40:55 +0000584 wrb->rsvd0 = 0;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700585}
586
Ajit Khaparde1ded1322011-12-09 13:53:17 +0000587static inline u16 be_get_tx_vlan_tag(struct be_adapter *adapter,
588 struct sk_buff *skb)
589{
590 u8 vlan_prio;
591 u16 vlan_tag;
592
593 vlan_tag = vlan_tx_tag_get(skb);
594 vlan_prio = (vlan_tag & VLAN_PRIO_MASK) >> VLAN_PRIO_SHIFT;
595 /* If vlan priority provided by OS is NOT in available bmap */
596 if (!(adapter->vlan_prio_bmap & (1 << vlan_prio)))
597 vlan_tag = (vlan_tag & ~VLAN_PRIO_MASK) |
598 adapter->recommended_prio;
599
600 return vlan_tag;
601}
602
Somnath Kotur93040ae2012-06-26 22:32:10 +0000603static int be_vlan_tag_chk(struct be_adapter *adapter, struct sk_buff *skb)
604{
605 return vlan_tx_tag_present(skb) || adapter->pvid;
606}
607
/* Program the TX header wrb for 'skb': CRC, LSO/checksum-offload flags,
 * vlan insertion, total wrb count and payload length, using the AMAP
 * bit-field accessors.
 */
static void wrb_fill_hdr(struct be_adapter *adapter, struct be_eth_hdr_wrb *hdr,
		struct sk_buff *skb, u32 wrb_cnt, u32 len)
{
	u16 vlan_tag;

	memset(hdr, 0, sizeof(*hdr));

	AMAP_SET_BITS(struct amap_eth_hdr_wrb, crc, hdr, 1);

	if (skb_is_gso(skb)) {
		AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso, hdr, 1);
		AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso_mss,
			hdr, skb_shinfo(skb)->gso_size);
		if (skb_is_gso_v6(skb) && !lancer_chip(adapter))
			AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso6, hdr, 1);
		/* Lancer A0 additionally needs explicit checksum flags
		 * alongside LSO
		 */
		if (lancer_chip(adapter) && adapter->sli_family ==
							LANCER_A0_SLI_FAMILY) {
			AMAP_SET_BITS(struct amap_eth_hdr_wrb, ipcs, hdr, 1);
			if (is_tcp_pkt(skb))
				AMAP_SET_BITS(struct amap_eth_hdr_wrb,
								tcpcs, hdr, 1);
			else if (is_udp_pkt(skb))
				AMAP_SET_BITS(struct amap_eth_hdr_wrb,
								udpcs, hdr, 1);
		}
	} else if (skb->ip_summed == CHECKSUM_PARTIAL) {
		if (is_tcp_pkt(skb))
			AMAP_SET_BITS(struct amap_eth_hdr_wrb, tcpcs, hdr, 1);
		else if (is_udp_pkt(skb))
			AMAP_SET_BITS(struct amap_eth_hdr_wrb, udpcs, hdr, 1);
	}

	if (vlan_tx_tag_present(skb)) {
		AMAP_SET_BITS(struct amap_eth_hdr_wrb, vlan, hdr, 1);
		vlan_tag = be_get_tx_vlan_tag(adapter, skb);
		AMAP_SET_BITS(struct amap_eth_hdr_wrb, vlan_tag, hdr, vlan_tag);
	}

	AMAP_SET_BITS(struct amap_eth_hdr_wrb, event, hdr, 1);
	AMAP_SET_BITS(struct amap_eth_hdr_wrb, complete, hdr, 1);
	AMAP_SET_BITS(struct amap_eth_hdr_wrb, num_wrb, hdr, wrb_cnt);
	AMAP_SET_BITS(struct amap_eth_hdr_wrb, len, hdr, len);
}
651
Ivan Vecera2b7bceb2011-02-02 08:05:12 +0000652static void unmap_tx_frag(struct device *dev, struct be_eth_wrb *wrb,
Sathya Perla7101e112010-03-22 20:41:12 +0000653 bool unmap_single)
654{
655 dma_addr_t dma;
656
657 be_dws_le_to_cpu(wrb, sizeof(*wrb));
658
659 dma = (u64)wrb->frag_pa_hi << 32 | (u64)wrb->frag_pa_lo;
FUJITA Tomonorib681ee72010-04-04 21:40:18 +0000660 if (wrb->frag_len) {
Sathya Perla7101e112010-03-22 20:41:12 +0000661 if (unmap_single)
Ivan Vecera2b7bceb2011-02-02 08:05:12 +0000662 dma_unmap_single(dev, dma, wrb->frag_len,
663 DMA_TO_DEVICE);
Sathya Perla7101e112010-03-22 20:41:12 +0000664 else
Ivan Vecera2b7bceb2011-02-02 08:05:12 +0000665 dma_unmap_page(dev, dma, wrb->frag_len, DMA_TO_DEVICE);
Sathya Perla7101e112010-03-22 20:41:12 +0000666 }
667}
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700668
/* Map an skb (head + page frags) for DMA and fill one TX WRB per mapped
 * piece, preceded by a header WRB and optionally followed by a dummy WRB
 * (HW workaround requested by the caller).
 * Returns the number of payload bytes mapped, or 0 if any DMA mapping
 * failed — in which case every mapping made so far is undone and
 * txq->head is rewound so no WRBs are consumed.
 */
static int make_tx_wrbs(struct be_adapter *adapter, struct be_queue_info *txq,
		struct sk_buff *skb, u32 wrb_cnt, bool dummy_wrb)
{
	dma_addr_t busaddr;
	int i, copied = 0;
	struct device *dev = &adapter->pdev->dev;
	struct sk_buff *first_skb = skb;
	struct be_eth_wrb *wrb;
	struct be_eth_hdr_wrb *hdr;
	bool map_single = false;
	u16 map_head;

	/* Reserve the first slot for the header WRB; it is filled last,
	 * once the total copied length is known.
	 */
	hdr = queue_head_node(txq);
	queue_head_inc(txq);
	map_head = txq->head;	/* rewind point for the error path */

	/* Linear (head) portion, if any — mapped with dma_map_single() */
	if (skb->len > skb->data_len) {
		int len = skb_headlen(skb);
		busaddr = dma_map_single(dev, skb->data, len, DMA_TO_DEVICE);
		if (dma_mapping_error(dev, busaddr))
			goto dma_err;
		map_single = true;
		wrb = queue_head_node(txq);
		wrb_fill(wrb, busaddr, len);
		be_dws_cpu_to_le(wrb, sizeof(*wrb));
		queue_head_inc(txq);
		copied += len;
	}

	/* One WRB per page fragment */
	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		const struct skb_frag_struct *frag =
			&skb_shinfo(skb)->frags[i];
		busaddr = skb_frag_dma_map(dev, frag, 0,
					skb_frag_size(frag), DMA_TO_DEVICE);
		if (dma_mapping_error(dev, busaddr))
			goto dma_err;
		wrb = queue_head_node(txq);
		wrb_fill(wrb, busaddr, skb_frag_size(frag));
		be_dws_cpu_to_le(wrb, sizeof(*wrb));
		queue_head_inc(txq);
		copied += skb_frag_size(frag);
	}

	/* Zero-length dummy WRB, used as a HW workaround by the caller */
	if (dummy_wrb) {
		wrb = queue_head_node(txq);
		wrb_fill(wrb, 0, 0);
		be_dws_cpu_to_le(wrb, sizeof(*wrb));
		queue_head_inc(txq);
	}

	wrb_fill_hdr(adapter, hdr, first_skb, wrb_cnt, copied);
	be_dws_cpu_to_le(hdr, sizeof(*hdr));

	return copied;
dma_err:
	/* Unwind: rewind to the first data WRB and unmap everything that
	 * was mapped. Only the first WRB can be a single-mapping; the
	 * flag is cleared after the first iteration.
	 */
	txq->head = map_head;
	while (copied) {
		wrb = queue_head_node(txq);
		unmap_tx_frag(dev, wrb, map_single);
		map_single = false;
		copied -= wrb->frag_len;
		queue_head_inc(txq);
	}
	return 0;
}
734
Somnath Kotur93040ae2012-06-26 22:32:10 +0000735static struct sk_buff *be_insert_vlan_in_pkt(struct be_adapter *adapter,
736 struct sk_buff *skb)
737{
738 u16 vlan_tag = 0;
739
740 skb = skb_share_check(skb, GFP_ATOMIC);
741 if (unlikely(!skb))
742 return skb;
743
744 if (vlan_tx_tag_present(skb)) {
745 vlan_tag = be_get_tx_vlan_tag(adapter, skb);
746 __vlan_put_tag(skb, vlan_tag);
747 skb->vlan_tci = 0;
748 }
749
750 return skb;
751}
752
Stephen Hemminger613573252009-08-31 19:50:58 +0000753static netdev_tx_t be_xmit(struct sk_buff *skb,
Sathya Perlab31c50a2009-09-17 10:30:13 -0700754 struct net_device *netdev)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700755{
756 struct be_adapter *adapter = netdev_priv(netdev);
Sathya Perla3c8def92011-06-12 20:01:58 +0000757 struct be_tx_obj *txo = &adapter->tx_obj[skb_get_queue_mapping(skb)];
758 struct be_queue_info *txq = &txo->q;
Somnath Kotur93040ae2012-06-26 22:32:10 +0000759 struct iphdr *ip = NULL;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700760 u32 wrb_cnt = 0, copied = 0;
Somnath Kotur93040ae2012-06-26 22:32:10 +0000761 u32 start = txq->head, eth_hdr_len;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700762 bool dummy_wrb, stopped = false;
763
Somnath Kotur93040ae2012-06-26 22:32:10 +0000764 eth_hdr_len = ntohs(skb->protocol) == ETH_P_8021Q ?
765 VLAN_ETH_HLEN : ETH_HLEN;
766
767 /* HW has a bug which considers padding bytes as legal
768 * and modifies the IPv4 hdr's 'tot_len' field
Ajit Khaparde1ded1322011-12-09 13:53:17 +0000769 */
Somnath Kotur93040ae2012-06-26 22:32:10 +0000770 if (skb->len <= 60 && be_vlan_tag_chk(adapter, skb) &&
771 is_ipv4_pkt(skb)) {
772 ip = (struct iphdr *)ip_hdr(skb);
773 pskb_trim(skb, eth_hdr_len + ntohs(ip->tot_len));
774 }
775
776 /* HW has a bug wherein it will calculate CSUM for VLAN
777 * pkts even though it is disabled.
778 * Manually insert VLAN in pkt.
779 */
780 if (skb->ip_summed != CHECKSUM_PARTIAL &&
781 be_vlan_tag_chk(adapter, skb)) {
782 skb = be_insert_vlan_in_pkt(adapter, skb);
Ajit Khaparde1ded1322011-12-09 13:53:17 +0000783 if (unlikely(!skb))
784 goto tx_drop;
Ajit Khaparde1ded1322011-12-09 13:53:17 +0000785 }
786
Sathya Perlafe6d2a32010-11-21 23:25:50 +0000787 wrb_cnt = wrb_cnt_for_skb(adapter, skb, &dummy_wrb);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700788
Sathya Perla3c8def92011-06-12 20:01:58 +0000789 copied = make_tx_wrbs(adapter, txq, skb, wrb_cnt, dummy_wrb);
Ajit Khapardec190e3c2009-09-04 03:12:29 +0000790 if (copied) {
Eric Dumazetcd8f76c2012-06-07 22:59:59 +0000791 int gso_segs = skb_shinfo(skb)->gso_segs;
792
Ajit Khapardec190e3c2009-09-04 03:12:29 +0000793 /* record the sent skb in the sent_skb table */
Sathya Perla3c8def92011-06-12 20:01:58 +0000794 BUG_ON(txo->sent_skb_list[start]);
795 txo->sent_skb_list[start] = skb;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700796
Ajit Khapardec190e3c2009-09-04 03:12:29 +0000797 /* Ensure txq has space for the next skb; Else stop the queue
798 * *BEFORE* ringing the tx doorbell, so that we serialze the
799 * tx compls of the current transmit which'll wake up the queue
800 */
Sathya Perla7101e112010-03-22 20:41:12 +0000801 atomic_add(wrb_cnt, &txq->used);
Ajit Khapardec190e3c2009-09-04 03:12:29 +0000802 if ((BE_MAX_TX_FRAG_COUNT + atomic_read(&txq->used)) >=
803 txq->len) {
Sathya Perla3c8def92011-06-12 20:01:58 +0000804 netif_stop_subqueue(netdev, skb_get_queue_mapping(skb));
Ajit Khapardec190e3c2009-09-04 03:12:29 +0000805 stopped = true;
806 }
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700807
Ajit Khapardec190e3c2009-09-04 03:12:29 +0000808 be_txq_notify(adapter, txq->id, wrb_cnt);
809
Eric Dumazetcd8f76c2012-06-07 22:59:59 +0000810 be_tx_stats_update(txo, wrb_cnt, copied, gso_segs, stopped);
Ajit Khapardec190e3c2009-09-04 03:12:29 +0000811 } else {
812 txq->head = start;
813 dev_kfree_skb_any(skb);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700814 }
Ajit Khaparde1ded1322011-12-09 13:53:17 +0000815tx_drop:
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700816 return NETDEV_TX_OK;
817}
818
819static int be_change_mtu(struct net_device *netdev, int new_mtu)
820{
821 struct be_adapter *adapter = netdev_priv(netdev);
822 if (new_mtu < BE_MIN_MTU ||
Ajit Khaparde34a89b82010-02-09 01:32:43 +0000823 new_mtu > (BE_MAX_JUMBO_FRAME_SIZE -
824 (ETH_HLEN + ETH_FCS_LEN))) {
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700825 dev_info(&adapter->pdev->dev,
826 "MTU must be between %d and %d bytes\n",
Ajit Khaparde34a89b82010-02-09 01:32:43 +0000827 BE_MIN_MTU,
828 (BE_MAX_JUMBO_FRAME_SIZE - (ETH_HLEN + ETH_FCS_LEN)));
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700829 return -EINVAL;
830 }
831 dev_info(&adapter->pdev->dev, "MTU changed from %d to %d bytes\n",
832 netdev->mtu, new_mtu);
833 netdev->mtu = new_mtu;
834 return 0;
835}
836
837/*
Ajit Khaparde82903e42010-02-09 01:34:57 +0000838 * A max of 64 (BE_NUM_VLANS_SUPPORTED) vlans can be configured in BE.
839 * If the user configures more, place BE in vlan promiscuous mode.
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700840 */
Sathya Perla10329df2012-06-05 19:37:18 +0000841static int be_vid_config(struct be_adapter *adapter)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700842{
Sathya Perla10329df2012-06-05 19:37:18 +0000843 u16 vids[BE_NUM_VLANS_SUPPORTED];
844 u16 num = 0, i;
Ajit Khaparde82903e42010-02-09 01:34:57 +0000845 int status = 0;
Ajit Khaparde1da87b72010-07-23 01:51:22 +0000846
Sathya Perlac0e64ef2011-08-02 19:57:43 +0000847 /* No need to further configure vids if in promiscuous mode */
848 if (adapter->promiscuous)
849 return 0;
850
Padmanabh Ratnakar0fc16eb2012-04-25 01:46:06 +0000851 if (adapter->vlans_added > adapter->max_vlans)
852 goto set_vlan_promisc;
853
854 /* Construct VLAN Table to give to HW */
855 for (i = 0; i < VLAN_N_VID; i++)
856 if (adapter->vlan_tag[i])
Sathya Perla10329df2012-06-05 19:37:18 +0000857 vids[num++] = cpu_to_le16(i);
Padmanabh Ratnakar0fc16eb2012-04-25 01:46:06 +0000858
859 status = be_cmd_vlan_config(adapter, adapter->if_handle,
Sathya Perla10329df2012-06-05 19:37:18 +0000860 vids, num, 1, 0);
Padmanabh Ratnakar0fc16eb2012-04-25 01:46:06 +0000861
862 /* Set to VLAN promisc mode as setting VLAN filter failed */
863 if (status) {
864 dev_info(&adapter->pdev->dev, "Exhausted VLAN HW filters.\n");
865 dev_info(&adapter->pdev->dev, "Disabling HW VLAN filtering.\n");
866 goto set_vlan_promisc;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700867 }
Ajit Khaparde1da87b72010-07-23 01:51:22 +0000868
Sathya Perlab31c50a2009-09-17 10:30:13 -0700869 return status;
Padmanabh Ratnakar0fc16eb2012-04-25 01:46:06 +0000870
871set_vlan_promisc:
872 status = be_cmd_vlan_config(adapter, adapter->if_handle,
873 NULL, 0, 1, 1);
874 return status;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700875}
876
Jiri Pirko8e586132011-12-08 19:52:37 -0500877static int be_vlan_add_vid(struct net_device *netdev, u16 vid)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700878{
879 struct be_adapter *adapter = netdev_priv(netdev);
Ajit Khaparde80817cb2011-12-30 12:15:12 +0000880 int status = 0;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700881
Padmanabh Ratnakara85e9982012-10-20 06:02:40 +0000882 if (!lancer_chip(adapter) && !be_physfn(adapter)) {
Ajit Khaparde80817cb2011-12-30 12:15:12 +0000883 status = -EINVAL;
884 goto ret;
885 }
Sarveshwar Bandiba343c72010-03-31 02:56:12 +0000886
Padmanabh Ratnakara85e9982012-10-20 06:02:40 +0000887 /* Packets with VID 0 are always received by Lancer by default */
888 if (lancer_chip(adapter) && vid == 0)
889 goto ret;
890
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700891 adapter->vlan_tag[vid] = 1;
Ajit Khaparde82903e42010-02-09 01:34:57 +0000892 if (adapter->vlans_added <= (adapter->max_vlans + 1))
Sathya Perla10329df2012-06-05 19:37:18 +0000893 status = be_vid_config(adapter);
Jiri Pirko8e586132011-12-08 19:52:37 -0500894
Ajit Khaparde80817cb2011-12-30 12:15:12 +0000895 if (!status)
896 adapter->vlans_added++;
897 else
898 adapter->vlan_tag[vid] = 0;
899ret:
900 return status;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700901}
902
Jiri Pirko8e586132011-12-08 19:52:37 -0500903static int be_vlan_rem_vid(struct net_device *netdev, u16 vid)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700904{
905 struct be_adapter *adapter = netdev_priv(netdev);
Ajit Khaparde80817cb2011-12-30 12:15:12 +0000906 int status = 0;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700907
Padmanabh Ratnakara85e9982012-10-20 06:02:40 +0000908 if (!lancer_chip(adapter) && !be_physfn(adapter)) {
Ajit Khaparde80817cb2011-12-30 12:15:12 +0000909 status = -EINVAL;
910 goto ret;
911 }
Sarveshwar Bandiba343c72010-03-31 02:56:12 +0000912
Padmanabh Ratnakara85e9982012-10-20 06:02:40 +0000913 /* Packets with VID 0 are always received by Lancer by default */
914 if (lancer_chip(adapter) && vid == 0)
915 goto ret;
916
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700917 adapter->vlan_tag[vid] = 0;
Ajit Khaparde82903e42010-02-09 01:34:57 +0000918 if (adapter->vlans_added <= adapter->max_vlans)
Sathya Perla10329df2012-06-05 19:37:18 +0000919 status = be_vid_config(adapter);
Jiri Pirko8e586132011-12-08 19:52:37 -0500920
Ajit Khaparde80817cb2011-12-30 12:15:12 +0000921 if (!status)
922 adapter->vlans_added--;
923 else
924 adapter->vlan_tag[vid] = 1;
925ret:
926 return status;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700927}
928
/* ndo_set_rx_mode handler: program the HW RX filters to match the
 * netdev's current flags and UC/MC address lists. Stages, in order:
 * full promiscuous, exit-from-promiscuous (re-programming VLANs),
 * multicast-promiscuous, per-address unicast filters, and finally the
 * multicast list (falling back to allmulti if the MC table is full).
 */
static void be_set_rx_mode(struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	int status;

	if (netdev->flags & IFF_PROMISC) {
		be_cmd_rx_filter(adapter, IFF_PROMISC, ON);
		adapter->promiscuous = true;
		goto done;
	}

	/* BE was previously in promiscuous mode; disable it */
	if (adapter->promiscuous) {
		adapter->promiscuous = false;
		be_cmd_rx_filter(adapter, IFF_PROMISC, OFF);

		/* Promisc mode bypassed VLAN filtering; re-program it */
		if (adapter->vlans_added)
			be_vid_config(adapter);
	}

	/* Enable multicast promisc if num configured exceeds what we support */
	if (netdev->flags & IFF_ALLMULTI ||
	    netdev_mc_count(netdev) > adapter->max_mcast_mac) {
		be_cmd_rx_filter(adapter, IFF_ALLMULTI, ON);
		goto done;
	}

	/* UC list changed: drop all secondary MACs and re-add the new set */
	if (netdev_uc_count(netdev) != adapter->uc_macs) {
		struct netdev_hw_addr *ha;
		int i = 1; /* First slot is claimed by the Primary MAC */

		for (; adapter->uc_macs > 0; adapter->uc_macs--, i++) {
			be_cmd_pmac_del(adapter, adapter->if_handle,
					adapter->pmac_id[i], 0);
		}

		/* More UC addresses than PMAC slots: go fully promiscuous */
		if (netdev_uc_count(netdev) > adapter->max_pmac_cnt) {
			be_cmd_rx_filter(adapter, IFF_PROMISC, ON);
			adapter->promiscuous = true;
			goto done;
		}

		netdev_for_each_uc_addr(ha, adapter->netdev) {
			adapter->uc_macs++; /* First slot is for Primary MAC */
			be_cmd_pmac_add(adapter, (u8 *)ha->addr,
					adapter->if_handle,
					&adapter->pmac_id[adapter->uc_macs], 0);
		}
	}

	status = be_cmd_rx_filter(adapter, IFF_MULTICAST, ON);

	/* Set to MCAST promisc mode if setting MULTICAST address fails */
	if (status) {
		dev_info(&adapter->pdev->dev, "Exhausted multicast HW filters.\n");
		dev_info(&adapter->pdev->dev, "Disabling HW multicast filtering.\n");
		be_cmd_rx_filter(adapter, IFF_ALLMULTI, ON);
	}
done:
	return;
}
990
/* ndo_set_vf_mac handler: program @mac as the MAC address of VF @vf.
 * On Lancer the MAC list interface is used (deleting the currently
 * active MAC first); on BE chips the old PMAC is deleted and the new
 * one added. The VF's cached mac_addr is updated only on success.
 */
static int be_set_vf_mac(struct net_device *netdev, int vf, u8 *mac)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf];
	int status;
	bool active_mac = false;
	u32 pmac_id;
	u8 old_mac[ETH_ALEN];

	if (!sriov_enabled(adapter))
		return -EPERM;

	if (!is_valid_ether_addr(mac) || vf >= adapter->num_vfs)
		return -EINVAL;

	if (lancer_chip(adapter)) {
		/* Remove the currently active MAC, if any, before
		 * installing the new one via the MAC list.
		 */
		status = be_cmd_get_mac_from_list(adapter, old_mac, &active_mac,
						  &pmac_id, vf + 1);
		if (!status && active_mac)
			be_cmd_pmac_del(adapter, vf_cfg->if_handle,
					pmac_id, vf + 1);

		status = be_cmd_set_mac_list(adapter, mac, 1, vf + 1);
	} else {
		/* NOTE(review): the pmac_del status is immediately
		 * overwritten by pmac_add below — presumably the delete
		 * is best-effort; confirm that is intended.
		 */
		status = be_cmd_pmac_del(adapter, vf_cfg->if_handle,
					 vf_cfg->pmac_id, vf + 1);

		status = be_cmd_pmac_add(adapter, mac, vf_cfg->if_handle,
					 &vf_cfg->pmac_id, vf + 1);
	}

	if (status)
		dev_err(&adapter->pdev->dev, "MAC %pM set on VF %d Failed\n",
				mac, vf);
	else
		memcpy(vf_cfg->mac_addr, mac, ETH_ALEN);

	return status;
}
1030
Ajit Khaparde64600ea2010-07-23 01:50:34 +00001031static int be_get_vf_config(struct net_device *netdev, int vf,
1032 struct ifla_vf_info *vi)
1033{
1034 struct be_adapter *adapter = netdev_priv(netdev);
Sathya Perla11ac75e2011-12-13 00:58:50 +00001035 struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf];
Ajit Khaparde64600ea2010-07-23 01:50:34 +00001036
Sathya Perla11ac75e2011-12-13 00:58:50 +00001037 if (!sriov_enabled(adapter))
Ajit Khaparde64600ea2010-07-23 01:50:34 +00001038 return -EPERM;
1039
Sathya Perla11ac75e2011-12-13 00:58:50 +00001040 if (vf >= adapter->num_vfs)
Ajit Khaparde64600ea2010-07-23 01:50:34 +00001041 return -EINVAL;
1042
1043 vi->vf = vf;
Sathya Perla11ac75e2011-12-13 00:58:50 +00001044 vi->tx_rate = vf_cfg->tx_rate;
1045 vi->vlan = vf_cfg->vlan_tag;
Ajit Khaparde64600ea2010-07-23 01:50:34 +00001046 vi->qos = 0;
Sathya Perla11ac75e2011-12-13 00:58:50 +00001047 memcpy(&vi->mac, vf_cfg->mac_addr, ETH_ALEN);
Ajit Khaparde64600ea2010-07-23 01:50:34 +00001048
1049 return 0;
1050}
1051
Ajit Khaparde1da87b72010-07-23 01:51:22 +00001052static int be_set_vf_vlan(struct net_device *netdev,
1053 int vf, u16 vlan, u8 qos)
1054{
1055 struct be_adapter *adapter = netdev_priv(netdev);
1056 int status = 0;
1057
Sathya Perla11ac75e2011-12-13 00:58:50 +00001058 if (!sriov_enabled(adapter))
Ajit Khaparde1da87b72010-07-23 01:51:22 +00001059 return -EPERM;
1060
Sathya Perla11ac75e2011-12-13 00:58:50 +00001061 if (vf >= adapter->num_vfs || vlan > 4095)
Ajit Khaparde1da87b72010-07-23 01:51:22 +00001062 return -EINVAL;
1063
1064 if (vlan) {
Ajit Khapardef1f3ee12012-03-18 06:23:41 +00001065 if (adapter->vf_cfg[vf].vlan_tag != vlan) {
1066 /* If this is new value, program it. Else skip. */
1067 adapter->vf_cfg[vf].vlan_tag = vlan;
1068
1069 status = be_cmd_set_hsw_config(adapter, vlan,
1070 vf + 1, adapter->vf_cfg[vf].if_handle);
1071 }
Ajit Khaparde1da87b72010-07-23 01:51:22 +00001072 } else {
Ajit Khapardef1f3ee12012-03-18 06:23:41 +00001073 /* Reset Transparent Vlan Tagging. */
Sathya Perla11ac75e2011-12-13 00:58:50 +00001074 adapter->vf_cfg[vf].vlan_tag = 0;
Ajit Khapardef1f3ee12012-03-18 06:23:41 +00001075 vlan = adapter->vf_cfg[vf].def_vid;
1076 status = be_cmd_set_hsw_config(adapter, vlan, vf + 1,
1077 adapter->vf_cfg[vf].if_handle);
Ajit Khaparde1da87b72010-07-23 01:51:22 +00001078 }
1079
Ajit Khaparde1da87b72010-07-23 01:51:22 +00001080
1081 if (status)
1082 dev_info(&adapter->pdev->dev,
1083 "VLAN %d config on VF %d failed\n", vlan, vf);
1084 return status;
1085}
1086
Ajit Khapardee1d18732010-07-23 01:52:13 +00001087static int be_set_vf_tx_rate(struct net_device *netdev,
1088 int vf, int rate)
1089{
1090 struct be_adapter *adapter = netdev_priv(netdev);
1091 int status = 0;
1092
Sathya Perla11ac75e2011-12-13 00:58:50 +00001093 if (!sriov_enabled(adapter))
Ajit Khapardee1d18732010-07-23 01:52:13 +00001094 return -EPERM;
1095
Ajit Khaparde94f434c2011-12-30 12:15:30 +00001096 if (vf >= adapter->num_vfs)
Ajit Khapardee1d18732010-07-23 01:52:13 +00001097 return -EINVAL;
1098
Ajit Khaparde94f434c2011-12-30 12:15:30 +00001099 if (rate < 100 || rate > 10000) {
1100 dev_err(&adapter->pdev->dev,
1101 "tx rate must be between 100 and 10000 Mbps\n");
1102 return -EINVAL;
1103 }
Ajit Khapardee1d18732010-07-23 01:52:13 +00001104
Padmanabh Ratnakard5c18472012-10-20 06:01:53 +00001105 if (lancer_chip(adapter))
1106 status = be_cmd_set_profile_config(adapter, rate / 10, vf + 1);
1107 else
1108 status = be_cmd_set_qos(adapter, rate / 10, vf + 1);
Ajit Khapardee1d18732010-07-23 01:52:13 +00001109
1110 if (status)
Ajit Khaparde94f434c2011-12-30 12:15:30 +00001111 dev_err(&adapter->pdev->dev,
Ajit Khapardee1d18732010-07-23 01:52:13 +00001112 "tx rate %d on VF %d failed\n", rate, vf);
Ajit Khaparde94f434c2011-12-30 12:15:30 +00001113 else
1114 adapter->vf_cfg[vf].tx_rate = rate;
Ajit Khapardee1d18732010-07-23 01:52:13 +00001115 return status;
1116}
1117
Sathya Perla39f1d942012-05-08 19:41:24 +00001118static int be_find_vfs(struct be_adapter *adapter, int vf_state)
1119{
1120 struct pci_dev *dev, *pdev = adapter->pdev;
Ivan Vecera2f6a0262012-10-01 01:56:55 +00001121 int vfs = 0, assigned_vfs = 0, pos;
Sathya Perla39f1d942012-05-08 19:41:24 +00001122 u16 offset, stride;
1123
1124 pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_SRIOV);
Sathya Perlad79c0a22012-06-05 19:37:22 +00001125 if (!pos)
1126 return 0;
Sathya Perla39f1d942012-05-08 19:41:24 +00001127 pci_read_config_word(pdev, pos + PCI_SRIOV_VF_OFFSET, &offset);
1128 pci_read_config_word(pdev, pos + PCI_SRIOV_VF_STRIDE, &stride);
1129
1130 dev = pci_get_device(pdev->vendor, PCI_ANY_ID, NULL);
1131 while (dev) {
Ivan Vecera2f6a0262012-10-01 01:56:55 +00001132 if (dev->is_virtfn && pci_physfn(dev) == pdev) {
Sathya Perla39f1d942012-05-08 19:41:24 +00001133 vfs++;
1134 if (dev->dev_flags & PCI_DEV_FLAGS_ASSIGNED)
1135 assigned_vfs++;
1136 }
1137 dev = pci_get_device(pdev->vendor, PCI_ANY_ID, dev);
1138 }
1139 return (vf_state == ASSIGNED) ? assigned_vfs : vfs;
1140}
1141
/* Adaptively re-tune the EQ delay (interrupt coalescing) for @eqo based
 * on the RX packet rate of its associated RX queue, at most once per
 * second. With AIC disabled the statically-configured eqd is applied
 * instead. The HW is only told about a change when the value differs
 * from the currently programmed one.
 */
static void be_eqd_update(struct be_adapter *adapter, struct be_eq_obj *eqo)
{
	/* NOTE(review): rx_obj[eqo->idx] is dereferenced here, before the
	 * idx >= num_rx_qs bounds check below — confirm eqo->idx is always
	 * a valid rx_obj index when AIC is enabled.
	 */
	struct be_rx_stats *stats = rx_stats(&adapter->rx_obj[eqo->idx]);
	ulong now = jiffies;
	ulong delta = now - stats->rx_jiffies;
	u64 pkts;
	unsigned int start, eqd;

	if (!eqo->enable_aic) {
		eqd = eqo->eqd;	/* static (non-adaptive) delay */
		goto modify_eqd;
	}

	/* EQs beyond the RX queues have no rate stats to adapt from */
	if (eqo->idx >= adapter->num_rx_qs)
		return;

	stats = rx_stats(&adapter->rx_obj[eqo->idx]);

	/* Wrapped around */
	if (time_before(now, stats->rx_jiffies)) {
		stats->rx_jiffies = now;
		return;
	}

	/* Update once a second */
	if (delta < HZ)
		return;

	/* Consistent snapshot of the 64-bit packet counter */
	do {
		start = u64_stats_fetch_begin_bh(&stats->sync);
		pkts = stats->rx_pkts;
	} while (u64_stats_fetch_retry_bh(&stats->sync, start));

	stats->rx_pps = (unsigned long)(pkts - stats->rx_pkts_prev) / (delta / HZ);
	stats->rx_pkts_prev = pkts;
	stats->rx_jiffies = now;
	/* Scale pps to an EQ delay, clamped to [min_eqd, max_eqd];
	 * very low rates disable coalescing entirely.
	 */
	eqd = (stats->rx_pps / 110000) << 3;
	eqd = min(eqd, eqo->max_eqd);
	eqd = max(eqd, eqo->min_eqd);
	if (eqd < 10)
		eqd = 0;

modify_eqd:
	if (eqd != eqo->cur_eqd) {
		be_cmd_modify_eqd(adapter, eqo->q.id, eqd);
		eqo->cur_eqd = eqd;
	}
}
1190
Sathya Perla3abcded2010-10-03 22:12:27 -07001191static void be_rx_stats_update(struct be_rx_obj *rxo,
Sathya Perla2e588f82011-03-11 02:49:26 +00001192 struct be_rx_compl_info *rxcp)
Sathya Perla4097f662009-03-24 16:40:13 -07001193{
Sathya Perlaac124ff2011-07-25 19:10:14 +00001194 struct be_rx_stats *stats = rx_stats(rxo);
Sathya Perla4097f662009-03-24 16:40:13 -07001195
Sathya Perlaab1594e2011-07-25 19:10:15 +00001196 u64_stats_update_begin(&stats->sync);
Sathya Perla3abcded2010-10-03 22:12:27 -07001197 stats->rx_compl++;
Sathya Perla2e588f82011-03-11 02:49:26 +00001198 stats->rx_bytes += rxcp->pkt_size;
Sathya Perla3abcded2010-10-03 22:12:27 -07001199 stats->rx_pkts++;
Sathya Perla2e588f82011-03-11 02:49:26 +00001200 if (rxcp->pkt_type == BE_MULTICAST_PACKET)
Sathya Perla3abcded2010-10-03 22:12:27 -07001201 stats->rx_mcast_pkts++;
Sathya Perla2e588f82011-03-11 02:49:26 +00001202 if (rxcp->err)
Sathya Perlaac124ff2011-07-25 19:10:14 +00001203 stats->rx_compl_err++;
Sathya Perlaab1594e2011-07-25 19:10:15 +00001204 u64_stats_update_end(&stats->sync);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001205}
1206
Sathya Perla2e588f82011-03-11 02:49:26 +00001207static inline bool csum_passed(struct be_rx_compl_info *rxcp)
Ajit Khaparde728a9972009-04-13 15:41:22 -07001208{
Padmanabh Ratnakar19fad862011-03-07 03:08:16 +00001209 /* L4 checksum is not reliable for non TCP/UDP packets.
1210 * Also ignore ipcksm for ipv6 pkts */
Sathya Perla2e588f82011-03-11 02:49:26 +00001211 return (rxcp->tcpf || rxcp->udpf) && rxcp->l4_csum &&
1212 (rxcp->ip_csum || rxcp->ipv6);
Ajit Khaparde728a9972009-04-13 15:41:22 -07001213}
1214
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001215static struct be_rx_page_info *get_rx_page_info(struct be_rx_obj *rxo,
1216 u16 frag_idx)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001217{
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001218 struct be_adapter *adapter = rxo->adapter;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001219 struct be_rx_page_info *rx_page_info;
Sathya Perla3abcded2010-10-03 22:12:27 -07001220 struct be_queue_info *rxq = &rxo->q;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001221
Sathya Perla3abcded2010-10-03 22:12:27 -07001222 rx_page_info = &rxo->page_info_tbl[frag_idx];
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001223 BUG_ON(!rx_page_info->page);
1224
Ajit Khaparde205859a2010-02-09 01:34:21 +00001225 if (rx_page_info->last_page_user) {
Ivan Vecera2b7bceb2011-02-02 08:05:12 +00001226 dma_unmap_page(&adapter->pdev->dev,
1227 dma_unmap_addr(rx_page_info, bus),
1228 adapter->big_page_size, DMA_FROM_DEVICE);
Ajit Khaparde205859a2010-02-09 01:34:21 +00001229 rx_page_info->last_page_user = false;
1230 }
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001231
1232 atomic_dec(&rxq->used);
1233 return rx_page_info;
1234}
1235
1236/* Throwaway the data in the Rx completion */
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001237static void be_rx_compl_discard(struct be_rx_obj *rxo,
1238 struct be_rx_compl_info *rxcp)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001239{
Sathya Perla3abcded2010-10-03 22:12:27 -07001240 struct be_queue_info *rxq = &rxo->q;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001241 struct be_rx_page_info *page_info;
Sathya Perla2e588f82011-03-11 02:49:26 +00001242 u16 i, num_rcvd = rxcp->num_rcvd;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001243
Padmanabh Ratnakare80d9da2011-03-07 03:07:58 +00001244 for (i = 0; i < num_rcvd; i++) {
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001245 page_info = get_rx_page_info(rxo, rxcp->rxq_idx);
Padmanabh Ratnakare80d9da2011-03-07 03:07:58 +00001246 put_page(page_info->page);
1247 memset(page_info, 0, sizeof(*page_info));
Sathya Perla2e588f82011-03-11 02:49:26 +00001248 index_inc(&rxcp->rxq_idx, rxq->len);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001249 }
1250}
1251
/*
 * skb_fill_rx_data forms a complete skb for an ether frame
 * indicated by rxcp: tiny frames are memcpy'd into the linear area,
 * larger ones keep the Ethernet header linear and attach the RX pages
 * as skb frags, coalescing consecutive frags that share one physical
 * page into a single frag slot.
 */
static void skb_fill_rx_data(struct be_rx_obj *rxo, struct sk_buff *skb,
			struct be_rx_compl_info *rxcp)
{
	struct be_queue_info *rxq = &rxo->q;
	struct be_rx_page_info *page_info;
	u16 i, j;
	u16 hdr_len, curr_frag_len, remaining;
	u8 *start;

	page_info = get_rx_page_info(rxo, rxcp->rxq_idx);
	start = page_address(page_info->page) + page_info->page_offset;
	prefetch(start);

	/* Copy data in the first descriptor of this completion */
	curr_frag_len = min(rxcp->pkt_size, rx_frag_size);

	skb->len = curr_frag_len;
	if (curr_frag_len <= BE_HDR_LEN) { /* tiny packet */
		memcpy(skb->data, start, curr_frag_len);
		/* Complete packet has now been moved to data */
		put_page(page_info->page);
		skb->data_len = 0;
		skb->tail += curr_frag_len;
	} else {
		/* Keep only the Ethernet header linear; the rest of the
		 * first frag becomes skb frag 0.
		 */
		hdr_len = ETH_HLEN;
		memcpy(skb->data, start, hdr_len);
		skb_shinfo(skb)->nr_frags = 1;
		skb_frag_set_page(skb, 0, page_info->page);
		skb_shinfo(skb)->frags[0].page_offset =
					page_info->page_offset + hdr_len;
		skb_frag_size_set(&skb_shinfo(skb)->frags[0], curr_frag_len - hdr_len);
		skb->data_len = curr_frag_len - hdr_len;
		skb->truesize += rx_frag_size;
		skb->tail += hdr_len;
	}
	page_info->page = NULL;	/* ownership moved to the skb (or freed) */

	if (rxcp->pkt_size <= rx_frag_size) {
		BUG_ON(rxcp->num_rcvd != 1);
		return;
	}

	/* More frags present for this completion */
	index_inc(&rxcp->rxq_idx, rxq->len);
	remaining = rxcp->pkt_size - curr_frag_len;
	for (i = 1, j = 0; i < rxcp->num_rcvd; i++) {
		page_info = get_rx_page_info(rxo, rxcp->rxq_idx);
		curr_frag_len = min(remaining, rx_frag_size);

		/* Coalesce all frags from the same physical page in one slot */
		if (page_info->page_offset == 0) {
			/* Fresh page */
			j++;
			skb_frag_set_page(skb, j, page_info->page);
			skb_shinfo(skb)->frags[j].page_offset =
							page_info->page_offset;
			skb_frag_size_set(&skb_shinfo(skb)->frags[j], 0);
			skb_shinfo(skb)->nr_frags++;
		} else {
			/* Same page as frag j; drop the extra reference */
			put_page(page_info->page);
		}

		skb_frag_size_add(&skb_shinfo(skb)->frags[j], curr_frag_len);
		skb->len += curr_frag_len;
		skb->data_len += curr_frag_len;
		skb->truesize += rx_frag_size;
		remaining -= curr_frag_len;
		index_inc(&rxcp->rxq_idx, rxq->len);
		page_info->page = NULL;
	}
	BUG_ON(j > MAX_SKB_FRAGS);
}
1328
/* Process the RX completion indicated by rxcp when GRO is disabled.
 * Allocates a fresh skb, attaches the received frags to it and hands it
 * to the stack via netif_receive_skb(). On skb-alloc failure the
 * completion's rx buffers are discarded and a drop counter is bumped.
 */
static void be_rx_compl_process(struct be_rx_obj *rxo,
				struct be_rx_compl_info *rxcp)
{
	struct be_adapter *adapter = rxo->adapter;
	struct net_device *netdev = adapter->netdev;
	struct sk_buff *skb;

	skb = netdev_alloc_skb_ip_align(netdev, BE_RX_SKB_ALLOC_SIZE);
	if (unlikely(!skb)) {
		/* No skb: drop this completion and free its posted buffers */
		rx_stats(rxo)->rx_drops_no_skbs++;
		be_rx_compl_discard(rxo, rxcp);
		return;
	}

	/* Copy/attach the rx frags of this completion into the skb */
	skb_fill_rx_data(rxo, skb, rxcp);

	/* Trust the HW checksum only if the netdev offload is enabled */
	if (likely((netdev->features & NETIF_F_RXCSUM) && csum_passed(rxcp)))
		skb->ip_summed = CHECKSUM_UNNECESSARY;
	else
		skb_checksum_none_assert(skb);

	skb->protocol = eth_type_trans(skb, netdev);
	skb_record_rx_queue(skb, rxo - &adapter->rx_obj[0]);
	if (netdev->features & NETIF_F_RXHASH)
		skb->rxhash = rxcp->rss_hash;

	if (rxcp->vlanf)
		__vlan_hwaccel_put_tag(skb, rxcp->vlan_tag);

	netif_receive_skb(skb);
}
1362
/* Process the RX completion indicated by rxcp when GRO is enabled.
 * Uses napi_get_frags()/napi_gro_frags(): the received page fragments are
 * attached directly to the napi-provided skb (no data copy) and pushed
 * through the GRO engine.
 */
void be_rx_compl_process_gro(struct be_rx_obj *rxo, struct napi_struct *napi,
			     struct be_rx_compl_info *rxcp)
{
	struct be_adapter *adapter = rxo->adapter;
	struct be_rx_page_info *page_info;
	struct sk_buff *skb = NULL;
	struct be_queue_info *rxq = &rxo->q;
	u16 remaining, curr_frag_len;
	u16 i, j;

	skb = napi_get_frags(napi);
	if (!skb) {
		be_rx_compl_discard(rxo, rxcp);
		return;
	}

	remaining = rxcp->pkt_size;
	/* i walks the posted rx frags; j is the skb frag slot being filled
	 * (starts at -1 and is bumped whenever a fresh page is seen) */
	for (i = 0, j = -1; i < rxcp->num_rcvd; i++) {
		page_info = get_rx_page_info(rxo, rxcp->rxq_idx);

		curr_frag_len = min(remaining, rx_frag_size);

		/* Coalesce all frags from the same physical page in one slot */
		if (i == 0 || page_info->page_offset == 0) {
			/* First frag or Fresh page */
			j++;
			skb_frag_set_page(skb, j, page_info->page);
			skb_shinfo(skb)->frags[j].page_offset =
							page_info->page_offset;
			skb_frag_size_set(&skb_shinfo(skb)->frags[j], 0);
		} else {
			/* Same page as the previous frag: the slot already
			 * holds a reference, so drop this frag's extra one */
			put_page(page_info->page);
		}
		skb_frag_size_add(&skb_shinfo(skb)->frags[j], curr_frag_len);
		skb->truesize += rx_frag_size;
		remaining -= curr_frag_len;
		index_inc(&rxcp->rxq_idx, rxq->len);
		memset(page_info, 0, sizeof(*page_info));
	}
	BUG_ON(j > MAX_SKB_FRAGS);

	skb_shinfo(skb)->nr_frags = j + 1;
	skb->len = rxcp->pkt_size;
	skb->data_len = rxcp->pkt_size;
	skb->ip_summed = CHECKSUM_UNNECESSARY;
	skb_record_rx_queue(skb, rxo - &adapter->rx_obj[0]);
	if (adapter->netdev->features & NETIF_F_RXHASH)
		skb->rxhash = rxcp->rss_hash;

	if (rxcp->vlanf)
		__vlan_hwaccel_put_tag(skb, rxcp->vlan_tag);

	napi_gro_frags(napi);
}
1418
/* Parse a v1 (BE3 native mode) RX completion descriptor into the driver's
 * sw struct. The caller (be_rx_compl_get) has already converted the compl
 * to CPU endianness. vtm/vlan_tag are only extracted when the vlanf bit
 * is set.
 */
static void be_parse_rx_compl_v1(struct be_eth_rx_compl *compl,
				struct be_rx_compl_info *rxcp)
{
	rxcp->pkt_size =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v1, pktsize, compl);
	rxcp->vlanf = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, vtp, compl);
	rxcp->err = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, err, compl);
	rxcp->tcpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, tcpf, compl);
	rxcp->udpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, udpf, compl);
	rxcp->ip_csum =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v1, ipcksm, compl);
	rxcp->l4_csum =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v1, l4_cksm, compl);
	rxcp->ipv6 =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v1, ip_version, compl);
	rxcp->rxq_idx =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v1, fragndx, compl);
	rxcp->num_rcvd =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v1, numfrags, compl);
	rxcp->pkt_type =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v1, cast_enc, compl);
	rxcp->rss_hash =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v1, rsshash, compl);
	if (rxcp->vlanf) {
		rxcp->vtm = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, vtm,
					  compl);
		rxcp->vlan_tag = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, vlan_tag,
					       compl);
	}
	rxcp->port = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, port, compl);
}
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001450
/* Parse a v0 (legacy, non BE3-native) RX completion descriptor into the
 * driver's sw struct; mirror of be_parse_rx_compl_v1 for the v0 layout.
 * The compl is already in CPU endianness.
 */
static void be_parse_rx_compl_v0(struct be_eth_rx_compl *compl,
				struct be_rx_compl_info *rxcp)
{
	rxcp->pkt_size =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v0, pktsize, compl);
	rxcp->vlanf = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, vtp, compl);
	rxcp->err = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, err, compl);
	rxcp->tcpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, tcpf, compl);
	rxcp->udpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, udpf, compl);
	rxcp->ip_csum =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v0, ipcksm, compl);
	rxcp->l4_csum =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v0, l4_cksm, compl);
	rxcp->ipv6 =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v0, ip_version, compl);
	rxcp->rxq_idx =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v0, fragndx, compl);
	rxcp->num_rcvd =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v0, numfrags, compl);
	rxcp->pkt_type =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v0, cast_enc, compl);
	rxcp->rss_hash =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v0, rsshash, compl);
	if (rxcp->vlanf) {
		rxcp->vtm = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, vtm,
					  compl);
		rxcp->vlan_tag = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, vlan_tag,
					       compl);
	}
	rxcp->port = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, port, compl);
}
1482
/* Return the next valid RX completion on rxo's CQ, parsed into rxo->rxcp,
 * or NULL if none is pending. The HW compl entry is consumed: its valid
 * word is cleared and the CQ tail is advanced.
 */
static struct be_rx_compl_info *be_rx_compl_get(struct be_rx_obj *rxo)
{
	struct be_eth_rx_compl *compl = queue_tail_node(&rxo->cq);
	struct be_rx_compl_info *rxcp = &rxo->rxcp;
	struct be_adapter *adapter = rxo->adapter;

	/* For checking the valid bit it is Ok to use either definition as the
	 * valid bit is at the same position in both v0 and v1 Rx compl */
	if (compl->dw[offsetof(struct amap_eth_rx_compl_v1, valid) / 32] == 0)
		return NULL;

	/* Read the rest of the compl only after the valid bit is seen */
	rmb();
	be_dws_le_to_cpu(compl, sizeof(*compl));

	if (adapter->be3_native)
		be_parse_rx_compl_v1(compl, rxcp);
	else
		be_parse_rx_compl_v0(compl, rxcp);

	if (rxcp->vlanf) {
		/* vlanf could be wrongly set in some cards.
		 * ignore if vtm is not set */
		if ((adapter->function_mode & FLEX10_MODE) && !rxcp->vtm)
			rxcp->vlanf = 0;

		if (!lancer_chip(adapter))
			rxcp->vlan_tag = swab16(rxcp->vlan_tag);

		/* Drop the vlan indication for the port's own pvid when that
		 * vid was not explicitly configured on the interface */
		if (adapter->pvid == (rxcp->vlan_tag & VLAN_VID_MASK) &&
		    !adapter->vlan_tag[rxcp->vlan_tag])
			rxcp->vlanf = 0;
	}

	/* As the compl has been parsed, reset it; we wont touch it again */
	compl->dw[offsetof(struct amap_eth_rx_compl_v1, valid) / 32] = 0;

	queue_tail_inc(&rxo->cq);
	return rxcp;
}
1522
Eric Dumazet1829b082011-03-01 05:48:12 +00001523static inline struct page *be_alloc_pages(u32 size, gfp_t gfp)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001524{
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001525 u32 order = get_order(size);
Eric Dumazet1829b082011-03-01 05:48:12 +00001526
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001527 if (order > 0)
Eric Dumazet1829b082011-03-01 05:48:12 +00001528 gfp |= __GFP_COMP;
1529 return alloc_pages(gfp, order);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001530}
1531
1532/*
1533 * Allocate a page, split it to fragments of size rx_frag_size and post as
1534 * receive buffers to BE
1535 */
Eric Dumazet1829b082011-03-01 05:48:12 +00001536static void be_post_rx_frags(struct be_rx_obj *rxo, gfp_t gfp)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001537{
Sathya Perla3abcded2010-10-03 22:12:27 -07001538 struct be_adapter *adapter = rxo->adapter;
Sathya Perla26d92f92010-01-21 22:52:08 -08001539 struct be_rx_page_info *page_info = NULL, *prev_page_info = NULL;
Sathya Perla3abcded2010-10-03 22:12:27 -07001540 struct be_queue_info *rxq = &rxo->q;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001541 struct page *pagep = NULL;
1542 struct be_eth_rx_d *rxd;
1543 u64 page_dmaaddr = 0, frag_dmaaddr;
1544 u32 posted, page_offset = 0;
1545
Sathya Perla3abcded2010-10-03 22:12:27 -07001546 page_info = &rxo->page_info_tbl[rxq->head];
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001547 for (posted = 0; posted < MAX_RX_POST && !page_info->page; posted++) {
1548 if (!pagep) {
Eric Dumazet1829b082011-03-01 05:48:12 +00001549 pagep = be_alloc_pages(adapter->big_page_size, gfp);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001550 if (unlikely(!pagep)) {
Sathya Perlaac124ff2011-07-25 19:10:14 +00001551 rx_stats(rxo)->rx_post_fail++;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001552 break;
1553 }
Ivan Vecera2b7bceb2011-02-02 08:05:12 +00001554 page_dmaaddr = dma_map_page(&adapter->pdev->dev, pagep,
1555 0, adapter->big_page_size,
1556 DMA_FROM_DEVICE);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001557 page_info->page_offset = 0;
1558 } else {
1559 get_page(pagep);
1560 page_info->page_offset = page_offset + rx_frag_size;
1561 }
1562 page_offset = page_info->page_offset;
1563 page_info->page = pagep;
FUJITA Tomonorifac6da52010-04-01 16:53:22 +00001564 dma_unmap_addr_set(page_info, bus, page_dmaaddr);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001565 frag_dmaaddr = page_dmaaddr + page_info->page_offset;
1566
1567 rxd = queue_head_node(rxq);
1568 rxd->fragpa_lo = cpu_to_le32(frag_dmaaddr & 0xFFFFFFFF);
1569 rxd->fragpa_hi = cpu_to_le32(upper_32_bits(frag_dmaaddr));
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001570
1571 /* Any space left in the current big page for another frag? */
1572 if ((page_offset + rx_frag_size + rx_frag_size) >
1573 adapter->big_page_size) {
1574 pagep = NULL;
1575 page_info->last_page_user = true;
1576 }
Sathya Perla26d92f92010-01-21 22:52:08 -08001577
1578 prev_page_info = page_info;
1579 queue_head_inc(rxq);
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001580 page_info = &rxo->page_info_tbl[rxq->head];
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001581 }
1582 if (pagep)
Sathya Perla26d92f92010-01-21 22:52:08 -08001583 prev_page_info->last_page_user = true;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001584
1585 if (posted) {
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001586 atomic_add(posted, &rxq->used);
Sathya Perla8788fdc2009-07-27 22:52:03 +00001587 be_rxq_notify(adapter, rxq->id, posted);
Sathya Perlaea1dae12009-03-19 23:56:20 -07001588 } else if (atomic_read(&rxq->used) == 0) {
1589 /* Let be_worker replenish when memory is available */
Sathya Perla3abcded2010-10-03 22:12:27 -07001590 rxo->rx_post_starved = true;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001591 }
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001592}
1593
/* Return the next valid TX completion on tx_cq (converted to CPU
 * endianness), or NULL if none is pending. The entry is consumed: its
 * valid word is cleared and the CQ tail is advanced.
 */
static struct be_eth_tx_compl *be_tx_compl_get(struct be_queue_info *tx_cq)
{
	struct be_eth_tx_compl *txcp = queue_tail_node(tx_cq);

	if (txcp->dw[offsetof(struct amap_eth_tx_compl, valid) / 32] == 0)
		return NULL;

	/* Read the rest of the compl only after the valid bit is seen */
	rmb();
	be_dws_le_to_cpu(txcp, sizeof(*txcp));

	/* Reset the valid word so this entry is not processed twice */
	txcp->dw[offsetof(struct amap_eth_tx_compl, valid) / 32] = 0;

	queue_tail_inc(tx_cq);
	return txcp;
}
1609
/* Reclaim the wrbs of one transmitted skb: walk the TX queue from its tail
 * up to last_index, unmapping each data wrb, then free the skb.
 * Returns the number of wrbs reclaimed (including the header wrb); the
 * caller is responsible for subtracting them from txq->used.
 */
static u16 be_tx_compl_process(struct be_adapter *adapter,
		struct be_tx_obj *txo, u16 last_index)
{
	struct be_queue_info *txq = &txo->q;
	struct be_eth_wrb *wrb;
	struct sk_buff **sent_skbs = txo->sent_skb_list;
	struct sk_buff *sent_skb;
	u16 cur_index, num_wrbs = 1; /* account for hdr wrb */
	bool unmap_skb_hdr = true;

	sent_skb = sent_skbs[txq->tail];
	BUG_ON(!sent_skb);
	sent_skbs[txq->tail] = NULL;

	/* skip header wrb */
	queue_tail_inc(txq);

	do {
		cur_index = txq->tail;
		wrb = queue_tail_node(txq);
		/* The skb header mapping is released only on the first data
		 * wrb (and only if the skb has linear data) */
		unmap_tx_frag(&adapter->pdev->dev, wrb,
			      (unmap_skb_hdr && skb_headlen(sent_skb)));
		unmap_skb_hdr = false;

		num_wrbs++;
		queue_tail_inc(txq);
	} while (cur_index != last_index);

	kfree_skb(sent_skb);
	return num_wrbs;
}
1641
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001642/* Return the number of events in the event queue */
1643static inline int events_get(struct be_eq_obj *eqo)
Sathya Perla859b1e42009-08-10 03:43:51 +00001644{
1645 struct be_eq_entry *eqe;
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001646 int num = 0;
Sathya Perla859b1e42009-08-10 03:43:51 +00001647
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001648 do {
1649 eqe = queue_tail_node(&eqo->q);
1650 if (eqe->evt == 0)
1651 break;
1652
1653 rmb();
Sathya Perla859b1e42009-08-10 03:43:51 +00001654 eqe->evt = 0;
1655 num++;
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001656 queue_tail_inc(&eqo->q);
1657 } while (true);
Sathya Perla859b1e42009-08-10 03:43:51 +00001658
1659 return num;
1660}
1661
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001662static int event_handle(struct be_eq_obj *eqo)
Sathya Perla859b1e42009-08-10 03:43:51 +00001663{
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001664 bool rearm = false;
1665 int num = events_get(eqo);
Sathya Perla859b1e42009-08-10 03:43:51 +00001666
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001667 /* Deal with any spurious interrupts that come without events */
1668 if (!num)
1669 rearm = true;
Sathya Perla859b1e42009-08-10 03:43:51 +00001670
Padmanabh Ratnakaraf311fe2012-04-25 01:46:39 +00001671 if (num || msix_enabled(eqo->adapter))
1672 be_eq_notify(eqo->adapter, eqo->q.id, rearm, true, num);
1673
Sathya Perla859b1e42009-08-10 03:43:51 +00001674 if (num)
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001675 napi_schedule(&eqo->napi);
1676
1677 return num;
Sathya Perla859b1e42009-08-10 03:43:51 +00001678}
1679
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001680/* Leaves the EQ is disarmed state */
1681static void be_eq_clean(struct be_eq_obj *eqo)
1682{
1683 int num = events_get(eqo);
1684
1685 be_eq_notify(eqo->adapter, eqo->q.id, false, true, num);
1686}
1687
/* Flush the RX path of rxo: discard all pending completions on its CQ,
 * then release every posted-but-unconsumed rx buffer, leaving the rx
 * queue empty (head == tail == 0).
 */
static void be_rx_cq_clean(struct be_rx_obj *rxo)
{
	struct be_rx_page_info *page_info;
	struct be_queue_info *rxq = &rxo->q;
	struct be_queue_info *rx_cq = &rxo->cq;
	struct be_rx_compl_info *rxcp;
	u16 tail;

	/* First cleanup pending rx completions */
	while ((rxcp = be_rx_compl_get(rxo)) != NULL) {
		be_rx_compl_discard(rxo, rxcp);
		be_cq_notify(rxo->adapter, rx_cq->id, false, 1);
	}

	/* Then free posted rx buffer that were not used */
	tail = (rxq->head + rxq->len - atomic_read(&rxq->used)) % rxq->len;
	for (; atomic_read(&rxq->used) > 0; index_inc(&tail, rxq->len)) {
		page_info = get_rx_page_info(rxo, tail);
		put_page(page_info->page);
		memset(page_info, 0, sizeof(*page_info));
	}
	BUG_ON(atomic_read(&rxq->used));
	rxq->tail = rxq->head = 0;
}
1712
/* Drain all TX queues: wait up to ~200ms for pending tx completions and
 * reclaim them; any wrbs whose completions never arrive are then freed
 * directly so every txq ends up empty.
 */
static void be_tx_compl_clean(struct be_adapter *adapter)
{
	struct be_tx_obj *txo;
	struct be_queue_info *txq;
	struct be_eth_tx_compl *txcp;
	u16 end_idx, cmpl = 0, timeo = 0, num_wrbs = 0;
	struct sk_buff *sent_skb;
	bool dummy_wrb;
	int i, pending_txqs;

	/* Wait for a max of 200ms for all the tx-completions to arrive. */
	do {
		pending_txqs = adapter->num_tx_qs;

		for_all_tx_queues(adapter, txo, i) {
			txq = &txo->q;
			/* Reclaim every completion currently on this txq's CQ */
			while ((txcp = be_tx_compl_get(&txo->cq))) {
				end_idx =
					AMAP_GET_BITS(struct amap_eth_tx_compl,
						      wrb_index, txcp);
				num_wrbs += be_tx_compl_process(adapter, txo,
								end_idx);
				cmpl++;
			}
			if (cmpl) {
				be_cq_notify(adapter, txo->cq.id, false, cmpl);
				atomic_sub(num_wrbs, &txq->used);
				cmpl = 0;
				num_wrbs = 0;
			}
			if (atomic_read(&txq->used) == 0)
				pending_txqs--;
		}

		if (pending_txqs == 0 || ++timeo > 200)
			break;

		mdelay(1);
	} while (true);

	for_all_tx_queues(adapter, txo, i) {
		txq = &txo->q;
		if (atomic_read(&txq->used))
			dev_err(&adapter->pdev->dev, "%d pending tx-compls\n",
				atomic_read(&txq->used));

		/* free posted tx for which compls will never arrive */
		while (atomic_read(&txq->used)) {
			sent_skb = txo->sent_skb_list[txq->tail];
			end_idx = txq->tail;
			/* Recompute the wrb span of this skb to walk it */
			num_wrbs = wrb_cnt_for_skb(adapter, sent_skb,
						   &dummy_wrb);
			index_adv(&end_idx, num_wrbs - 1, txq->len);
			num_wrbs = be_tx_compl_process(adapter, txo, end_idx);
			atomic_sub(num_wrbs, &txq->used);
		}
	}
}
1771
/* Tear down all event queues: drain and destroy each created EQ on the
 * device, then free its host memory.
 */
static void be_evt_queues_destroy(struct be_adapter *adapter)
{
	struct be_eq_obj *eqo;
	int i;

	for_all_evt_queues(adapter, eqo, i) {
		if (eqo->q.created) {
			/* Drain pending events before destroying the EQ */
			be_eq_clean(eqo);
			be_cmd_q_destroy(adapter, &eqo->q, QTYPE_EQ);
		}
		be_queue_free(adapter, &eqo->q);
	}
}
1785
1786static int be_evt_queues_create(struct be_adapter *adapter)
1787{
1788 struct be_queue_info *eq;
1789 struct be_eq_obj *eqo;
1790 int i, rc;
1791
1792 adapter->num_evt_qs = num_irqs(adapter);
1793
1794 for_all_evt_queues(adapter, eqo, i) {
1795 eqo->adapter = adapter;
1796 eqo->tx_budget = BE_TX_BUDGET;
1797 eqo->idx = i;
1798 eqo->max_eqd = BE_MAX_EQD;
1799 eqo->enable_aic = true;
1800
1801 eq = &eqo->q;
1802 rc = be_queue_alloc(adapter, eq, EVNT_Q_LEN,
1803 sizeof(struct be_eq_entry));
1804 if (rc)
1805 return rc;
1806
1807 rc = be_cmd_eq_create(adapter, eq, eqo->cur_eqd);
1808 if (rc)
1809 return rc;
1810 }
Sathya Perla1cfafab2012-02-23 18:50:15 +00001811 return 0;
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001812}
1813
Sathya Perla5fb379e2009-06-18 00:02:59 +00001814static void be_mcc_queues_destroy(struct be_adapter *adapter)
1815{
1816 struct be_queue_info *q;
Sathya Perla5fb379e2009-06-18 00:02:59 +00001817
Sathya Perla8788fdc2009-07-27 22:52:03 +00001818 q = &adapter->mcc_obj.q;
Sathya Perla5fb379e2009-06-18 00:02:59 +00001819 if (q->created)
Sathya Perla8788fdc2009-07-27 22:52:03 +00001820 be_cmd_q_destroy(adapter, q, QTYPE_MCCQ);
Sathya Perla5fb379e2009-06-18 00:02:59 +00001821 be_queue_free(adapter, q);
1822
Sathya Perla8788fdc2009-07-27 22:52:03 +00001823 q = &adapter->mcc_obj.cq;
Sathya Perla5fb379e2009-06-18 00:02:59 +00001824 if (q->created)
Sathya Perla8788fdc2009-07-27 22:52:03 +00001825 be_cmd_q_destroy(adapter, q, QTYPE_CQ);
Sathya Perla5fb379e2009-06-18 00:02:59 +00001826 be_queue_free(adapter, q);
1827}
1828
/* Must be called only after TX qs are created as MCC shares TX EQ.
 * Creates the MCC completion queue and then the MCC queue itself;
 * unwinds partially-created resources on any failure. Returns 0 on
 * success, -1 on failure.
 */
static int be_mcc_queues_create(struct be_adapter *adapter)
{
	struct be_queue_info *q, *cq;

	cq = &adapter->mcc_obj.cq;
	if (be_queue_alloc(adapter, cq, MCC_CQ_LEN,
			sizeof(struct be_mcc_compl)))
		goto err;

	/* Use the default EQ for MCC completions */
	if (be_cmd_cq_create(adapter, cq, &mcc_eqo(adapter)->q, true, 0))
		goto mcc_cq_free;

	q = &adapter->mcc_obj.q;
	if (be_queue_alloc(adapter, q, MCC_Q_LEN, sizeof(struct be_mcc_wrb)))
		goto mcc_cq_destroy;

	if (be_cmd_mccq_create(adapter, q, cq))
		goto mcc_q_free;

	return 0;

/* Unwind in reverse order of creation */
mcc_q_free:
	be_queue_free(adapter, q);
mcc_cq_destroy:
	be_cmd_q_destroy(adapter, cq, QTYPE_CQ);
mcc_cq_free:
	be_queue_free(adapter, cq);
err:
	return -1;
}
1861
/* Destroy each TX queue and its completion queue, freeing their memory. */
static void be_tx_queues_destroy(struct be_adapter *adapter)
{
	struct be_queue_info *q;
	struct be_tx_obj *txo;
	u8 i;

	for_all_tx_queues(adapter, txo, i) {
		q = &txo->q;
		if (q->created)
			be_cmd_q_destroy(adapter, q, QTYPE_TXQ);
		be_queue_free(adapter, q);

		q = &txo->cq;
		if (q->created)
			be_cmd_q_destroy(adapter, q, QTYPE_CQ);
		be_queue_free(adapter, q);
	}
}
1880
Sathya Perladafc0fe2011-10-24 02:45:02 +00001881static int be_num_txqs_want(struct be_adapter *adapter)
1882{
Padmanabh Ratnakarabb93952012-10-20 06:01:41 +00001883 if ((!lancer_chip(adapter) && sriov_want(adapter)) ||
1884 be_is_mc(adapter) ||
1885 (!lancer_chip(adapter) && !be_physfn(adapter)) ||
Sathya Perla39f1d942012-05-08 19:41:24 +00001886 adapter->generation == BE_GEN2)
Sathya Perladafc0fe2011-10-24 02:45:02 +00001887 return 1;
1888 else
Padmanabh Ratnakarabb93952012-10-20 06:01:41 +00001889 return adapter->max_tx_queues;
Sathya Perladafc0fe2011-10-24 02:45:02 +00001890}
1891
/* Decide the number of TX queues, publish it to the stack, and create a
 * completion queue for each. Returns 0 on success or the first failing
 * status.
 */
static int be_tx_cqs_create(struct be_adapter *adapter)
{
	struct be_queue_info *cq, *eq;
	int status;
	struct be_tx_obj *txo;
	u8 i;

	adapter->num_tx_qs = be_num_txqs_want(adapter);
	if (adapter->num_tx_qs != MAX_TX_QS) {
		/* netif_set_real_num_tx_queues() requires rtnl */
		rtnl_lock();
		netif_set_real_num_tx_queues(adapter->netdev,
			adapter->num_tx_qs);
		rtnl_unlock();
	}

	for_all_tx_queues(adapter, txo, i) {
		cq = &txo->cq;
		status = be_queue_alloc(adapter, cq, TX_CQ_LEN,
					sizeof(struct be_eth_tx_compl));
		if (status)
			return status;

		/* If num_evt_qs is less than num_tx_qs, then more than
		 * one txq share an eq
		 */
		eq = &adapter->eq_obj[i % adapter->num_evt_qs].q;
		status = be_cmd_cq_create(adapter, cq, eq, false, 3);
		if (status)
			return status;
	}
	return 0;
}
1924
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001925static int be_tx_qs_create(struct be_adapter *adapter)
1926{
1927 struct be_tx_obj *txo;
1928 int i, status;
1929
1930 for_all_tx_queues(adapter, txo, i) {
1931 status = be_queue_alloc(adapter, &txo->q, TX_Q_LEN,
1932 sizeof(struct be_eth_wrb));
1933 if (status)
1934 return status;
1935
1936 status = be_cmd_txq_create(adapter, &txo->q, &txo->cq);
1937 if (status)
1938 return status;
1939 }
1940
Sathya Perlad3791422012-09-28 04:39:44 +00001941 dev_info(&adapter->pdev->dev, "created %d TX queue(s)\n",
1942 adapter->num_tx_qs);
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001943 return 0;
1944}
1945
1946static void be_rx_cqs_destroy(struct be_adapter *adapter)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001947{
1948 struct be_queue_info *q;
Sathya Perla3abcded2010-10-03 22:12:27 -07001949 struct be_rx_obj *rxo;
1950 int i;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001951
Sathya Perla3abcded2010-10-03 22:12:27 -07001952 for_all_rx_queues(adapter, rxo, i) {
Sathya Perla3abcded2010-10-03 22:12:27 -07001953 q = &rxo->cq;
1954 if (q->created)
1955 be_cmd_q_destroy(adapter, q, QTYPE_CQ);
1956 be_queue_free(adapter, q);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001957 }
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001958}
1959
/* Decides the number of RX queues (RSS rings + 1 default when more than
 * one irq is available), updates the netdev RX queue count under rtnl,
 * then allocates and creates one RX completion queue per ring.
 * Returns 0 on success or a negative errno.
 */
static int be_rx_cqs_create(struct be_adapter *adapter)
{
	struct be_queue_info *eq, *cq;
	struct be_rx_obj *rxo;
	int rc, i;

	/* We'll create as many RSS rings as there are irqs.
	 * But when there's only one irq there's no use creating RSS rings
	 */
	adapter->num_rx_qs = (num_irqs(adapter) > 1) ?
				num_irqs(adapter) + 1 : 1;
	if (adapter->num_rx_qs != MAX_RX_QS) {
		/* rtnl protects the netdev queue-count update */
		rtnl_lock();
		netif_set_real_num_rx_queues(adapter->netdev,
					     adapter->num_rx_qs);
		rtnl_unlock();
	}

	/* big_page_size: DMA page-order sized to hold rx_frag_size frags */
	adapter->big_page_size = (1 << get_order(rx_frag_size)) * PAGE_SIZE;
	for_all_rx_queues(adapter, rxo, i) {
		rxo->adapter = adapter;
		cq = &rxo->cq;
		rc = be_queue_alloc(adapter, cq, RX_CQ_LEN,
				sizeof(struct be_eth_rx_compl));
		if (rc)
			return rc;

		/* Multiple RXQs may share one EQ when there are fewer EQs */
		eq = &adapter->eq_obj[i % adapter->num_evt_qs].q;
		rc = be_cmd_cq_create(adapter, cq, eq, false, 3);
		if (rc)
			return rc;
	}

	dev_info(&adapter->pdev->dev,
		 "created %d RSS queue(s) and 1 default RX queue\n",
		 adapter->num_rx_qs - 1);
	return 0;
}
1998
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001999static irqreturn_t be_intx(int irq, void *dev)
2000{
2001 struct be_adapter *adapter = dev;
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002002 int num_evts;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002003
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002004 /* With INTx only one EQ is used */
2005 num_evts = event_handle(&adapter->eq_obj[0]);
2006 if (num_evts)
2007 return IRQ_HANDLED;
2008 else
2009 return IRQ_NONE;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002010}
2011
/* Per-EQ MSI-x interrupt handler; dev is the be_eq_obj registered via
 * request_irq(). MSI-x vectors are never shared, so always HANDLED.
 */
static irqreturn_t be_msix(int irq, void *dev)
{
	struct be_eq_obj *eqo = dev;

	event_handle(eqo);
	return IRQ_HANDLED;
}
2019
Sathya Perla2e588f82011-03-11 02:49:26 +00002020static inline bool do_gro(struct be_rx_compl_info *rxcp)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002021{
Sathya Perla2e588f82011-03-11 02:49:26 +00002022 return (rxcp->tcpf && !rxcp->err) ? true : false;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002023}
2024
/* NAPI RX worker for one RX ring: drains up to @budget completions,
 * discarding flush/partial/misfiltered entries, and hands good frames
 * to GRO or the regular receive path. Re-arms the CQ and replenishes
 * RX frags when the ring runs low. Returns the number of completions
 * consumed.
 */
static int be_process_rx(struct be_rx_obj *rxo, struct napi_struct *napi,
			int budget)
{
	struct be_adapter *adapter = rxo->adapter;
	struct be_queue_info *rx_cq = &rxo->cq;
	struct be_rx_compl_info *rxcp;
	u32 work_done;

	for (work_done = 0; work_done < budget; work_done++) {
		rxcp = be_rx_compl_get(rxo);
		if (!rxcp)
			break;

		/* Is it a flush compl that has no data */
		if (unlikely(rxcp->num_rcvd == 0))
			goto loop_continue;

		/* Discard compl with partial DMA Lancer B0 */
		if (unlikely(!rxcp->pkt_size)) {
			be_rx_compl_discard(rxo, rxcp);
			goto loop_continue;
		}

		/* On BE drop pkts that arrive due to imperfect filtering in
		 * promiscuous mode on some skews
		 */
		if (unlikely(rxcp->port != adapter->port_num &&
				!lancer_chip(adapter))) {
			be_rx_compl_discard(rxo, rxcp);
			goto loop_continue;
		}

		if (do_gro(rxcp))
			be_rx_compl_process_gro(rxo, napi, rxcp);
		else
			be_rx_compl_process(rxo, rxcp);
loop_continue:
		/* Stats are updated even for discarded completions */
		be_rx_stats_update(rxo, rxcp);
	}

	if (work_done) {
		/* Acknowledge the consumed entries and re-arm the CQ */
		be_cq_notify(adapter, rx_cq->id, true, work_done);

		/* Top up the RX ring if it has drained below the watermark */
		if (atomic_read(&rxo->q.used) < RX_FRAGS_REFILL_WM)
			be_post_rx_frags(rxo, GFP_ATOMIC);
	}

	return work_done;
}
2074
/* Reaps up to @budget TX completions for queue @idx: frees the
 * completed wrbs, re-arms the CQ, wakes the subqueue if it was
 * flow-stopped and at least half the ring is free, and updates
 * per-queue completion stats.
 * Returns true when fewer than @budget completions were found
 * (i.e. the queue is fully drained for now).
 */
static bool be_process_tx(struct be_adapter *adapter, struct be_tx_obj *txo,
			int budget, int idx)
{
	struct be_eth_tx_compl *txcp;
	int num_wrbs = 0, work_done;

	for (work_done = 0; work_done < budget; work_done++) {
		txcp = be_tx_compl_get(&txo->cq);
		if (!txcp)
			break;
		num_wrbs += be_tx_compl_process(adapter, txo,
				AMAP_GET_BITS(struct amap_eth_tx_compl,
					wrb_index, txcp));
	}

	if (work_done) {
		be_cq_notify(adapter, txo->cq.id, true, work_done);
		atomic_sub(num_wrbs, &txo->q.used);

		/* As Tx wrbs have been freed up, wake up netdev queue
		 * if it was stopped due to lack of tx wrbs. */
		if (__netif_subqueue_stopped(adapter->netdev, idx) &&
			atomic_read(&txo->q.used) < txo->q.len / 2) {
			netif_wake_subqueue(adapter->netdev, idx);
		}

		/* u64 stats are protected by the sync_compl seqcount */
		u64_stats_update_begin(&tx_stats(txo)->sync_compl);
		tx_stats(txo)->tx_compl += work_done;
		u64_stats_update_end(&tx_stats(txo)->sync_compl);
	}
	return (work_done < budget); /* Done */
}
Sathya Perla3c8def92011-06-12 20:01:58 +00002107
/* NAPI poll handler for one event queue: services every TX and RX
 * queue mapped to this EQ (queues are striped across EQs by index),
 * plus MCC completions on the MCC EQ. Completes NAPI and re-arms the
 * EQ when under budget; otherwise stays in polling mode and only
 * clears the accumulated events.
 * Returns the amount of work done (per the NAPI contract).
 */
int be_poll(struct napi_struct *napi, int budget)
{
	struct be_eq_obj *eqo = container_of(napi, struct be_eq_obj, napi);
	struct be_adapter *adapter = eqo->adapter;
	int max_work = 0, work, i;
	bool tx_done;

	/* Process all TXQs serviced by this EQ */
	for (i = eqo->idx; i < adapter->num_tx_qs; i += adapter->num_evt_qs) {
		tx_done = be_process_tx(adapter, &adapter->tx_obj[i],
					eqo->tx_budget, i);
		/* Unfinished TX work forces a re-poll by consuming budget */
		if (!tx_done)
			max_work = budget;
	}

	/* This loop will iterate twice for EQ0 in which
	 * completions of the last RXQ (default one) are also processed
	 * For other EQs the loop iterates only once
	 */
	for (i = eqo->idx; i < adapter->num_rx_qs; i += adapter->num_evt_qs) {
		work = be_process_rx(&adapter->rx_obj[i], napi, budget);
		max_work = max(work, max_work);
	}

	if (is_mcc_eqo(eqo))
		be_process_mcc(adapter);

	if (max_work < budget) {
		napi_complete(napi);
		/* Re-arm the EQ so the next event raises an interrupt */
		be_eq_notify(adapter, eqo->q.id, true, false, 0);
	} else {
		/* As we'll continue in polling mode, count and clear events */
		be_eq_notify(adapter, eqo->q.id, false, false, events_get(eqo));
	}
	return max_work;
}
2144
Padmanabh Ratnakarf67ef7b2012-07-12 03:57:09 +00002145void be_detect_error(struct be_adapter *adapter)
Ajit Khaparde7c185272010-07-29 06:16:33 +00002146{
Padmanabh Ratnakare1cfb672011-11-03 01:50:08 +00002147 u32 ue_lo = 0, ue_hi = 0, ue_lo_mask = 0, ue_hi_mask = 0;
2148 u32 sliport_status = 0, sliport_err1 = 0, sliport_err2 = 0;
Ajit Khaparde7c185272010-07-29 06:16:33 +00002149 u32 i;
2150
Padmanabh Ratnakarf67ef7b2012-07-12 03:57:09 +00002151 if (be_crit_error(adapter))
Sathya Perla72f02482011-11-10 19:17:58 +00002152 return;
2153
Padmanabh Ratnakare1cfb672011-11-03 01:50:08 +00002154 if (lancer_chip(adapter)) {
2155 sliport_status = ioread32(adapter->db + SLIPORT_STATUS_OFFSET);
2156 if (sliport_status & SLIPORT_STATUS_ERR_MASK) {
2157 sliport_err1 = ioread32(adapter->db +
2158 SLIPORT_ERROR1_OFFSET);
2159 sliport_err2 = ioread32(adapter->db +
2160 SLIPORT_ERROR2_OFFSET);
2161 }
2162 } else {
2163 pci_read_config_dword(adapter->pdev,
2164 PCICFG_UE_STATUS_LOW, &ue_lo);
2165 pci_read_config_dword(adapter->pdev,
2166 PCICFG_UE_STATUS_HIGH, &ue_hi);
2167 pci_read_config_dword(adapter->pdev,
2168 PCICFG_UE_STATUS_LOW_MASK, &ue_lo_mask);
2169 pci_read_config_dword(adapter->pdev,
2170 PCICFG_UE_STATUS_HI_MASK, &ue_hi_mask);
Ajit Khaparde7c185272010-07-29 06:16:33 +00002171
Padmanabh Ratnakarf67ef7b2012-07-12 03:57:09 +00002172 ue_lo = (ue_lo & ~ue_lo_mask);
2173 ue_hi = (ue_hi & ~ue_hi_mask);
Padmanabh Ratnakare1cfb672011-11-03 01:50:08 +00002174 }
Ajit Khaparde7c185272010-07-29 06:16:33 +00002175
Ajit Khaparde1451ae62012-10-08 18:18:21 +00002176 /* On certain platforms BE hardware can indicate spurious UEs.
2177 * Allow the h/w to stop working completely in case of a real UE.
2178 * Hence not setting the hw_error for UE detection.
2179 */
2180 if (sliport_status & SLIPORT_STATUS_ERR_MASK) {
Padmanabh Ratnakarf67ef7b2012-07-12 03:57:09 +00002181 adapter->hw_error = true;
Sathya Perla434b3642011-11-10 19:17:59 +00002182 dev_err(&adapter->pdev->dev,
Padmanabh Ratnakarf67ef7b2012-07-12 03:57:09 +00002183 "Error detected in the card\n");
2184 }
2185
2186 if (sliport_status & SLIPORT_STATUS_ERR_MASK) {
2187 dev_err(&adapter->pdev->dev,
2188 "ERR: sliport status 0x%x\n", sliport_status);
2189 dev_err(&adapter->pdev->dev,
2190 "ERR: sliport error1 0x%x\n", sliport_err1);
2191 dev_err(&adapter->pdev->dev,
2192 "ERR: sliport error2 0x%x\n", sliport_err2);
Ajit Khaparded053de92010-09-03 06:23:30 +00002193 }
2194
Padmanabh Ratnakare1cfb672011-11-03 01:50:08 +00002195 if (ue_lo) {
2196 for (i = 0; ue_lo; ue_lo >>= 1, i++) {
2197 if (ue_lo & 1)
Ajit Khaparde7c185272010-07-29 06:16:33 +00002198 dev_err(&adapter->pdev->dev,
2199 "UE: %s bit set\n", ue_status_low_desc[i]);
2200 }
2201 }
Padmanabh Ratnakarf67ef7b2012-07-12 03:57:09 +00002202
Padmanabh Ratnakare1cfb672011-11-03 01:50:08 +00002203 if (ue_hi) {
2204 for (i = 0; ue_hi; ue_hi >>= 1, i++) {
2205 if (ue_hi & 1)
Ajit Khaparde7c185272010-07-29 06:16:33 +00002206 dev_err(&adapter->pdev->dev,
2207 "UE: %s bit set\n", ue_status_hi_desc[i]);
2208 }
2209 }
2210
2211}
2212
Sathya Perla8d56ff12009-11-22 22:02:26 +00002213static void be_msix_disable(struct be_adapter *adapter)
2214{
Sathya Perlaac6a0c42011-03-21 20:49:25 +00002215 if (msix_enabled(adapter)) {
Sathya Perla8d56ff12009-11-22 22:02:26 +00002216 pci_disable_msix(adapter->pdev);
Sathya Perlaac6a0c42011-03-21 20:49:25 +00002217 adapter->num_msix_vec = 0;
Sathya Perla3abcded2010-10-03 22:12:27 -07002218 }
2219}
2220
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002221static uint be_num_rss_want(struct be_adapter *adapter)
2222{
Yuval Mintz30e80b52012-07-01 03:19:00 +00002223 u32 num = 0;
Padmanabh Ratnakarabb93952012-10-20 06:01:41 +00002224
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002225 if ((adapter->function_caps & BE_FUNCTION_CAPS_RSS) &&
Padmanabh Ratnakarabb93952012-10-20 06:01:41 +00002226 (lancer_chip(adapter) ||
2227 (!sriov_want(adapter) && be_physfn(adapter)))) {
2228 num = adapter->max_rss_queues;
Yuval Mintz30e80b52012-07-01 03:19:00 +00002229 num = min_t(u32, num, (u32)netif_get_num_default_rss_queues());
2230 }
2231 return num;
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002232}
2233
/* Requests MSI-x vectors for NIC (RSS rings + default RXQ) and,
 * when supported, RoCE. Follows the pci_enable_msix() protocol:
 * a positive return is the number of vectors actually available,
 * so retry once with that count. On success, splits the granted
 * vectors between NIC and RoCE; on failure falls back to INTx
 * (num_msix_vec stays 0).
 */
static void be_msix_enable(struct be_adapter *adapter)
{
#define BE_MIN_MSIX_VECTORS 1
	int i, status, num_vec, num_roce_vec = 0;
	struct device *dev = &adapter->pdev->dev;

	/* If RSS queues are not used, need a vec for default RX Q */
	num_vec = min(be_num_rss_want(adapter), num_online_cpus());
	if (be_roce_supported(adapter)) {
		num_roce_vec = min_t(u32, MAX_ROCE_MSIX_VECTORS,
					(num_online_cpus() + 1));
		num_roce_vec = min(num_roce_vec, MAX_ROCE_EQS);
		num_vec += num_roce_vec;
		num_vec = min(num_vec, MAX_MSIX_VECTORS);
	}
	num_vec = max(num_vec, BE_MIN_MSIX_VECTORS);

	for (i = 0; i < num_vec; i++)
		adapter->msix_entries[i].entry = i;

	status = pci_enable_msix(adapter->pdev, adapter->msix_entries, num_vec);
	if (status == 0) {
		goto done;
	} else if (status >= BE_MIN_MSIX_VECTORS) {
		/* Positive status = number of vectors available; retry */
		num_vec = status;
		if (pci_enable_msix(adapter->pdev, adapter->msix_entries,
				num_vec) == 0)
			goto done;
	}

	dev_warn(dev, "MSIx enable failed\n");
	return;
done:
	/* RoCE gets the leftover vectors; NIC keeps the rest */
	if (be_roce_supported(adapter)) {
		if (num_vec > num_roce_vec) {
			adapter->num_msix_vec = num_vec - num_roce_vec;
			adapter->num_msix_roce_vec =
				num_vec - adapter->num_msix_vec;
		} else {
			adapter->num_msix_vec = num_vec;
			adapter->num_msix_roce_vec = 0;
		}
	} else
		adapter->num_msix_vec = num_vec;
	dev_info(dev, "enabled %d MSI-x vector(s)\n", adapter->num_msix_vec);
	return;
}
2281
Sathya Perlafe6d2a32010-11-21 23:25:50 +00002282static inline int be_msix_vec_get(struct be_adapter *adapter,
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002283 struct be_eq_obj *eqo)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002284{
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002285 return adapter->msix_entries[eqo->idx].vector;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002286}
2287
/* Registers one MSI-x IRQ handler per event queue. On failure, frees
 * the IRQs registered so far (walking backwards), disables MSI-x, and
 * returns the failing status so the caller can fall back to INTx.
 */
static int be_msix_register(struct be_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	struct be_eq_obj *eqo;
	int status, i, vec;

	for_all_evt_queues(adapter, eqo, i) {
		/* Per-vector name shown in /proc/interrupts */
		sprintf(eqo->desc, "%s-q%d", netdev->name, i);
		vec = be_msix_vec_get(adapter, eqo);
		status = request_irq(vec, be_msix, 0, eqo->desc, eqo);
		if (status)
			goto err_msix;
	}

	return 0;
err_msix:
	/* Unwind: free only the vectors successfully requested above */
	for (i--, eqo = &adapter->eq_obj[i]; i >= 0; i--, eqo--)
		free_irq(be_msix_vec_get(adapter, eqo), eqo);
	dev_warn(&adapter->pdev->dev, "MSIX Request IRQ failed - err %d\n",
		status);
	be_msix_disable(adapter);
	return status;
}
2311
/* Registers interrupt handlers: tries MSI-x first when enabled, and
 * falls back to shared INTx for PFs (VFs have no INTx support).
 * Sets isr_registered on success. Returns 0 or a negative errno.
 */
static int be_irq_register(struct be_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	int status;

	if (msix_enabled(adapter)) {
		status = be_msix_register(adapter);
		if (status == 0)
			goto done;
		/* INTx is not supported for VF */
		if (!be_physfn(adapter))
			return status;
	}

	/* INTx */
	netdev->irq = adapter->pdev->irq;
	status = request_irq(netdev->irq, be_intx, IRQF_SHARED, netdev->name,
			adapter);
	if (status) {
		dev_err(&adapter->pdev->dev,
			"INTx request IRQ failed - err %d\n", status);
		return status;
	}
done:
	adapter->isr_registered = true;
	return 0;
}
2339
2340static void be_irq_unregister(struct be_adapter *adapter)
2341{
2342 struct net_device *netdev = adapter->netdev;
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002343 struct be_eq_obj *eqo;
Sathya Perla3abcded2010-10-03 22:12:27 -07002344 int i;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002345
2346 if (!adapter->isr_registered)
2347 return;
2348
2349 /* INTx */
Sathya Perlaac6a0c42011-03-21 20:49:25 +00002350 if (!msix_enabled(adapter)) {
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002351 free_irq(netdev->irq, adapter);
2352 goto done;
2353 }
2354
2355 /* MSIx */
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002356 for_all_evt_queues(adapter, eqo, i)
2357 free_irq(be_msix_vec_get(adapter, eqo), eqo);
Sathya Perla3abcded2010-10-03 22:12:27 -07002358
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002359done:
2360 adapter->isr_registered = false;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002361}
2362
/* Destroys every created RX queue via FW, waits for in-flight DMA and
 * the flush completion, drains the CQ, then frees the ring memory.
 */
static void be_rx_qs_destroy(struct be_adapter *adapter)
{
	struct be_queue_info *q;
	struct be_rx_obj *rxo;
	int i;

	for_all_rx_queues(adapter, rxo, i) {
		q = &rxo->q;
		if (q->created) {
			be_cmd_rxq_destroy(adapter, q);
			/* After the rxq is invalidated, wait for a grace time
			 * of 1ms for all dma to end and the flush compl to
			 * arrive
			 */
			mdelay(1);
			be_rx_cq_clean(rxo);
		}
		be_queue_free(adapter, q);
	}
}
2383
/* ndo_stop: tears the interface down in the reverse order of be_open -
 * RoCE first, then async MCC, interrupts/NAPI, IRQ handlers, pending
 * TX completions, and finally the RX queues. Always returns 0.
 */
static int be_close(struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_eq_obj *eqo;
	int i;

	be_roce_dev_close(adapter);

	be_async_mcc_disable(adapter);

	/* Lancer has no host-side global interrupt-enable bit */
	if (!lancer_chip(adapter))
		be_intr_set(adapter, false);

	for_all_evt_queues(adapter, eqo, i) {
		napi_disable(&eqo->napi);
		/* Ensure no handler is still running before cleaning the EQ */
		if (msix_enabled(adapter))
			synchronize_irq(be_msix_vec_get(adapter, eqo));
		else
			synchronize_irq(netdev->irq);
		be_eq_clean(eqo);
	}

	be_irq_unregister(adapter);

	/* Wait for all pending tx completions to arrive so that
	 * all tx skbs are freed.
	 */
	be_tx_compl_clean(adapter);

	be_rx_qs_destroy(adapter);
	return 0;
}
2416
/* Allocates RX rings and creates them in FW: the default (non-RSS)
 * RXQ first (a FW requirement), then the RSS rings; programs the
 * 128-entry RSS indirection table when multiple RXQs exist, and posts
 * the initial RX buffers. Returns 0 or a negative errno.
 */
static int be_rx_qs_create(struct be_adapter *adapter)
{
	struct be_rx_obj *rxo;
	int rc, i, j;
	u8 rsstable[128];

	for_all_rx_queues(adapter, rxo, i) {
		rc = be_queue_alloc(adapter, &rxo->q, RX_Q_LEN,
				    sizeof(struct be_eth_rx_d));
		if (rc)
			return rc;
	}

	/* The FW would like the default RXQ to be created first */
	rxo = default_rxo(adapter);
	rc = be_cmd_rxq_create(adapter, &rxo->q, rxo->cq.id, rx_frag_size,
			       adapter->if_handle, false, &rxo->rss_id);
	if (rc)
		return rc;

	for_all_rss_queues(adapter, rxo, i) {
		rc = be_cmd_rxq_create(adapter, &rxo->q, rxo->cq.id,
				       rx_frag_size, adapter->if_handle,
				       true, &rxo->rss_id);
		if (rc)
			return rc;
	}

	if (be_multi_rxq(adapter)) {
		/* Fill the indirection table by cycling through the RSS
		 * ring ids round-robin across all 128 slots
		 */
		for (j = 0; j < 128; j += adapter->num_rx_qs - 1) {
			for_all_rss_queues(adapter, rxo, i) {
				if ((j + i) >= 128)
					break;
				rsstable[j + i] = rxo->rss_id;
			}
		}
		rc = be_cmd_rss_config(adapter, rsstable, 128);
		if (rc)
			return rc;
	}

	/* First time posting */
	for_all_rx_queues(adapter, rxo, i)
		be_post_rx_frags(rxo, GFP_KERNEL);
	return 0;
}
2463
/* ndo_open: creates the RX queues, registers IRQs, enables interrupts,
 * arms all CQs/EQs, enables NAPI and async MCC, reports the initial
 * link state, and opens the RoCE device. On any failure, unwinds via
 * be_close(). Returns 0 or -EIO.
 */
static int be_open(struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_eq_obj *eqo;
	struct be_rx_obj *rxo;
	struct be_tx_obj *txo;
	u8 link_status;
	int status, i;

	status = be_rx_qs_create(adapter);
	if (status)
		goto err;

	be_irq_register(adapter);

	/* Lancer has no host-side global interrupt-enable bit */
	if (!lancer_chip(adapter))
		be_intr_set(adapter, true);

	for_all_rx_queues(adapter, rxo, i)
		be_cq_notify(adapter, rxo->cq.id, true, 0);

	for_all_tx_queues(adapter, txo, i)
		be_cq_notify(adapter, txo->cq.id, true, 0);

	be_async_mcc_enable(adapter);

	for_all_evt_queues(adapter, eqo, i) {
		napi_enable(&eqo->napi);
		be_eq_notify(adapter, eqo->q.id, true, false, 0);
	}

	/* Best-effort: a query failure just skips the initial update */
	status = be_cmd_link_status_query(adapter, NULL, &link_status, 0);
	if (!status)
		be_link_status_update(adapter, link_status);

	be_roce_dev_open(adapter);
	return 0;
err:
	be_close(adapter->netdev);
	return -EIO;
}
2505
Ajit Khaparde71d8d1b2009-12-03 06:16:59 +00002506static int be_setup_wol(struct be_adapter *adapter, bool enable)
2507{
2508 struct be_dma_mem cmd;
2509 int status = 0;
2510 u8 mac[ETH_ALEN];
2511
2512 memset(mac, 0, ETH_ALEN);
2513
2514 cmd.size = sizeof(struct be_cmd_req_acpi_wol_magic_config);
Ivan Vecera2b7bceb2011-02-02 08:05:12 +00002515 cmd.va = dma_alloc_coherent(&adapter->pdev->dev, cmd.size, &cmd.dma,
2516 GFP_KERNEL);
Ajit Khaparde71d8d1b2009-12-03 06:16:59 +00002517 if (cmd.va == NULL)
2518 return -1;
2519 memset(cmd.va, 0, cmd.size);
2520
2521 if (enable) {
2522 status = pci_write_config_dword(adapter->pdev,
2523 PCICFG_PM_CONTROL_OFFSET, PCICFG_PM_CONTROL_MASK);
2524 if (status) {
2525 dev_err(&adapter->pdev->dev,
Frans Pop2381a552010-03-24 07:57:36 +00002526 "Could not enable Wake-on-lan\n");
Ivan Vecera2b7bceb2011-02-02 08:05:12 +00002527 dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va,
2528 cmd.dma);
Ajit Khaparde71d8d1b2009-12-03 06:16:59 +00002529 return status;
2530 }
2531 status = be_cmd_enable_magic_wol(adapter,
2532 adapter->netdev->dev_addr, &cmd);
2533 pci_enable_wake(adapter->pdev, PCI_D3hot, 1);
2534 pci_enable_wake(adapter->pdev, PCI_D3cold, 1);
2535 } else {
2536 status = be_cmd_enable_magic_wol(adapter, mac, &cmd);
2537 pci_enable_wake(adapter->pdev, PCI_D3hot, 0);
2538 pci_enable_wake(adapter->pdev, PCI_D3cold, 0);
2539 }
2540
Ivan Vecera2b7bceb2011-02-02 08:05:12 +00002541 dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va, cmd.dma);
Ajit Khaparde71d8d1b2009-12-03 06:16:59 +00002542 return status;
2543}
2544
/*
 * Generate a seed MAC address from the PF MAC Address using jhash.
 * MAC Address for VFs are assigned incrementally starting from the seed.
 * These addresses are programmed in the ASIC by the PF and the VF driver
 * queries for the MAC address during its probe.
 */
static inline int be_vf_eth_addr_config(struct be_adapter *adapter)
{
	u32 vf;
	int status = 0;
	u8 mac[ETH_ALEN];
	struct be_vf_cfg *vf_cfg;

	be_vf_eth_addr_generate(adapter, mac);

	for_all_vfs(adapter, vf_cfg, vf) {
		/* Lancer uses the mac-list cmd; BE programs a pmac entry */
		if (lancer_chip(adapter)) {
			status = be_cmd_set_mac_list(adapter, mac, 1, vf + 1);
		} else {
			status = be_cmd_pmac_add(adapter, mac,
						 vf_cfg->if_handle,
						 &vf_cfg->pmac_id, vf + 1);
		}

		/* A per-VF failure is logged but does not stop the loop;
		 * the last failing status is what gets returned
		 */
		if (status)
			dev_err(&adapter->pdev->dev,
			"Mac address assignment failed for VF %d\n", vf);
		else
			memcpy(vf_cfg->mac_addr, mac, ETH_ALEN);

		/* Next VF gets the next sequential address */
		mac[5] += 1;
	}
	return status;
}
2579
/* Undoes VF setup: removes each VF's MAC/pmac, destroys its interface,
 * and disables SR-IOV - unless VFs are still assigned to VMs, in which
 * case the HW teardown is skipped. Always frees the vf_cfg array and
 * zeroes num_vfs.
 */
static void be_vf_clear(struct be_adapter *adapter)
{
	struct be_vf_cfg *vf_cfg;
	u32 vf;

	/* Cannot tear down HW resources while VFs are attached to guests */
	if (be_find_vfs(adapter, ASSIGNED)) {
		dev_warn(&adapter->pdev->dev, "VFs are assigned to VMs\n");
		goto done;
	}

	for_all_vfs(adapter, vf_cfg, vf) {
		if (lancer_chip(adapter))
			be_cmd_set_mac_list(adapter, NULL, 0, vf + 1);
		else
			be_cmd_pmac_del(adapter, vf_cfg->if_handle,
					vf_cfg->pmac_id, vf + 1);

		be_cmd_if_destroy(adapter, vf_cfg->if_handle, vf + 1);
	}
	pci_disable_sriov(adapter->pdev);
done:
	kfree(adapter->vf_cfg);
	adapter->num_vfs = 0;
}
2604
/* Full teardown of adapter resources: stops the worker, clears VFs,
 * deletes extra unicast MACs, destroys the interface and all queues,
 * frees the pmac-id table and disables MSI-x. Always returns 0.
 */
static int be_clear(struct be_adapter *adapter)
{
	/* pmac_id[0] is the primary MAC; extra uc macs start at index 1 */
	int i = 1;

	if (adapter->flags & BE_FLAGS_WORKER_SCHEDULED) {
		cancel_delayed_work_sync(&adapter->work);
		adapter->flags &= ~BE_FLAGS_WORKER_SCHEDULED;
	}

	if (sriov_enabled(adapter))
		be_vf_clear(adapter);

	for (; adapter->uc_macs > 0; adapter->uc_macs--, i++)
		be_cmd_pmac_del(adapter, adapter->if_handle,
			adapter->pmac_id[i], 0);

	be_cmd_if_destroy(adapter, adapter->if_handle, 0);

	/* Destroy queues in reverse order of creation */
	be_mcc_queues_destroy(adapter);
	be_rx_cqs_destroy(adapter);
	be_tx_queues_destroy(adapter);
	be_evt_queues_destroy(adapter);

	kfree(adapter->pmac_id);
	adapter->pmac_id = NULL;

	be_msix_disable(adapter);
	return 0;
}
2634
Padmanabh Ratnakarabb93952012-10-20 06:01:41 +00002635static void be_get_vf_if_cap_flags(struct be_adapter *adapter,
2636 u32 *cap_flags, u8 domain)
2637{
2638 bool profile_present = false;
2639 int status;
2640
2641 if (lancer_chip(adapter)) {
2642 status = be_cmd_get_profile_config(adapter, cap_flags, domain);
2643 if (!status)
2644 profile_present = true;
2645 }
2646
2647 if (!profile_present)
2648 *cap_flags = BE_IF_FLAGS_UNTAGGED | BE_IF_FLAGS_BROADCAST |
2649 BE_IF_FLAGS_MULTICAST;
2650}
2651
Sathya Perla39f1d942012-05-08 19:41:24 +00002652static int be_vf_setup_init(struct be_adapter *adapter)
Sathya Perla30128032011-11-10 19:17:57 +00002653{
Sathya Perla11ac75e2011-12-13 00:58:50 +00002654 struct be_vf_cfg *vf_cfg;
Sathya Perla30128032011-11-10 19:17:57 +00002655 int vf;
2656
Sathya Perla39f1d942012-05-08 19:41:24 +00002657 adapter->vf_cfg = kcalloc(adapter->num_vfs, sizeof(*vf_cfg),
2658 GFP_KERNEL);
2659 if (!adapter->vf_cfg)
2660 return -ENOMEM;
2661
Sathya Perla11ac75e2011-12-13 00:58:50 +00002662 for_all_vfs(adapter, vf_cfg, vf) {
2663 vf_cfg->if_handle = -1;
2664 vf_cfg->pmac_id = -1;
Sathya Perla30128032011-11-10 19:17:57 +00002665 }
Sathya Perla39f1d942012-05-08 19:41:24 +00002666 return 0;
Sathya Perla30128032011-11-10 19:17:57 +00002667}
2668
Sathya Perlaf9449ab2011-10-24 02:45:01 +00002669static int be_vf_setup(struct be_adapter *adapter)
2670{
Sathya Perla11ac75e2011-12-13 00:58:50 +00002671 struct be_vf_cfg *vf_cfg;
Sathya Perla39f1d942012-05-08 19:41:24 +00002672 struct device *dev = &adapter->pdev->dev;
Sathya Perlaf9449ab2011-10-24 02:45:01 +00002673 u32 cap_flags, en_flags, vf;
Ajit Khapardef1f3ee12012-03-18 06:23:41 +00002674 u16 def_vlan, lnk_speed;
Sathya Perla39f1d942012-05-08 19:41:24 +00002675 int status, enabled_vfs;
Sathya Perlaf9449ab2011-10-24 02:45:01 +00002676
Sathya Perla39f1d942012-05-08 19:41:24 +00002677 enabled_vfs = be_find_vfs(adapter, ENABLED);
2678 if (enabled_vfs) {
2679 dev_warn(dev, "%d VFs are already enabled\n", enabled_vfs);
2680 dev_warn(dev, "Ignoring num_vfs=%d setting\n", num_vfs);
2681 return 0;
2682 }
2683
2684 if (num_vfs > adapter->dev_num_vfs) {
2685 dev_warn(dev, "Device supports %d VFs and not %d\n",
2686 adapter->dev_num_vfs, num_vfs);
2687 num_vfs = adapter->dev_num_vfs;
2688 }
2689
2690 status = pci_enable_sriov(adapter->pdev, num_vfs);
2691 if (!status) {
2692 adapter->num_vfs = num_vfs;
2693 } else {
2694 /* Platform doesn't support SRIOV though device supports it */
2695 dev_warn(dev, "SRIOV enable failed\n");
2696 return 0;
2697 }
2698
2699 status = be_vf_setup_init(adapter);
2700 if (status)
2701 goto err;
Sathya Perla30128032011-11-10 19:17:57 +00002702
Sathya Perla11ac75e2011-12-13 00:58:50 +00002703 for_all_vfs(adapter, vf_cfg, vf) {
Padmanabh Ratnakarabb93952012-10-20 06:01:41 +00002704 be_get_vf_if_cap_flags(adapter, &cap_flags, vf + 1);
2705
2706 en_flags = cap_flags & (BE_IF_FLAGS_UNTAGGED |
2707 BE_IF_FLAGS_BROADCAST |
2708 BE_IF_FLAGS_MULTICAST);
2709
Padmanabh Ratnakar1578e772012-06-07 04:37:08 +00002710 status = be_cmd_if_create(adapter, cap_flags, en_flags,
2711 &vf_cfg->if_handle, vf + 1);
Sathya Perlaf9449ab2011-10-24 02:45:01 +00002712 if (status)
2713 goto err;
Sathya Perlaf9449ab2011-10-24 02:45:01 +00002714 }
2715
Sathya Perla39f1d942012-05-08 19:41:24 +00002716 if (!enabled_vfs) {
2717 status = be_vf_eth_addr_config(adapter);
2718 if (status)
2719 goto err;
2720 }
Sathya Perlaf9449ab2011-10-24 02:45:01 +00002721
Sathya Perla11ac75e2011-12-13 00:58:50 +00002722 for_all_vfs(adapter, vf_cfg, vf) {
Vasundhara Volam8a046d32012-08-28 20:37:42 +00002723 lnk_speed = 1000;
2724 status = be_cmd_set_qos(adapter, lnk_speed, vf + 1);
Sathya Perlaf9449ab2011-10-24 02:45:01 +00002725 if (status)
2726 goto err;
Sathya Perla11ac75e2011-12-13 00:58:50 +00002727 vf_cfg->tx_rate = lnk_speed * 10;
Ajit Khapardef1f3ee12012-03-18 06:23:41 +00002728
2729 status = be_cmd_get_hsw_config(adapter, &def_vlan,
2730 vf + 1, vf_cfg->if_handle);
2731 if (status)
2732 goto err;
2733 vf_cfg->def_vid = def_vlan;
Padmanabh Ratnakardcf7ebb2012-10-20 06:03:49 +00002734
2735 be_cmd_enable_vf(adapter, vf + 1);
Sathya Perlaf9449ab2011-10-24 02:45:01 +00002736 }
2737 return 0;
2738err:
2739 return status;
2740}
2741
Sathya Perla30128032011-11-10 19:17:57 +00002742static void be_setup_init(struct be_adapter *adapter)
2743{
2744 adapter->vlan_prio_bmap = 0xff;
Ajit Khaparde42f11cf2012-04-21 18:53:22 +00002745 adapter->phy.link_speed = -1;
Sathya Perla30128032011-11-10 19:17:57 +00002746 adapter->if_handle = -1;
2747 adapter->be3_native = false;
2748 adapter->promiscuous = false;
2749 adapter->eq_next_idx = 0;
Padmanabh Ratnakarf25b1192012-10-20 06:02:52 +00002750
2751 if (be_physfn(adapter))
2752 adapter->cmd_privileges = MAX_PRIVILEGES;
2753 else
2754 adapter->cmd_privileges = MIN_PRIVILEGES;
Sathya Perla30128032011-11-10 19:17:57 +00002755}
2756
Padmanabh Ratnakar1578e772012-06-07 04:37:08 +00002757static int be_get_mac_addr(struct be_adapter *adapter, u8 *mac, u32 if_handle,
2758 bool *active_mac, u32 *pmac_id)
Padmanabh Ratnakar590c3912011-11-25 05:47:26 +00002759{
Padmanabh Ratnakar1578e772012-06-07 04:37:08 +00002760 int status = 0;
Padmanabh Ratnakare5e1ee82012-02-03 09:50:17 +00002761
Padmanabh Ratnakar1578e772012-06-07 04:37:08 +00002762 if (!is_zero_ether_addr(adapter->netdev->perm_addr)) {
2763 memcpy(mac, adapter->netdev->dev_addr, ETH_ALEN);
2764 if (!lancer_chip(adapter) && !be_physfn(adapter))
2765 *active_mac = true;
2766 else
2767 *active_mac = false;
Padmanabh Ratnakare5e1ee82012-02-03 09:50:17 +00002768
Padmanabh Ratnakar1578e772012-06-07 04:37:08 +00002769 return status;
Padmanabh Ratnakare5e1ee82012-02-03 09:50:17 +00002770 }
Padmanabh Ratnakar1578e772012-06-07 04:37:08 +00002771
2772 if (lancer_chip(adapter)) {
2773 status = be_cmd_get_mac_from_list(adapter, mac,
2774 active_mac, pmac_id, 0);
2775 if (*active_mac) {
Sathya Perla5ee49792012-09-28 04:39:41 +00002776 status = be_cmd_mac_addr_query(adapter, mac, false,
2777 if_handle, *pmac_id);
Padmanabh Ratnakar1578e772012-06-07 04:37:08 +00002778 }
2779 } else if (be_physfn(adapter)) {
2780 /* For BE3, for PF get permanent MAC */
Sathya Perla5ee49792012-09-28 04:39:41 +00002781 status = be_cmd_mac_addr_query(adapter, mac, true, 0, 0);
Padmanabh Ratnakar1578e772012-06-07 04:37:08 +00002782 *active_mac = false;
2783 } else {
2784 /* For BE3, for VF get soft MAC assigned by PF*/
Sathya Perla5ee49792012-09-28 04:39:41 +00002785 status = be_cmd_mac_addr_query(adapter, mac, false,
Padmanabh Ratnakar1578e772012-06-07 04:37:08 +00002786 if_handle, 0);
2787 *active_mac = true;
2788 }
Padmanabh Ratnakar590c3912011-11-25 05:47:26 +00002789 return status;
2790}
2791
/* Populate the adapter's resource limits (MAC/VLAN/mcast counts, queue
 * counts, interface capability flags).  On Lancer the limits come from the
 * FW function-config profile and are then clamped to driver maxima; on
 * other chips (or if the profile query fails) fixed defaults are used.
 */
static void be_get_resources(struct be_adapter *adapter)
{
	int status;
	bool profile_present = false;

	if (lancer_chip(adapter)) {
		/* be_cmd_get_func_config() fills the adapter->max_* fields */
		status = be_cmd_get_func_config(adapter);

		if (!status)
			profile_present = true;
	}

	if (profile_present) {
		/* Sanity fixes for Lancer */
		adapter->max_pmac_cnt = min_t(u16, adapter->max_pmac_cnt,
					      BE_UC_PMAC_COUNT);
		adapter->max_vlans = min_t(u16, adapter->max_vlans,
					   BE_NUM_VLANS_SUPPORTED);
		adapter->max_mcast_mac = min_t(u16, adapter->max_mcast_mac,
					       BE_MAX_MC);
		adapter->max_tx_queues = min_t(u16, adapter->max_tx_queues,
					       MAX_TX_QS);
		adapter->max_rss_queues = min_t(u16, adapter->max_rss_queues,
						BE3_MAX_RSS_QS);
		adapter->max_event_queues = min_t(u16,
						  adapter->max_event_queues,
						  BE3_MAX_RSS_QS);

		/* If all RX queues would be RSS-capable, reserve one
		 * (presumably for the default/non-RSS queue — confirm)
		 */
		if (adapter->max_rss_queues &&
		    adapter->max_rss_queues == adapter->max_rx_queues)
			adapter->max_rss_queues -= 1;

		/* Each RSS queue needs its own event queue */
		if (adapter->max_event_queues < adapter->max_rss_queues)
			adapter->max_rss_queues = adapter->max_event_queues;

	} else {
		if (be_physfn(adapter))
			adapter->max_pmac_cnt = BE_UC_PMAC_COUNT;
		else
			adapter->max_pmac_cnt = BE_VF_UC_PMAC_COUNT;

		/* In Flex10 mode the VLAN table is shared 8 ways */
		if (adapter->function_mode & FLEX10_MODE)
			adapter->max_vlans = BE_NUM_VLANS_SUPPORTED/8;
		else
			adapter->max_vlans = BE_NUM_VLANS_SUPPORTED;

		adapter->max_mcast_mac = BE_MAX_MC;
		adapter->max_tx_queues = MAX_TX_QS;
		adapter->max_rss_queues = (adapter->be3_native) ?
			BE3_MAX_RSS_QS : BE2_MAX_RSS_QS;
		adapter->max_event_queues = BE3_MAX_RSS_QS;

		adapter->if_cap_flags = BE_IF_FLAGS_UNTAGGED |
					BE_IF_FLAGS_BROADCAST |
					BE_IF_FLAGS_MULTICAST |
					BE_IF_FLAGS_PASS_L3L4_ERRORS |
					BE_IF_FLAGS_MCAST_PROMISCUOUS |
					BE_IF_FLAGS_VLAN_PROMISCUOUS |
					BE_IF_FLAGS_PROMISCUOUS;

		if (adapter->function_caps & BE_FUNCTION_CAPS_RSS)
			adapter->if_cap_flags |= BE_IF_FLAGS_RSS;
	}
}
2856
Sathya Perla39f1d942012-05-08 19:41:24 +00002857/* Routine to query per function resource limits */
2858static int be_get_config(struct be_adapter *adapter)
2859{
Padmanabh Ratnakarabb93952012-10-20 06:01:41 +00002860 int pos, status;
Sathya Perla39f1d942012-05-08 19:41:24 +00002861 u16 dev_num_vfs;
2862
Padmanabh Ratnakarabb93952012-10-20 06:01:41 +00002863 status = be_cmd_query_fw_cfg(adapter, &adapter->port_num,
2864 &adapter->function_mode,
2865 &adapter->function_caps);
2866 if (status)
2867 goto err;
2868
2869 be_get_resources(adapter);
2870
2871 /* primary mac needs 1 pmac entry */
2872 adapter->pmac_id = kcalloc(adapter->max_pmac_cnt + 1,
2873 sizeof(u32), GFP_KERNEL);
2874 if (!adapter->pmac_id) {
2875 status = -ENOMEM;
2876 goto err;
2877 }
2878
Sathya Perla39f1d942012-05-08 19:41:24 +00002879 pos = pci_find_ext_capability(adapter->pdev, PCI_EXT_CAP_ID_SRIOV);
2880 if (pos) {
2881 pci_read_config_word(adapter->pdev, pos + PCI_SRIOV_TOTAL_VF,
2882 &dev_num_vfs);
Vasundhara Volam7c5a5242012-08-28 20:37:41 +00002883 if (!lancer_chip(adapter))
2884 dev_num_vfs = min_t(u16, dev_num_vfs, MAX_VFS);
Sathya Perla39f1d942012-05-08 19:41:24 +00002885 adapter->dev_num_vfs = dev_num_vfs;
2886 }
Padmanabh Ratnakarabb93952012-10-20 06:01:41 +00002887err:
2888 return status;
Sathya Perla39f1d942012-05-08 19:41:24 +00002889}
2890
/* Bring the function fully up: query config, enable MSI-X, create all
 * queues (EQ/CQ/MCC/TX/RX), create the interface, resolve and program the
 * MAC, restore VLAN/rx-mode/flow-control state, optionally set up SR-IOV
 * VFs, and start the periodic worker.  The sequence is order-sensitive;
 * any failure unwinds everything through be_clear().
 */
static int be_setup(struct be_adapter *adapter)
{
	struct device *dev = &adapter->pdev->dev;
	u32 en_flags;
	u32 tx_fc, rx_fc;
	int status;
	u8 mac[ETH_ALEN];
	bool active_mac;

	be_setup_init(adapter);

	if (!lancer_chip(adapter))
		be_cmd_req_native_mode(adapter);

	status = be_get_config(adapter);
	if (status)
		goto err;

	be_msix_enable(adapter);

	/* Event queues first: CQs created below attach to them */
	status = be_evt_queues_create(adapter);
	if (status)
		goto err;

	status = be_tx_cqs_create(adapter);
	if (status)
		goto err;

	status = be_rx_cqs_create(adapter);
	if (status)
		goto err;

	status = be_mcc_queues_create(adapter);
	if (status)
		goto err;

	be_cmd_get_fn_privileges(adapter, &adapter->cmd_privileges, 0);
	/* In UMC mode FW does not return right privileges.
	 * Override with correct privilege equivalent to PF.
	 */
	if (be_is_mc(adapter))
		adapter->cmd_privileges = MAX_PRIVILEGES;

	en_flags = BE_IF_FLAGS_UNTAGGED | BE_IF_FLAGS_BROADCAST |
			BE_IF_FLAGS_MULTICAST | BE_IF_FLAGS_PASS_L3L4_ERRORS;

	if (adapter->function_caps & BE_FUNCTION_CAPS_RSS)
		en_flags |= BE_IF_FLAGS_RSS;

	/* Enable only flags the function actually supports */
	en_flags = en_flags & adapter->if_cap_flags;

	status = be_cmd_if_create(adapter, adapter->if_cap_flags, en_flags,
				  &adapter->if_handle, 0);
	if (status != 0)
		goto err;

	memset(mac, 0, ETH_ALEN);
	active_mac = false;
	status = be_get_mac_addr(adapter, mac, adapter->if_handle,
				 &active_mac, &adapter->pmac_id[0]);
	if (status != 0)
		goto err;

	/* Program the MAC only if FW does not already have it active */
	if (!active_mac) {
		status = be_cmd_pmac_add(adapter, mac, adapter->if_handle,
					 &adapter->pmac_id[0], 0);
		if (status != 0)
			goto err;
	}

	if (is_zero_ether_addr(adapter->netdev->dev_addr)) {
		memcpy(adapter->netdev->dev_addr, mac, ETH_ALEN);
		memcpy(adapter->netdev->perm_addr, mac, ETH_ALEN);
	}

	status = be_tx_qs_create(adapter);
	if (status)
		goto err;

	be_cmd_get_fw_ver(adapter, adapter->fw_ver, NULL);

	/* Re-program VLAN filters that survive a reset/resume */
	if (adapter->vlans_added)
		be_vid_config(adapter);

	be_set_rx_mode(adapter->netdev);

	be_cmd_get_flow_control(adapter, &tx_fc, &rx_fc);

	/* Only touch FW flow-control state if it differs from ours */
	if (rx_fc != adapter->rx_fc || tx_fc != adapter->tx_fc)
		be_cmd_set_flow_control(adapter, adapter->tx_fc,
					adapter->rx_fc);

	/* VF setup failure is deliberately non-fatal here */
	if (be_physfn(adapter) && num_vfs) {
		if (adapter->dev_num_vfs)
			be_vf_setup(adapter);
		else
			dev_warn(dev, "device doesn't support SRIOV\n");
	}

	status = be_cmd_get_phy_info(adapter);
	if (!status && be_pause_supported(adapter))
		adapter->phy.fc_autoneg = 1;

	schedule_delayed_work(&adapter->work, msecs_to_jiffies(1000));
	adapter->flags |= BE_FLAGS_WORKER_SCHEDULED;
	return 0;
err:
	be_clear(adapter);
	return status;
}
3001
#ifdef CONFIG_NET_POLL_CONTROLLER
/* Polling hook for netconsole and similar users: service every event
 * queue once without relying on interrupts.
 */
static void be_netpoll(struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_eq_obj *eqo;
	int i;

	for_all_evt_queues(adapter, eqo, i)
		event_handle(eqo);
}
#endif
3015
#define FW_FILE_HDR_SIGN 	"ServerEngines Corp. "
/* 32-byte magic cookie marking a flash section directory inside a UFI
 * image, split into two 16-byte halves (the second half intentionally
 * fills all 16 bytes, leaving no NUL terminator); get_fsec_info()
 * memcmp()s all sizeof(flash_cookie) bytes against candidate offsets.
 */
char flash_cookie[2][16] =	{"*** SE FLAS", "H DIRECTORY *** "};
3018
Sarveshwar Bandifa9a6fe2009-11-20 14:23:47 -08003019static bool be_flash_redboot(struct be_adapter *adapter,
Ajit Khaparde3f0d4562010-02-09 01:30:35 +00003020 const u8 *p, u32 img_start, int image_size,
3021 int hdr_size)
Sarveshwar Bandifa9a6fe2009-11-20 14:23:47 -08003022{
3023 u32 crc_offset;
3024 u8 flashed_crc[4];
3025 int status;
Ajit Khaparde3f0d4562010-02-09 01:30:35 +00003026
3027 crc_offset = hdr_size + img_start + image_size - 4;
3028
Sarveshwar Bandifa9a6fe2009-11-20 14:23:47 -08003029 p += crc_offset;
Ajit Khaparde3f0d4562010-02-09 01:30:35 +00003030
3031 status = be_cmd_get_flash_crc(adapter, flashed_crc,
Ajit Khapardef510fc62010-03-31 01:47:45 +00003032 (image_size - 4));
Sarveshwar Bandifa9a6fe2009-11-20 14:23:47 -08003033 if (status) {
3034 dev_err(&adapter->pdev->dev,
3035 "could not get crc from flash, not flashing redboot\n");
3036 return false;
3037 }
3038
3039 /*update redboot only if crc does not match*/
3040 if (!memcmp(flashed_crc, p, 4))
3041 return false;
3042 else
3043 return true;
Sarveshwar Bandifa9a6fe2009-11-20 14:23:47 -08003044}
3045
Sathya Perla306f1342011-08-02 19:57:45 +00003046static bool phy_flashing_required(struct be_adapter *adapter)
3047{
Ajit Khaparde42f11cf2012-04-21 18:53:22 +00003048 return (adapter->phy.phy_type == TN_8022 &&
3049 adapter->phy.interface_type == PHY_TYPE_BASET_10GB);
Sathya Perla306f1342011-08-02 19:57:45 +00003050}
3051
Padmanabh Ratnakarc165541e2012-04-25 01:47:15 +00003052static bool is_comp_in_ufi(struct be_adapter *adapter,
3053 struct flash_section_info *fsec, int type)
3054{
3055 int i = 0, img_type = 0;
3056 struct flash_section_info_g2 *fsec_g2 = NULL;
3057
3058 if (adapter->generation != BE_GEN3)
3059 fsec_g2 = (struct flash_section_info_g2 *)fsec;
3060
3061 for (i = 0; i < MAX_FLASH_COMP; i++) {
3062 if (fsec_g2)
3063 img_type = le32_to_cpu(fsec_g2->fsec_entry[i].type);
3064 else
3065 img_type = le32_to_cpu(fsec->fsec_entry[i].type);
3066
3067 if (img_type == type)
3068 return true;
3069 }
3070 return false;
3071
3072}
3073
3074struct flash_section_info *get_fsec_info(struct be_adapter *adapter,
3075 int header_size,
3076 const struct firmware *fw)
3077{
3078 struct flash_section_info *fsec = NULL;
3079 const u8 *p = fw->data;
3080
3081 p += header_size;
3082 while (p < (fw->data + fw->size)) {
3083 fsec = (struct flash_section_info *)p;
3084 if (!memcmp(flash_cookie, fsec->cookie, sizeof(flash_cookie)))
3085 return fsec;
3086 p += 32;
3087 }
3088 return NULL;
3089}
3090
Ajit Khaparde3f0d4562010-02-09 01:30:35 +00003091static int be_flash_data(struct be_adapter *adapter,
Padmanabh Ratnakarc165541e2012-04-25 01:47:15 +00003092 const struct firmware *fw,
3093 struct be_dma_mem *flash_cmd,
3094 int num_of_images)
Ajit Khaparde3f0d4562010-02-09 01:30:35 +00003095
Ajit Khaparde84517482009-09-04 03:12:16 +00003096{
Ajit Khaparde3f0d4562010-02-09 01:30:35 +00003097 int status = 0, i, filehdr_size = 0;
Padmanabh Ratnakarc165541e2012-04-25 01:47:15 +00003098 int img_hdrs_size = (num_of_images * sizeof(struct image_hdr));
Ajit Khaparde3f0d4562010-02-09 01:30:35 +00003099 u32 total_bytes = 0, flash_op;
Ajit Khaparde84517482009-09-04 03:12:16 +00003100 int num_bytes;
3101 const u8 *p = fw->data;
3102 struct be_cmd_write_flashrom *req = flash_cmd->va;
Joe Perches215faf92010-12-21 02:16:10 -08003103 const struct flash_comp *pflashcomp;
Padmanabh Ratnakarc165541e2012-04-25 01:47:15 +00003104 int num_comp, hdr_size;
3105 struct flash_section_info *fsec = NULL;
Ajit Khaparde84517482009-09-04 03:12:16 +00003106
Padmanabh Ratnakarc165541e2012-04-25 01:47:15 +00003107 struct flash_comp gen3_flash_types[] = {
3108 { FLASH_iSCSI_PRIMARY_IMAGE_START_g3, OPTYPE_ISCSI_ACTIVE,
3109 FLASH_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_iSCSI},
3110 { FLASH_REDBOOT_START_g3, OPTYPE_REDBOOT,
3111 FLASH_REDBOOT_IMAGE_MAX_SIZE_g3, IMAGE_BOOT_CODE},
3112 { FLASH_iSCSI_BIOS_START_g3, OPTYPE_BIOS,
3113 FLASH_BIOS_IMAGE_MAX_SIZE_g3, IMAGE_OPTION_ROM_ISCSI},
3114 { FLASH_PXE_BIOS_START_g3, OPTYPE_PXE_BIOS,
3115 FLASH_BIOS_IMAGE_MAX_SIZE_g3, IMAGE_OPTION_ROM_PXE},
3116 { FLASH_FCoE_BIOS_START_g3, OPTYPE_FCOE_BIOS,
3117 FLASH_BIOS_IMAGE_MAX_SIZE_g3, IMAGE_OPTION_ROM_FCoE},
3118 { FLASH_iSCSI_BACKUP_IMAGE_START_g3, OPTYPE_ISCSI_BACKUP,
3119 FLASH_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_BACKUP_iSCSI},
3120 { FLASH_FCoE_PRIMARY_IMAGE_START_g3, OPTYPE_FCOE_FW_ACTIVE,
3121 FLASH_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_FCoE},
3122 { FLASH_FCoE_BACKUP_IMAGE_START_g3, OPTYPE_FCOE_FW_BACKUP,
3123 FLASH_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_BACKUP_FCoE},
3124 { FLASH_NCSI_START_g3, OPTYPE_NCSI_FW,
3125 FLASH_NCSI_IMAGE_MAX_SIZE_g3, IMAGE_NCSI},
3126 { FLASH_PHY_FW_START_g3, OPTYPE_PHY_FW,
3127 FLASH_PHY_FW_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_PHY}
Ajit Khaparde3f0d4562010-02-09 01:30:35 +00003128 };
Padmanabh Ratnakarc165541e2012-04-25 01:47:15 +00003129
3130 struct flash_comp gen2_flash_types[] = {
3131 { FLASH_iSCSI_PRIMARY_IMAGE_START_g2, OPTYPE_ISCSI_ACTIVE,
3132 FLASH_IMAGE_MAX_SIZE_g2, IMAGE_FIRMWARE_iSCSI},
3133 { FLASH_REDBOOT_START_g2, OPTYPE_REDBOOT,
3134 FLASH_REDBOOT_IMAGE_MAX_SIZE_g2, IMAGE_BOOT_CODE},
3135 { FLASH_iSCSI_BIOS_START_g2, OPTYPE_BIOS,
3136 FLASH_BIOS_IMAGE_MAX_SIZE_g2, IMAGE_OPTION_ROM_ISCSI},
3137 { FLASH_PXE_BIOS_START_g2, OPTYPE_PXE_BIOS,
3138 FLASH_BIOS_IMAGE_MAX_SIZE_g2, IMAGE_OPTION_ROM_PXE},
3139 { FLASH_FCoE_BIOS_START_g2, OPTYPE_FCOE_BIOS,
3140 FLASH_BIOS_IMAGE_MAX_SIZE_g2, IMAGE_OPTION_ROM_FCoE},
3141 { FLASH_iSCSI_BACKUP_IMAGE_START_g2, OPTYPE_ISCSI_BACKUP,
3142 FLASH_IMAGE_MAX_SIZE_g2, IMAGE_FIRMWARE_BACKUP_iSCSI},
3143 { FLASH_FCoE_PRIMARY_IMAGE_START_g2, OPTYPE_FCOE_FW_ACTIVE,
3144 FLASH_IMAGE_MAX_SIZE_g2, IMAGE_FIRMWARE_FCoE},
3145 { FLASH_FCoE_BACKUP_IMAGE_START_g2, OPTYPE_FCOE_FW_BACKUP,
3146 FLASH_IMAGE_MAX_SIZE_g2, IMAGE_FIRMWARE_BACKUP_FCoE}
Ajit Khaparde3f0d4562010-02-09 01:30:35 +00003147 };
3148
3149 if (adapter->generation == BE_GEN3) {
3150 pflashcomp = gen3_flash_types;
3151 filehdr_size = sizeof(struct flash_file_hdr_g3);
Joe Perches215faf92010-12-21 02:16:10 -08003152 num_comp = ARRAY_SIZE(gen3_flash_types);
Ajit Khaparde3f0d4562010-02-09 01:30:35 +00003153 } else {
3154 pflashcomp = gen2_flash_types;
3155 filehdr_size = sizeof(struct flash_file_hdr_g2);
Joe Perches215faf92010-12-21 02:16:10 -08003156 num_comp = ARRAY_SIZE(gen2_flash_types);
Ajit Khaparde84517482009-09-04 03:12:16 +00003157 }
Padmanabh Ratnakarc165541e2012-04-25 01:47:15 +00003158 /* Get flash section info*/
3159 fsec = get_fsec_info(adapter, filehdr_size + img_hdrs_size, fw);
3160 if (!fsec) {
3161 dev_err(&adapter->pdev->dev,
3162 "Invalid Cookie. UFI corrupted ?\n");
3163 return -1;
3164 }
Sarveshwar Bandi9fe96932010-03-02 22:37:28 +00003165 for (i = 0; i < num_comp; i++) {
Padmanabh Ratnakarc165541e2012-04-25 01:47:15 +00003166 if (!is_comp_in_ufi(adapter, fsec, pflashcomp[i].img_type))
Sarveshwar Bandi9fe96932010-03-02 22:37:28 +00003167 continue;
Padmanabh Ratnakarc165541e2012-04-25 01:47:15 +00003168
3169 if ((pflashcomp[i].optype == OPTYPE_NCSI_FW) &&
3170 memcmp(adapter->fw_ver, "3.102.148.0", 11) < 0)
3171 continue;
3172
3173 if (pflashcomp[i].optype == OPTYPE_PHY_FW) {
Sathya Perla306f1342011-08-02 19:57:45 +00003174 if (!phy_flashing_required(adapter))
3175 continue;
3176 }
Padmanabh Ratnakarc165541e2012-04-25 01:47:15 +00003177
3178 hdr_size = filehdr_size +
3179 (num_of_images * sizeof(struct image_hdr));
3180
3181 if ((pflashcomp[i].optype == OPTYPE_REDBOOT) &&
3182 (!be_flash_redboot(adapter, fw->data, pflashcomp[i].offset,
3183 pflashcomp[i].size, hdr_size)))
Ajit Khaparde3f0d4562010-02-09 01:30:35 +00003184 continue;
Padmanabh Ratnakarc165541e2012-04-25 01:47:15 +00003185
3186 /* Flash the component */
Ajit Khaparde3f0d4562010-02-09 01:30:35 +00003187 p = fw->data;
Padmanabh Ratnakarc165541e2012-04-25 01:47:15 +00003188 p += filehdr_size + pflashcomp[i].offset + img_hdrs_size;
Sathya Perla306f1342011-08-02 19:57:45 +00003189 if (p + pflashcomp[i].size > fw->data + fw->size)
3190 return -1;
3191 total_bytes = pflashcomp[i].size;
Ajit Khaparde3f0d4562010-02-09 01:30:35 +00003192 while (total_bytes) {
3193 if (total_bytes > 32*1024)
3194 num_bytes = 32*1024;
3195 else
3196 num_bytes = total_bytes;
3197 total_bytes -= num_bytes;
Sathya Perla306f1342011-08-02 19:57:45 +00003198 if (!total_bytes) {
Padmanabh Ratnakarc165541e2012-04-25 01:47:15 +00003199 if (pflashcomp[i].optype == OPTYPE_PHY_FW)
Sathya Perla306f1342011-08-02 19:57:45 +00003200 flash_op = FLASHROM_OPER_PHY_FLASH;
3201 else
3202 flash_op = FLASHROM_OPER_FLASH;
3203 } else {
Padmanabh Ratnakarc165541e2012-04-25 01:47:15 +00003204 if (pflashcomp[i].optype == OPTYPE_PHY_FW)
Sathya Perla306f1342011-08-02 19:57:45 +00003205 flash_op = FLASHROM_OPER_PHY_SAVE;
3206 else
3207 flash_op = FLASHROM_OPER_SAVE;
3208 }
Ajit Khaparde3f0d4562010-02-09 01:30:35 +00003209 memcpy(req->params.data_buf, p, num_bytes);
3210 p += num_bytes;
3211 status = be_cmd_write_flashrom(adapter, flash_cmd,
3212 pflashcomp[i].optype, flash_op, num_bytes);
3213 if (status) {
Sathya Perla306f1342011-08-02 19:57:45 +00003214 if ((status == ILLEGAL_IOCTL_REQ) &&
3215 (pflashcomp[i].optype ==
Padmanabh Ratnakarc165541e2012-04-25 01:47:15 +00003216 OPTYPE_PHY_FW))
Sathya Perla306f1342011-08-02 19:57:45 +00003217 break;
Ajit Khaparde3f0d4562010-02-09 01:30:35 +00003218 dev_err(&adapter->pdev->dev,
3219 "cmd to write to flash rom failed.\n");
3220 return -1;
3221 }
Ajit Khaparde84517482009-09-04 03:12:16 +00003222 }
Ajit Khaparde84517482009-09-04 03:12:16 +00003223 }
Ajit Khaparde84517482009-09-04 03:12:16 +00003224 return 0;
3225}
3226
Ajit Khaparde3f0d4562010-02-09 01:30:35 +00003227static int get_ufigen_type(struct flash_file_hdr_g2 *fhdr)
3228{
3229 if (fhdr == NULL)
3230 return 0;
3231 if (fhdr->build[0] == '3')
3232 return BE_GEN3;
3233 else if (fhdr->build[0] == '2')
3234 return BE_GEN2;
3235 else
3236 return 0;
3237}
3238
Padmanabh Ratnakarf67ef7b2012-07-12 03:57:09 +00003239static int lancer_wait_idle(struct be_adapter *adapter)
3240{
3241#define SLIPORT_IDLE_TIMEOUT 30
3242 u32 reg_val;
3243 int status = 0, i;
3244
3245 for (i = 0; i < SLIPORT_IDLE_TIMEOUT; i++) {
3246 reg_val = ioread32(adapter->db + PHYSDEV_CONTROL_OFFSET);
3247 if ((reg_val & PHYSDEV_CONTROL_INP_MASK) == 0)
3248 break;
3249
3250 ssleep(1);
3251 }
3252
3253 if (i == SLIPORT_IDLE_TIMEOUT)
3254 status = -1;
3255
3256 return status;
3257}
3258
3259static int lancer_fw_reset(struct be_adapter *adapter)
3260{
3261 int status = 0;
3262
3263 status = lancer_wait_idle(adapter);
3264 if (status)
3265 return status;
3266
3267 iowrite32(PHYSDEV_CONTROL_FW_RESET_MASK, adapter->db +
3268 PHYSDEV_CONTROL_OFFSET);
3269
3270 return status;
3271}
3272
/* Download a FW image to a Lancer adapter: stream it to the "/prg" flash
 * object in 32KB chunks through a single DMA-coherent command buffer,
 * commit with a zero-length write, then reset the FW if the commit says a
 * reset activates the new image.  Returns 0 on success or a negative/FW
 * error status.
 */
static int lancer_fw_download(struct be_adapter *adapter,
				const struct firmware *fw)
{
#define LANCER_FW_DOWNLOAD_CHUNK      (32 * 1024)
#define LANCER_FW_DOWNLOAD_LOCATION "/prg"
	struct be_dma_mem flash_cmd;
	const u8 *data_ptr = NULL;
	u8 *dest_image_ptr = NULL;
	size_t image_size = 0;
	u32 chunk_size = 0;
	u32 data_written = 0;
	u32 offset = 0;
	int status = 0;
	u8 add_status = 0;
	u8 change_status;

	/* The write-object command transfers whole 32-bit words */
	if (!IS_ALIGNED(fw->size, sizeof(u32))) {
		dev_err(&adapter->pdev->dev,
			"FW Image not properly aligned. "
			"Length must be 4 byte aligned.\n");
		status = -EINVAL;
		goto lancer_fw_exit;
	}

	/* One buffer holds the command header plus one image chunk */
	flash_cmd.size = sizeof(struct lancer_cmd_req_write_object)
				+ LANCER_FW_DOWNLOAD_CHUNK;
	flash_cmd.va = dma_alloc_coherent(&adapter->pdev->dev, flash_cmd.size,
						&flash_cmd.dma, GFP_KERNEL);
	if (!flash_cmd.va) {
		status = -ENOMEM;
		dev_err(&adapter->pdev->dev,
			"Memory allocation failure while flashing\n");
		goto lancer_fw_exit;
	}

	/* Chunk payload goes right after the command header */
	dest_image_ptr = flash_cmd.va +
				sizeof(struct lancer_cmd_req_write_object);
	image_size = fw->size;
	data_ptr = fw->data;

	while (image_size) {
		chunk_size = min_t(u32, image_size, LANCER_FW_DOWNLOAD_CHUNK);

		/* Copy the image chunk content. */
		memcpy(dest_image_ptr, data_ptr, chunk_size);

		status = lancer_cmd_write_object(adapter, &flash_cmd,
						 chunk_size, offset,
						 LANCER_FW_DOWNLOAD_LOCATION,
						 &data_written, &change_status,
						 &add_status);
		if (status)
			break;

		/* Advance by what the FW actually accepted, which may be
		 * less than chunk_size
		 */
		offset += data_written;
		data_ptr += data_written;
		image_size -= data_written;
	}

	if (!status) {
		/* Commit the FW written */
		status = lancer_cmd_write_object(adapter, &flash_cmd,
						 0, offset,
						 LANCER_FW_DOWNLOAD_LOCATION,
						 &data_written, &change_status,
						 &add_status);
	}

	dma_free_coherent(&adapter->pdev->dev, flash_cmd.size, flash_cmd.va,
				flash_cmd.dma);
	if (status) {
		dev_err(&adapter->pdev->dev,
			"Firmware load error. "
			"Status code: 0x%x Additional Status: 0x%x\n",
			status, add_status);
		goto lancer_fw_exit;
	}

	/* Activate the new image: FW reset if that suffices, otherwise
	 * tell the user a reboot is needed
	 */
	if (change_status == LANCER_FW_RESET_NEEDED) {
		status = lancer_fw_reset(adapter);
		if (status) {
			dev_err(&adapter->pdev->dev,
				"Adapter busy for FW reset.\n"
				"New FW will not be active.\n");
			goto lancer_fw_exit;
		}
	} else if (change_status != LANCER_NO_RESET_NEEDED) {
		dev_err(&adapter->pdev->dev,
			"System reboot required for new FW"
			" to be active\n");
	}

	dev_info(&adapter->pdev->dev, "Firmware flashed successfully\n");
lancer_fw_exit:
	return status;
}
3369
/* Flash a BE2/BE3 (non-Lancer) UFI firmware image to the adapter.
 *
 * Validates that the UFI file generation matches the adapter generation
 * before flashing. For Gen3 multi-image UFIs only the image with
 * imageid == 1 is flashed; Gen2 UFIs carry a single image.
 *
 * Returns 0 on success, -ENOMEM on allocation failure, -1 on a
 * generation mismatch, or the status from be_flash_data().
 */
static int be_fw_download(struct be_adapter *adapter, const struct firmware* fw)
{
        struct flash_file_hdr_g2 *fhdr;
        struct flash_file_hdr_g3 *fhdr3;
        struct image_hdr *img_hdr_ptr = NULL;
        struct be_dma_mem flash_cmd;
        const u8 *p;
        int status = 0, i = 0, num_imgs = 0;

        p = fw->data;
        fhdr = (struct flash_file_hdr_g2 *) p;

        /* DMA buffer for the write-flashrom command: header + 32KB payload,
         * reused for every chunk written by be_flash_data() */
        flash_cmd.size = sizeof(struct be_cmd_write_flashrom) + 32*1024;
        flash_cmd.va = dma_alloc_coherent(&adapter->pdev->dev, flash_cmd.size,
                                          &flash_cmd.dma, GFP_KERNEL);
        if (!flash_cmd.va) {
                status = -ENOMEM;
                dev_err(&adapter->pdev->dev,
                        "Memory allocation failure while flashing\n");
                goto be_fw_exit;
        }

        /* Both the adapter and the UFI file must be of the same generation */
        if ((adapter->generation == BE_GEN3) &&
                        (get_ufigen_type(fhdr) == BE_GEN3)) {
                fhdr3 = (struct flash_file_hdr_g3 *) fw->data;
                num_imgs = le32_to_cpu(fhdr3->num_imgs);
                for (i = 0; i < num_imgs; i++) {
                        /* Image headers follow the g3 file header back-to-back */
                        img_hdr_ptr = (struct image_hdr *) (fw->data +
                                        (sizeof(struct flash_file_hdr_g3) +
                                         i * sizeof(struct image_hdr)));
                        if (le32_to_cpu(img_hdr_ptr->imageid) == 1)
                                status = be_flash_data(adapter, fw, &flash_cmd,
                                                        num_imgs);
                }
        } else if ((adapter->generation == BE_GEN2) &&
                        (get_ufigen_type(fhdr) == BE_GEN2)) {
                status = be_flash_data(adapter, fw, &flash_cmd, 0);
        } else {
                dev_err(&adapter->pdev->dev,
                        "UFI and Interface are not compatible for flashing\n");
                status = -1;
        }

        dma_free_coherent(&adapter->pdev->dev, flash_cmd.size, flash_cmd.va,
                          flash_cmd.dma);
        if (status) {
                dev_err(&adapter->pdev->dev, "Firmware load error\n");
                goto be_fw_exit;
        }

        dev_info(&adapter->pdev->dev, "Firmware flashed successfully\n");

be_fw_exit:
        return status;
}
3425
3426int be_load_fw(struct be_adapter *adapter, u8 *fw_file)
3427{
3428 const struct firmware *fw;
3429 int status;
3430
3431 if (!netif_running(adapter->netdev)) {
3432 dev_err(&adapter->pdev->dev,
3433 "Firmware load not allowed (interface is down)\n");
3434 return -1;
3435 }
3436
3437 status = request_firmware(&fw, fw_file, &adapter->pdev->dev);
3438 if (status)
3439 goto fw_exit;
3440
3441 dev_info(&adapter->pdev->dev, "Flashing firmware file %s\n", fw_file);
3442
3443 if (lancer_chip(adapter))
3444 status = lancer_fw_download(adapter, fw);
3445 else
3446 status = be_fw_download(adapter, fw);
3447
Ajit Khaparde84517482009-09-04 03:12:16 +00003448fw_exit:
3449 release_firmware(fw);
3450 return status;
3451}
3452
/* net_device callbacks for both PF and VF functions.
 * The ndo_set_vf_* hooks are only meaningful on a PF with SR-IOV enabled;
 * the individual handlers reject the calls otherwise. */
static const struct net_device_ops be_netdev_ops = {
        .ndo_open = be_open,
        .ndo_stop = be_close,
        .ndo_start_xmit = be_xmit,
        .ndo_set_rx_mode = be_set_rx_mode,
        .ndo_set_mac_address = be_mac_addr_set,
        .ndo_change_mtu = be_change_mtu,
        .ndo_get_stats64 = be_get_stats64,
        .ndo_validate_addr = eth_validate_addr,
        .ndo_vlan_rx_add_vid = be_vlan_add_vid,
        .ndo_vlan_rx_kill_vid = be_vlan_rem_vid,
        .ndo_set_vf_mac = be_set_vf_mac,
        .ndo_set_vf_vlan = be_set_vf_vlan,
        .ndo_set_vf_tx_rate = be_set_vf_tx_rate,
        .ndo_get_vf_config = be_get_vf_config,
#ifdef CONFIG_NET_POLL_CONTROLLER
        .ndo_poll_controller = be_netpoll,
#endif
};
3472
3473static void be_netdev_init(struct net_device *netdev)
3474{
3475 struct be_adapter *adapter = netdev_priv(netdev);
Sathya Perla10ef9ab2012-02-09 18:05:27 +00003476 struct be_eq_obj *eqo;
Sathya Perla3abcded2010-10-03 22:12:27 -07003477 int i;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003478
Michał Mirosław6332c8d2011-04-07 02:43:48 +00003479 netdev->hw_features |= NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6 |
Michał Mirosław8b8ddc62011-04-08 02:38:47 +00003480 NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_RXCSUM |
3481 NETIF_F_HW_VLAN_TX;
3482 if (be_multi_rxq(adapter))
3483 netdev->hw_features |= NETIF_F_RXHASH;
Michał Mirosław6332c8d2011-04-07 02:43:48 +00003484
3485 netdev->features |= netdev->hw_features |
Michał Mirosław8b8ddc62011-04-08 02:38:47 +00003486 NETIF_F_HW_VLAN_RX | NETIF_F_HW_VLAN_FILTER;
Ajit Khaparde4b972912011-04-06 18:07:43 +00003487
Padmanabh Ratnakareb8a50d2011-06-11 15:58:46 -07003488 netdev->vlan_features |= NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6 |
Michał Mirosław79032642010-11-30 06:38:00 +00003489 NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;
Ajit Khaparde51c59872009-11-29 17:54:54 +00003490
Ajit Khapardefbc13f02012-03-18 06:23:21 +00003491 netdev->priv_flags |= IFF_UNICAST_FLT;
3492
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003493 netdev->flags |= IFF_MULTICAST;
3494
Sarveshwar Bandib7e58872012-06-13 19:51:43 +00003495 netif_set_gso_max_size(netdev, 65535 - ETH_HLEN);
Ajit Khapardec190e3c2009-09-04 03:12:29 +00003496
Sathya Perla10ef9ab2012-02-09 18:05:27 +00003497 netdev->netdev_ops = &be_netdev_ops;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003498
3499 SET_ETHTOOL_OPS(netdev, &be_ethtool_ops);
3500
Sathya Perla10ef9ab2012-02-09 18:05:27 +00003501 for_all_evt_queues(adapter, eqo, i)
3502 netif_napi_add(netdev, &eqo->napi, be_poll, BE_NAPI_WEIGHT);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003503}
3504
/* Undo be_map_pci_bars()/lancer_roce_map_pci_bars(): tear down whichever
 * MMIO mappings were established (csr and db may be unset on some
 * chip/function combinations, hence the NULL checks). */
static void be_unmap_pci_bars(struct be_adapter *adapter)
{
        if (adapter->csr)
                iounmap(adapter->csr);
        if (adapter->db)
                iounmap(adapter->db);
        /* roce_db.base was mapped with pci_iomap(), so release it with
         * pci_iounmap() rather than plain iounmap() */
        if (adapter->roce_db.base)
                pci_iounmap(adapter->pdev, adapter->roce_db.base);
}
3514
3515static int lancer_roce_map_pci_bars(struct be_adapter *adapter)
3516{
3517 struct pci_dev *pdev = adapter->pdev;
3518 u8 __iomem *addr;
3519
3520 addr = pci_iomap(pdev, 2, 0);
3521 if (addr == NULL)
3522 return -ENOMEM;
3523
3524 adapter->roce_db.base = addr;
3525 adapter->roce_db.io_addr = pci_resource_start(pdev, 2);
3526 adapter->roce_db.size = 8192;
3527 adapter->roce_db.total_size = pci_resource_len(pdev, 2);
3528 return 0;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003529}
3530
3531static int be_map_pci_bars(struct be_adapter *adapter)
3532{
3533 u8 __iomem *addr;
Sathya Perladb3ea782011-08-22 19:41:52 +00003534 int db_reg;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003535
Sathya Perlafe6d2a32010-11-21 23:25:50 +00003536 if (lancer_chip(adapter)) {
Parav Pandit045508a2012-03-26 14:27:13 +00003537 if (be_type_2_3(adapter)) {
3538 addr = ioremap_nocache(
3539 pci_resource_start(adapter->pdev, 0),
3540 pci_resource_len(adapter->pdev, 0));
3541 if (addr == NULL)
3542 return -ENOMEM;
3543 adapter->db = addr;
3544 }
3545 if (adapter->if_type == SLI_INTF_TYPE_3) {
3546 if (lancer_roce_map_pci_bars(adapter))
3547 goto pci_map_err;
3548 }
Sathya Perlafe6d2a32010-11-21 23:25:50 +00003549 return 0;
3550 }
3551
Sarveshwar Bandiba343c72010-03-31 02:56:12 +00003552 if (be_physfn(adapter)) {
3553 addr = ioremap_nocache(pci_resource_start(adapter->pdev, 2),
3554 pci_resource_len(adapter->pdev, 2));
3555 if (addr == NULL)
3556 return -ENOMEM;
3557 adapter->csr = addr;
3558 }
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003559
Sarveshwar Bandiba343c72010-03-31 02:56:12 +00003560 if (adapter->generation == BE_GEN2) {
Sarveshwar Bandiba343c72010-03-31 02:56:12 +00003561 db_reg = 4;
3562 } else {
Sarveshwar Bandiba343c72010-03-31 02:56:12 +00003563 if (be_physfn(adapter))
3564 db_reg = 4;
3565 else
3566 db_reg = 0;
3567 }
3568 addr = ioremap_nocache(pci_resource_start(adapter->pdev, db_reg),
3569 pci_resource_len(adapter->pdev, db_reg));
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003570 if (addr == NULL)
3571 goto pci_map_err;
Sathya Perla8788fdc2009-07-27 22:52:03 +00003572 adapter->db = addr;
Parav Pandit045508a2012-03-26 14:27:13 +00003573 if (adapter->sli_family == SKYHAWK_SLI_FAMILY) {
3574 adapter->roce_db.size = 4096;
3575 adapter->roce_db.io_addr =
3576 pci_resource_start(adapter->pdev, db_reg);
3577 adapter->roce_db.total_size =
3578 pci_resource_len(adapter->pdev, db_reg);
3579 }
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003580 return 0;
3581pci_map_err:
3582 be_unmap_pci_bars(adapter);
3583 return -ENOMEM;
3584}
3585
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003586static void be_ctrl_cleanup(struct be_adapter *adapter)
3587{
Sathya Perla8788fdc2009-07-27 22:52:03 +00003588 struct be_dma_mem *mem = &adapter->mbox_mem_alloced;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003589
3590 be_unmap_pci_bars(adapter);
3591
3592 if (mem->va)
Ivan Vecera2b7bceb2011-02-02 08:05:12 +00003593 dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
3594 mem->dma);
Sathya Perlae7b909a2009-11-22 22:01:10 +00003595
Sathya Perla5b8821b2011-08-02 19:57:44 +00003596 mem = &adapter->rx_filter;
Sathya Perlae7b909a2009-11-22 22:01:10 +00003597 if (mem->va)
Ivan Vecera2b7bceb2011-02-02 08:05:12 +00003598 dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
3599 mem->dma);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003600}
3601
/* One-time control-path setup: map PCI BARs, allocate the FW mailbox
 * (16-byte aligned within an over-sized buffer) and the rx-filter cmd
 * buffer, and initialize the mbox/MCC locks.
 * Uses goto-unwind so each failure releases exactly what preceded it.
 * Returns 0 on success or a negative errno. */
static int be_ctrl_init(struct be_adapter *adapter)
{
        struct be_dma_mem *mbox_mem_alloc = &adapter->mbox_mem_alloced;
        struct be_dma_mem *mbox_mem_align = &adapter->mbox_mem;
        struct be_dma_mem *rx_filter = &adapter->rx_filter;
        int status;

        status = be_map_pci_bars(adapter);
        if (status)
                goto done;

        /* Over-allocate by 16 bytes so the mailbox can be aligned to a
         * 16-byte boundary as required by the hardware */
        mbox_mem_alloc->size = sizeof(struct be_mcc_mailbox) + 16;
        mbox_mem_alloc->va = dma_alloc_coherent(&adapter->pdev->dev,
                                                mbox_mem_alloc->size,
                                                &mbox_mem_alloc->dma,
                                                GFP_KERNEL);
        if (!mbox_mem_alloc->va) {
                status = -ENOMEM;
                goto unmap_pci_bars;
        }
        /* mbox_mem_align is a view into mbox_mem_alloc; only the latter
         * is ever freed */
        mbox_mem_align->size = sizeof(struct be_mcc_mailbox);
        mbox_mem_align->va = PTR_ALIGN(mbox_mem_alloc->va, 16);
        mbox_mem_align->dma = PTR_ALIGN(mbox_mem_alloc->dma, 16);
        memset(mbox_mem_align->va, 0, sizeof(struct be_mcc_mailbox));

        rx_filter->size = sizeof(struct be_cmd_req_rx_filter);
        rx_filter->va = dma_alloc_coherent(&adapter->pdev->dev, rx_filter->size,
                                        &rx_filter->dma, GFP_KERNEL);
        if (rx_filter->va == NULL) {
                status = -ENOMEM;
                goto free_mbox;
        }
        memset(rx_filter->va, 0, rx_filter->size);
        mutex_init(&adapter->mbox_lock);
        spin_lock_init(&adapter->mcc_lock);
        spin_lock_init(&adapter->mcc_cq_lock);

        init_completion(&adapter->flash_compl);
        /* Saved config space is restored on EEH/error recovery */
        pci_save_state(adapter->pdev);
        return 0;

free_mbox:
        dma_free_coherent(&adapter->pdev->dev, mbox_mem_alloc->size,
                          mbox_mem_alloc->va, mbox_mem_alloc->dma);

unmap_pci_bars:
        be_unmap_pci_bars(adapter);

done:
        return status;
}
3653
3654static void be_stats_cleanup(struct be_adapter *adapter)
3655{
Sathya Perla3abcded2010-10-03 22:12:27 -07003656 struct be_dma_mem *cmd = &adapter->stats_cmd;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003657
3658 if (cmd->va)
Ivan Vecera2b7bceb2011-02-02 08:05:12 +00003659 dma_free_coherent(&adapter->pdev->dev, cmd->size,
3660 cmd->va, cmd->dma);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003661}
3662
3663static int be_stats_init(struct be_adapter *adapter)
3664{
Sathya Perla3abcded2010-10-03 22:12:27 -07003665 struct be_dma_mem *cmd = &adapter->stats_cmd;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003666
Selvin Xavier005d5692011-05-16 07:36:35 +00003667 if (adapter->generation == BE_GEN2) {
Ajit Khaparde89a88ab2011-05-16 07:36:18 +00003668 cmd->size = sizeof(struct be_cmd_req_get_stats_v0);
Selvin Xavier005d5692011-05-16 07:36:35 +00003669 } else {
3670 if (lancer_chip(adapter))
3671 cmd->size = sizeof(struct lancer_cmd_req_pport_stats);
3672 else
3673 cmd->size = sizeof(struct be_cmd_req_get_stats_v1);
3674 }
Ivan Vecera2b7bceb2011-02-02 08:05:12 +00003675 cmd->va = dma_alloc_coherent(&adapter->pdev->dev, cmd->size, &cmd->dma,
3676 GFP_KERNEL);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003677 if (cmd->va == NULL)
3678 return -1;
David S. Millerd291b9a2010-01-28 21:36:21 -08003679 memset(cmd->va, 0, cmd->size);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003680 return 0;
3681}
3682
/* PCI remove callback: tear down one NIC function.
 * Order matters: the RoCE driver and the recovery worker must be gone
 * before the netdev is unregistered, and the netdev must be freed last
 * because everything else lives in its private area. */
static void __devexit be_remove(struct pci_dev *pdev)
{
        struct be_adapter *adapter = pci_get_drvdata(pdev);

        if (!adapter)
                return;

        be_roce_dev_remove(adapter);

        cancel_delayed_work_sync(&adapter->func_recovery_work);

        unregister_netdev(adapter->netdev);

        be_clear(adapter);

        /* tell fw we're done with firing cmds */
        be_cmd_fw_clean(adapter);

        be_stats_cleanup(adapter);

        be_ctrl_cleanup(adapter);

        pci_disable_pcie_error_reporting(pdev);

        pci_set_drvdata(pdev, NULL);
        pci_release_regions(pdev);
        pci_disable_device(pdev);

        /* adapter is embedded in the netdev's private data */
        free_netdev(adapter->netdev);
}
3713
Ajit Khaparde4762f6c2012-03-18 06:23:11 +00003714bool be_is_wol_supported(struct be_adapter *adapter)
3715{
3716 return ((adapter->wol_cap & BE_WOL_CAP) &&
3717 !be_is_wol_excluded(adapter)) ? true : false;
3718}
3719
/* Query the firmware's UART trace (FAT) log level.
 * Not supported on Lancer, where 0 is returned immediately.
 * Returns the dbg_lvl of the MODE_UART entry from module[0]'s trace
 * level table, or 0 on any failure (allocation or command error). */
u32 be_get_fw_log_level(struct be_adapter *adapter)
{
        struct be_dma_mem extfat_cmd;
        struct be_fat_conf_params *cfgs;
        int status;
        u32 level = 0;
        int j;

        if (lancer_chip(adapter))
                return 0;

        memset(&extfat_cmd, 0, sizeof(struct be_dma_mem));
        extfat_cmd.size = sizeof(struct be_cmd_resp_get_ext_fat_caps);
        extfat_cmd.va = pci_alloc_consistent(adapter->pdev, extfat_cmd.size,
                                             &extfat_cmd.dma);

        if (!extfat_cmd.va) {
                dev_err(&adapter->pdev->dev, "%s: Memory allocation failure\n",
                        __func__);
                goto err;
        }

        status = be_cmd_get_ext_fat_capabilites(adapter, &extfat_cmd);
        if (!status) {
                /* FAT config params follow the generic response header */
                cfgs = (struct be_fat_conf_params *)(extfat_cmd.va +
                                                sizeof(struct be_cmd_resp_hdr));
                /* Scan module[0]'s modes; the last MODE_UART entry wins */
                for (j = 0; j < le32_to_cpu(cfgs->module[0].num_modes); j++) {
                        if (cfgs->module[0].trace_lvl[j].mode == MODE_UART)
                                level = cfgs->module[0].trace_lvl[j].dbg_lvl;
                }
        }
        pci_free_consistent(adapter->pdev, extfat_cmd.size, extfat_cmd.va,
                            extfat_cmd.dma);
err:
        return level;
}
Padmanabh Ratnakarabb93952012-10-20 06:01:41 +00003756
/* Fetch the one-time configuration the driver caches at probe:
 * controller attributes, WoL capability, die-temperature poll rate
 * and the initial log-message level.
 * Returns 0 on success or the status of a failing mandatory command. */
static int be_get_initial_config(struct be_adapter *adapter)
{
        int status;
        u32 level;

        status = be_cmd_get_cntl_attributes(adapter);
        if (status)
                return status;

        status = be_cmd_get_acpi_wol_cap(adapter);
        if (status) {
                /* in case of a failure to get wol capabilities
                 * check the exclusion list to determine WOL capability */
                if (!be_is_wol_excluded(adapter))
                        adapter->wol_cap |= BE_WOL_CAP;
        }

        if (be_is_wol_supported(adapter))
                adapter->wol = true;

        /* Must be a power of 2 or else MODULO will BUG_ON */
        adapter->be_get_temp_freq = 64;

        /* Mirror the FW's UART trace level into netif msg_enable */
        level = be_get_fw_log_level(adapter);
        adapter->msg_enable = level <= FW_LOG_LEVEL_DEFAULT ? NETIF_MSG_HW : 0;

        return 0;
}
3785
Sathya Perla39f1d942012-05-08 19:41:24 +00003786static int be_dev_type_check(struct be_adapter *adapter)
Sathya Perlafe6d2a32010-11-21 23:25:50 +00003787{
3788 struct pci_dev *pdev = adapter->pdev;
3789 u32 sli_intf = 0, if_type;
3790
3791 switch (pdev->device) {
3792 case BE_DEVICE_ID1:
3793 case OC_DEVICE_ID1:
3794 adapter->generation = BE_GEN2;
3795 break;
3796 case BE_DEVICE_ID2:
3797 case OC_DEVICE_ID2:
3798 adapter->generation = BE_GEN3;
3799 break;
3800 case OC_DEVICE_ID3:
Mammatha Edhala12f4d0a2011-05-18 03:26:22 +00003801 case OC_DEVICE_ID4:
Sathya Perlafe6d2a32010-11-21 23:25:50 +00003802 pci_read_config_dword(pdev, SLI_INTF_REG_OFFSET, &sli_intf);
Parav Pandit045508a2012-03-26 14:27:13 +00003803 adapter->if_type = (sli_intf & SLI_INTF_IF_TYPE_MASK) >>
3804 SLI_INTF_IF_TYPE_SHIFT;
Sathya Perlafe6d2a32010-11-21 23:25:50 +00003805 if_type = (sli_intf & SLI_INTF_IF_TYPE_MASK) >>
3806 SLI_INTF_IF_TYPE_SHIFT;
Sathya Perlafe6d2a32010-11-21 23:25:50 +00003807 if (((sli_intf & SLI_INTF_VALID_MASK) != SLI_INTF_VALID) ||
Parav Pandit045508a2012-03-26 14:27:13 +00003808 !be_type_2_3(adapter)) {
3809 dev_err(&pdev->dev, "SLI_INTF reg val is not valid\n");
3810 return -EINVAL;
3811 }
3812 adapter->sli_family = ((sli_intf & SLI_INTF_FAMILY_MASK) >>
3813 SLI_INTF_FAMILY_SHIFT);
3814 adapter->generation = BE_GEN3;
3815 break;
3816 case OC_DEVICE_ID5:
3817 pci_read_config_dword(pdev, SLI_INTF_REG_OFFSET, &sli_intf);
3818 if ((sli_intf & SLI_INTF_VALID_MASK) != SLI_INTF_VALID) {
Sathya Perlafe6d2a32010-11-21 23:25:50 +00003819 dev_err(&pdev->dev, "SLI_INTF reg val is not valid\n");
3820 return -EINVAL;
3821 }
Sathya Perlafe6d2a32010-11-21 23:25:50 +00003822 adapter->sli_family = ((sli_intf & SLI_INTF_FAMILY_MASK) >>
3823 SLI_INTF_FAMILY_SHIFT);
3824 adapter->generation = BE_GEN3;
3825 break;
3826 default:
3827 adapter->generation = 0;
3828 }
Sathya Perla39f1d942012-05-08 19:41:24 +00003829
3830 pci_read_config_dword(adapter->pdev, SLI_INTF_REG_OFFSET, &sli_intf);
3831 adapter->virtfn = (sli_intf & SLI_INTF_FT_MASK) ? 1 : 0;
Sathya Perlafe6d2a32010-11-21 23:25:50 +00003832 return 0;
3833}
3834
/* Attempt SLIPORT error recovery on a Lancer adapter: wait for the
 * chip to report ready, tear down and rebuild the function, and reopen
 * the interface if it was up.
 * Returns 0 on success; on failure returns the failing step's status
 * (the failure is only logged when no EEH recovery is in flight). */
static int lancer_recover_func(struct be_adapter *adapter)
{
        int status;

        status = lancer_test_and_set_rdy_state(adapter);
        if (status)
                goto err;

        if (netif_running(adapter->netdev))
                be_close(adapter->netdev);

        be_clear(adapter);

        /* Clear the sticky error state so be_setup()'s FW cmds can run */
        adapter->hw_error = false;
        adapter->fw_timeout = false;

        status = be_setup(adapter);
        if (status)
                goto err;

        if (netif_running(adapter->netdev)) {
                status = be_open(adapter->netdev);
                if (status)
                        goto err;
        }

        dev_err(&adapter->pdev->dev,
                "Adapter SLIPORT recovery succeeded\n");
        return 0;
err:
        if (adapter->eeh_error)
                dev_err(&adapter->pdev->dev,
                        "Adapter SLIPORT recovery failed\n");

        return status;
}
3871
/* Periodic (1s) worker that polls for adapter errors and, on Lancer,
 * drives SLIPORT recovery. The netdev is detached around the recovery
 * attempt so the stack cannot transmit meanwhile; it is re-attached
 * only if recovery succeeds. Always reschedules itself. */
static void be_func_recovery_task(struct work_struct *work)
{
        struct be_adapter *adapter =
                container_of(work, struct be_adapter, func_recovery_work.work);
        int status;

        be_detect_error(adapter);

        if (adapter->hw_error && lancer_chip(adapter)) {

                /* EEH recovery owns the device; stay out of its way */
                if (adapter->eeh_error)
                        goto out;

                rtnl_lock();
                netif_device_detach(adapter->netdev);
                rtnl_unlock();

                status = lancer_recover_func(adapter);

                if (!status)
                        netif_device_attach(adapter->netdev);
        }

out:
        schedule_delayed_work(&adapter->func_recovery_work,
                              msecs_to_jiffies(1000));
}
3899
/* Periodic (1s) housekeeping worker: reaps MCC completions while the
 * interface is down, issues async stats/temperature queries, replenishes
 * starved RX rings and adapts event-queue interrupt delay. */
static void be_worker(struct work_struct *work)
{
        struct be_adapter *adapter =
                container_of(work, struct be_adapter, work.work);
        struct be_rx_obj *rxo;
        struct be_eq_obj *eqo;
        int i;

        /* when interrupts are not yet enabled, just reap any pending
         * mcc completions */
        if (!netif_running(adapter->netdev)) {
                /* be_process_mcc() expects BH context */
                local_bh_disable();
                be_process_mcc(adapter);
                local_bh_enable();
                goto reschedule;
        }

        /* Fire a new stats cmd only if the previous one has completed */
        if (!adapter->stats_cmd_sent) {
                if (lancer_chip(adapter))
                        lancer_cmd_get_pport_stats(adapter,
                                                   &adapter->stats_cmd);
                else
                        be_cmd_get_stats(adapter, &adapter->stats_cmd);
        }

        /* Sample die temperature every be_get_temp_freq ticks */
        if (MODULO(adapter->work_counter, adapter->be_get_temp_freq) == 0)
                be_cmd_get_die_temperature(adapter);

        for_all_rx_queues(adapter, rxo, i) {
                if (rxo->rx_post_starved) {
                        rxo->rx_post_starved = false;
                        be_post_rx_frags(rxo, GFP_KERNEL);
                }
        }

        for_all_evt_queues(adapter, eqo, i)
                be_eqd_update(adapter, eqo);

reschedule:
        adapter->work_counter++;
        schedule_delayed_work(&adapter->work, msecs_to_jiffies(1000));
}
3942
Sathya Perla39f1d942012-05-08 19:41:24 +00003943static bool be_reset_required(struct be_adapter *adapter)
3944{
Sathya Perlad79c0a22012-06-05 19:37:22 +00003945 return be_find_vfs(adapter, ENABLED) > 0 ? false : true;
Sathya Perla39f1d942012-05-08 19:41:24 +00003946}
3947
Sathya Perlad3791422012-09-28 04:39:44 +00003948static char *mc_name(struct be_adapter *adapter)
3949{
3950 if (adapter->function_mode & FLEX10_MODE)
3951 return "FLEX10";
3952 else if (adapter->function_mode & VNIC_MODE)
3953 return "vNIC";
3954 else if (adapter->function_mode & UMC_ENABLED)
3955 return "UMC";
3956 else
3957 return "";
3958}
3959
/* "PF" or "VF" depending on the PCI function type; for probe logs. */
static inline char *func_name(struct be_adapter *adapter)
{
        if (be_physfn(adapter))
                return "PF";
        return "VF";
}
3964
/* PCI probe entry point: bring up one BE2/BE3/Lancer NIC function.
 * Sequence: enable the PCI device, allocate the netdev (the adapter
 * struct lives in its private area), map BARs and init the control
 * path, sync with FW readiness, optionally reset the function, create
 * queues via be_setup(), register the netdev and kick off the
 * recovery worker. Errors unwind through the goto chain in reverse
 * order of acquisition. */
static int __devinit be_probe(struct pci_dev *pdev,
                        const struct pci_device_id *pdev_id)
{
        int status = 0;
        struct be_adapter *adapter;
        struct net_device *netdev;
        char port_name;

        status = pci_enable_device(pdev);
        if (status)
                goto do_none;

        status = pci_request_regions(pdev, DRV_NAME);
        if (status)
                goto disable_dev;
        pci_set_master(pdev);

        netdev = alloc_etherdev_mqs(sizeof(*adapter), MAX_TX_QS, MAX_RX_QS);
        if (netdev == NULL) {
                status = -ENOMEM;
                goto rel_reg;
        }
        adapter = netdev_priv(netdev);
        adapter->pdev = pdev;
        pci_set_drvdata(pdev, adapter);

        status = be_dev_type_check(adapter);
        if (status)
                goto free_netdev;

        adapter->netdev = netdev;
        SET_NETDEV_DEV(netdev, &pdev->dev);

        /* Prefer 64-bit DMA; fall back to 32-bit */
        status = dma_set_mask(&pdev->dev, DMA_BIT_MASK(64));
        if (!status) {
                netdev->features |= NETIF_F_HIGHDMA;
        } else {
                status = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
                if (status) {
                        dev_err(&pdev->dev, "Could not set PCI DMA Mask\n");
                        goto free_netdev;
                }
        }

        /* AER failure is non-fatal; just log it */
        status = pci_enable_pcie_error_reporting(pdev);
        if (status)
                dev_err(&pdev->dev, "Could not use PCIe error reporting\n");

        status = be_ctrl_init(adapter);
        if (status)
                goto free_netdev;

        /* sync up with fw's ready state */
        if (be_physfn(adapter)) {
                status = be_fw_wait_ready(adapter);
                if (status)
                        goto ctrl_clean;
        }

        /* tell fw we're ready to fire cmds */
        status = be_cmd_fw_init(adapter);
        if (status)
                goto ctrl_clean;

        if (be_reset_required(adapter)) {
                status = be_cmd_reset_function(adapter);
                if (status)
                        goto ctrl_clean;
        }

        /* The INTR bit may be set in the card when probed by a kdump kernel
         * after a crash.
         */
        if (!lancer_chip(adapter))
                be_intr_set(adapter, false);

        status = be_stats_init(adapter);
        if (status)
                goto ctrl_clean;

        status = be_get_initial_config(adapter);
        if (status)
                goto stats_clean;

        INIT_DELAYED_WORK(&adapter->work, be_worker);
        INIT_DELAYED_WORK(&adapter->func_recovery_work, be_func_recovery_task);
        adapter->rx_fc = adapter->tx_fc = true;

        status = be_setup(adapter);
        if (status)
                goto stats_clean;

        be_netdev_init(netdev);
        status = register_netdev(netdev);
        if (status != 0)
                goto unsetup;

        be_roce_dev_add(adapter);

        schedule_delayed_work(&adapter->func_recovery_work,
                              msecs_to_jiffies(1000));

        be_cmd_query_port_name(adapter, &port_name);

        dev_info(&pdev->dev, "%s: %s %s port %c\n", nic_name(pdev),
                 func_name(adapter), mc_name(adapter), port_name);

        return 0;

unsetup:
        be_clear(adapter);
stats_clean:
        be_stats_cleanup(adapter);
ctrl_clean:
        be_ctrl_cleanup(adapter);
free_netdev:
        free_netdev(netdev);
        pci_set_drvdata(pdev, NULL);
rel_reg:
        pci_release_regions(pdev);
disable_dev:
        pci_disable_device(pdev);
do_none:
        dev_err(&pdev->dev, "%s initialization failed\n", nic_name(pdev));
        return status;
}
4091
/* Legacy PM suspend callback: arm WoL if enabled, stop the recovery
 * worker, quiesce and tear down the data path, then power the device
 * down to the state chosen for @state. Always returns 0. */
static int be_suspend(struct pci_dev *pdev, pm_message_t state)
{
        struct be_adapter *adapter = pci_get_drvdata(pdev);
        struct net_device *netdev = adapter->netdev;

        if (adapter->wol)
                be_setup_wol(adapter, true);

        cancel_delayed_work_sync(&adapter->func_recovery_work);

        netif_device_detach(netdev);
        if (netif_running(netdev)) {
                /* be_close() expects rtnl to be held */
                rtnl_lock();
                be_close(netdev);
                rtnl_unlock();
        }
        be_clear(adapter);

        pci_save_state(pdev);
        pci_disable_device(pdev);
        pci_set_power_state(pdev, pci_choose_state(pdev, state));
        return 0;
}
4115
4116static int be_resume(struct pci_dev *pdev)
4117{
4118 int status = 0;
4119 struct be_adapter *adapter = pci_get_drvdata(pdev);
4120 struct net_device *netdev = adapter->netdev;
4121
4122 netif_device_detach(netdev);
4123
4124 status = pci_enable_device(pdev);
4125 if (status)
4126 return status;
4127
4128 pci_set_power_state(pdev, 0);
4129 pci_restore_state(pdev);
4130
Sathya Perla2243e2e2009-11-22 22:02:03 +00004131 /* tell fw we're ready to fire cmds */
4132 status = be_cmd_fw_init(adapter);
4133 if (status)
4134 return status;
4135
Sarveshwar Bandi9b0365f2009-08-12 21:01:29 +00004136 be_setup(adapter);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004137 if (netif_running(netdev)) {
4138 rtnl_lock();
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004139 be_open(netdev);
4140 rtnl_unlock();
4141 }
Padmanabh Ratnakarf67ef7b2012-07-12 03:57:09 +00004142
4143 schedule_delayed_work(&adapter->func_recovery_work,
4144 msecs_to_jiffies(1000));
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004145 netif_device_attach(netdev);
Ajit Khaparde71d8d1b2009-12-03 06:16:59 +00004146
4147 if (adapter->wol)
4148 be_setup_wol(adapter, false);
Ajit Khapardea4ca0552011-02-11 13:38:03 +00004149
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004150 return 0;
4151}
4152
/*
 * An FLR will stop BE from DMAing any data.
 */
static void be_shutdown(struct pci_dev *pdev)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);

	/* probe may have failed before drvdata was set */
	if (!adapter)
		return;

	/* Stop both delayed workers before touching the HW */
	cancel_delayed_work_sync(&adapter->work);
	cancel_delayed_work_sync(&adapter->func_recovery_work);

	netif_device_detach(adapter->netdev);

	if (adapter->wol)
		be_setup_wol(adapter, true);

	/* Function-level reset stops all DMA (see comment above) */
	be_cmd_reset_function(adapter);

	pci_disable_device(pdev);
}
4175
Sathya Perlacf588472010-02-14 21:22:01 +00004176static pci_ers_result_t be_eeh_err_detected(struct pci_dev *pdev,
4177 pci_channel_state_t state)
4178{
4179 struct be_adapter *adapter = pci_get_drvdata(pdev);
4180 struct net_device *netdev = adapter->netdev;
4181
4182 dev_err(&adapter->pdev->dev, "EEH error detected\n");
4183
Padmanabh Ratnakarf67ef7b2012-07-12 03:57:09 +00004184 adapter->eeh_error = true;
Sathya Perlacf588472010-02-14 21:22:01 +00004185
Padmanabh Ratnakarf67ef7b2012-07-12 03:57:09 +00004186 cancel_delayed_work_sync(&adapter->func_recovery_work);
4187
4188 rtnl_lock();
Sathya Perlacf588472010-02-14 21:22:01 +00004189 netif_device_detach(netdev);
Padmanabh Ratnakarf67ef7b2012-07-12 03:57:09 +00004190 rtnl_unlock();
Sathya Perlacf588472010-02-14 21:22:01 +00004191
4192 if (netif_running(netdev)) {
4193 rtnl_lock();
4194 be_close(netdev);
4195 rtnl_unlock();
4196 }
4197 be_clear(adapter);
4198
4199 if (state == pci_channel_io_perm_failure)
4200 return PCI_ERS_RESULT_DISCONNECT;
4201
4202 pci_disable_device(pdev);
4203
Somnath Kotureeb7fc72012-05-02 03:41:01 +00004204 /* The error could cause the FW to trigger a flash debug dump.
4205 * Resetting the card while flash dump is in progress
Padmanabh Ratnakarc8a54162012-10-20 06:03:37 +00004206 * can cause it not to recover; wait for it to finish.
4207 * Wait only for first function as it is needed only once per
4208 * adapter.
Somnath Kotureeb7fc72012-05-02 03:41:01 +00004209 */
Padmanabh Ratnakarc8a54162012-10-20 06:03:37 +00004210 if (pdev->devfn == 0)
4211 ssleep(30);
4212
Sathya Perlacf588472010-02-14 21:22:01 +00004213 return PCI_ERS_RESULT_NEED_RESET;
4214}
4215
4216static pci_ers_result_t be_eeh_reset(struct pci_dev *pdev)
4217{
4218 struct be_adapter *adapter = pci_get_drvdata(pdev);
4219 int status;
4220
4221 dev_info(&adapter->pdev->dev, "EEH reset\n");
Padmanabh Ratnakarf67ef7b2012-07-12 03:57:09 +00004222 be_clear_all_error(adapter);
Sathya Perlacf588472010-02-14 21:22:01 +00004223
4224 status = pci_enable_device(pdev);
4225 if (status)
4226 return PCI_ERS_RESULT_DISCONNECT;
4227
4228 pci_set_master(pdev);
4229 pci_set_power_state(pdev, 0);
4230 pci_restore_state(pdev);
4231
4232 /* Check if card is ok and fw is ready */
Padmanabh Ratnakarbf99e502012-07-12 03:56:58 +00004233 status = be_fw_wait_ready(adapter);
Sathya Perlacf588472010-02-14 21:22:01 +00004234 if (status)
4235 return PCI_ERS_RESULT_DISCONNECT;
4236
Sathya Perlad6b6d982012-09-05 01:56:48 +00004237 pci_cleanup_aer_uncorrect_error_status(pdev);
Sathya Perlacf588472010-02-14 21:22:01 +00004238 return PCI_ERS_RESULT_RECOVERED;
4239}
4240
/* EEH resume callback: traffic may start flowing again. Rebuild FW
 * state and HW resources and re-open the interface if it was running.
 * This callback returns void, so failures can only be logged; on
 * error the netdev is left detached.
 */
static void be_eeh_resume(struct pci_dev *pdev)
{
	int status = 0;
	struct be_adapter *adapter = pci_get_drvdata(pdev);
	struct net_device *netdev = adapter->netdev;

	dev_info(&adapter->pdev->dev, "EEH resume\n");

	pci_save_state(pdev);

	/* tell fw we're ready to fire cmds */
	status = be_cmd_fw_init(adapter);
	if (status)
		goto err;

	status = be_cmd_reset_function(adapter);
	if (status)
		goto err;

	status = be_setup(adapter);
	if (status)
		goto err;

	if (netif_running(netdev)) {
		status = be_open(netdev);
		if (status)
			goto err;
	}

	/* Restart the FW health-check/recovery worker cancelled in
	 * be_eeh_err_detected()
	 */
	schedule_delayed_work(&adapter->func_recovery_work,
			      msecs_to_jiffies(1000));
	netif_device_attach(netdev);
	return;
err:
	dev_err(&adapter->pdev->dev, "EEH resume failed\n");
}
4277
/* PCI error-recovery (EEH) entry points handed to the PCI core */
static const struct pci_error_handlers be_eeh_handlers = {
	.error_detected = be_eeh_err_detected,
	.slot_reset = be_eeh_reset,
	.resume = be_eeh_resume,
};
4283
/* PCI driver descriptor: binds the probe/remove, legacy PM, shutdown
 * and EEH callbacks above to the device IDs in be_dev_ids.
 */
static struct pci_driver be_driver = {
	.name = DRV_NAME,
	.id_table = be_dev_ids,
	.probe = be_probe,
	.remove = be_remove,
	.suspend = be_suspend,
	.resume = be_resume,
	.shutdown = be_shutdown,
	.err_handler = &be_eeh_handlers
};
4294
4295static int __init be_init_module(void)
4296{
Joe Perches8e95a202009-12-03 07:58:21 +00004297 if (rx_frag_size != 8192 && rx_frag_size != 4096 &&
4298 rx_frag_size != 2048) {
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004299 printk(KERN_WARNING DRV_NAME
4300 " : Module param rx_frag_size must be 2048/4096/8192."
4301 " Using 2048\n");
4302 rx_frag_size = 2048;
4303 }
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004304
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004305 return pci_register_driver(&be_driver);
4306}
4307module_init(be_init_module);
4308
/* Module exit point: unregister the PCI driver registered in
 * be_init_module().
 */
static void __exit be_exit_module(void)
{
	pci_unregister_driver(&be_driver);
}
module_exit(be_exit_module);