blob: 7a483fdd974ba1d548b891f76dd9df76ca694a12 [file] [log] [blame]
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001/*
Ajit Khaparded2145cd2011-03-16 08:20:46 +00002 * Copyright (C) 2005 - 2011 Emulex
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003 * All rights reserved.
4 *
5 * This program is free software; you can redistribute it and/or
6 * modify it under the terms of the GNU General Public License version 2
7 * as published by the Free Software Foundation. The full GNU General
8 * Public License is included in this distribution in the file called COPYING.
9 *
10 * Contact Information:
Ajit Khaparded2145cd2011-03-16 08:20:46 +000011 * linux-drivers@emulex.com
Sathya Perla6b7c5b92009-03-11 23:32:03 -070012 *
Ajit Khaparded2145cd2011-03-16 08:20:46 +000013 * Emulex
14 * 3333 Susan Street
15 * Costa Mesa, CA 92626
Sathya Perla6b7c5b92009-03-11 23:32:03 -070016 */
17
Paul Gortmaker70c71602011-05-22 16:47:17 -040018#include <linux/prefetch.h>
Paul Gortmaker9d9779e2011-07-03 15:21:01 -040019#include <linux/module.h>
Sathya Perla6b7c5b92009-03-11 23:32:03 -070020#include "be.h"
Sathya Perla8788fdc2009-07-27 22:52:03 +000021#include "be_cmds.h"
Stephen Hemminger65f71b82009-03-27 00:25:24 -070022#include <asm/div64.h>
Sathya Perlad6b6d982012-09-05 01:56:48 +000023#include <linux/aer.h>
Sathya Perla6b7c5b92009-03-11 23:32:03 -070024
25MODULE_VERSION(DRV_VER);
26MODULE_DEVICE_TABLE(pci, be_dev_ids);
27MODULE_DESCRIPTION(DRV_DESC " " DRV_VER);
28MODULE_AUTHOR("ServerEngines Corporation");
29MODULE_LICENSE("GPL");
30
/* Number of PCI virtual functions to provision at probe time (SR-IOV).
 * Read-only at runtime (S_IRUGO); defaults to 0 (no VFs).
 */
static unsigned int num_vfs;
module_param(num_vfs, uint, S_IRUGO);
MODULE_PARM_DESC(num_vfs, "Number of PCI VFs to initialize");

/* Size of each RX buffer fragment posted to the adapter (bytes). */
static ushort rx_frag_size = 2048;
module_param(rx_frag_size, ushort, S_IRUGO);
MODULE_PARM_DESC(rx_frag_size, "Size of a fragment that holds rcvd data.");
38
/* PCI vendor/device IDs claimed by this driver (BE2/BE3 and
 * Lancer/OneConnect families).
 */
static DEFINE_PCI_DEVICE_TABLE(be_dev_ids) = {
	{ PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID1) },
	{ PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID2) },
	{ PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID1) },
	{ PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID2) },
	{ PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID3)},
	{ PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID4)},
	{ PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID5)},
	{ 0 }
};
MODULE_DEVICE_TABLE(pci, be_dev_ids);
/* UE Status Low CSR: human-readable names for each bit of the
 * unrecoverable-error status-low register, indexed by bit position.
 */
static const char * const ue_status_low_desc[] = {
	"CEV",
	"CTX",
	"DBUF",
	"ERX",
	"Host",
	"MPU",
	"NDMA",
	"PTC ",
	"RDMA ",
	"RXF ",
	"RXIPS ",
	"RXULP0 ",
	"RXULP1 ",
	"RXULP2 ",
	"TIM ",
	"TPOST ",
	"TPRE ",
	"TXIPS ",
	"TXULP0 ",
	"TXULP1 ",
	"UC ",
	"WDMA ",
	"TXULP2 ",
	"HOST1 ",
	"P0_OB_LINK ",
	"P1_OB_LINK ",
	"HOST_GPIO ",
	"MBOX ",
	"AXGMAC0",
	"AXGMAC1",
	"JTAG",
	"MPU_INTPEND"
};
/* UE Status High CSR: names for each bit of the unrecoverable-error
 * status-high register, indexed by bit position.
 */
static const char * const ue_status_hi_desc[] = {
	"LPCMEMHOST",
	"MGMT_MAC",
	"PCS0ONLINE",
	"MPU_IRAM",
	"PCS1ONLINE",
	"PCTL0",
	"PCTL1",
	"PMEM",
	"RR",
	"TXPB",
	"RXPP",
	"XAUI",
	"TXP",
	"ARM",
	"IPC",
	"HOST2",
	"HOST3",
	"HOST4",
	"HOST5",
	"HOST6",
	"HOST7",
	"HOST8",
	"HOST9",
	"NETC",
	"Unknown",
	"Unknown",
	"Unknown",
	"Unknown",
	"Unknown",
	"Unknown",
	"Unknown",
	"Unknown"
};
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700120
Sathya Perla752961a2011-10-24 02:45:03 +0000121/* Is BE in a multi-channel mode */
122static inline bool be_is_mc(struct be_adapter *adapter) {
123 return (adapter->function_mode & FLEX10_MODE ||
124 adapter->function_mode & VNIC_MODE ||
125 adapter->function_mode & UMC_ENABLED);
126}
127
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700128static void be_queue_free(struct be_adapter *adapter, struct be_queue_info *q)
129{
130 struct be_dma_mem *mem = &q->dma_mem;
Sathya Perla1cfafab2012-02-23 18:50:15 +0000131 if (mem->va) {
Ivan Vecera2b7bceb2011-02-02 08:05:12 +0000132 dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
133 mem->dma);
Sathya Perla1cfafab2012-02-23 18:50:15 +0000134 mem->va = NULL;
135 }
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700136}
137
138static int be_queue_alloc(struct be_adapter *adapter, struct be_queue_info *q,
139 u16 len, u16 entry_size)
140{
141 struct be_dma_mem *mem = &q->dma_mem;
142
143 memset(q, 0, sizeof(*q));
144 q->len = len;
145 q->entry_size = entry_size;
146 mem->size = len * entry_size;
Ivan Vecera2b7bceb2011-02-02 08:05:12 +0000147 mem->va = dma_alloc_coherent(&adapter->pdev->dev, mem->size, &mem->dma,
148 GFP_KERNEL);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700149 if (!mem->va)
Sathya Perla10ef9ab2012-02-09 18:05:27 +0000150 return -ENOMEM;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700151 memset(mem->va, 0, mem->size);
152 return 0;
153}
154
/* Enable or disable the host-interrupt bit in the PCI-config-space
 * MEMBAR control register. No-op when the bit already has the desired
 * state, or when the device is in EEH error state (config-space access
 * is unsafe then).
 */
static void be_intr_set(struct be_adapter *adapter, bool enable)
{
	u32 reg, enabled;

	if (adapter->eeh_error)
		return;

	pci_read_config_dword(adapter->pdev, PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET,
				&reg);
	enabled = reg & MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;

	if (!enabled && enable)
		reg |= MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
	else if (enabled && !enable)
		reg &= ~MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
	else
		return;		/* already in the requested state */

	pci_write_config_dword(adapter->pdev,
			PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET, reg);
}
176
Sathya Perla8788fdc2009-07-27 22:52:03 +0000177static void be_rxq_notify(struct be_adapter *adapter, u16 qid, u16 posted)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700178{
179 u32 val = 0;
180 val |= qid & DB_RQ_RING_ID_MASK;
181 val |= posted << DB_RQ_NUM_POSTED_SHIFT;
Sathya Perlaf3eb62d2010-06-29 00:11:17 +0000182
183 wmb();
Sathya Perla8788fdc2009-07-27 22:52:03 +0000184 iowrite32(val, adapter->db + DB_RQ_OFFSET);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700185}
186
Sathya Perla8788fdc2009-07-27 22:52:03 +0000187static void be_txq_notify(struct be_adapter *adapter, u16 qid, u16 posted)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700188{
189 u32 val = 0;
190 val |= qid & DB_TXULP_RING_ID_MASK;
191 val |= (posted & DB_TXULP_NUM_POSTED_MASK) << DB_TXULP_NUM_POSTED_SHIFT;
Sathya Perlaf3eb62d2010-06-29 00:11:17 +0000192
193 wmb();
Sathya Perla8788fdc2009-07-27 22:52:03 +0000194 iowrite32(val, adapter->db + DB_TXULP1_OFFSET);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700195}
196
/* Ring the EQ doorbell for 'qid': optionally re-arm the EQ and/or clear
 * the interrupt, and acknowledge 'num_popped' consumed event entries.
 * Skipped entirely when the device is in EEH error state.
 */
static void be_eq_notify(struct be_adapter *adapter, u16 qid,
		bool arm, bool clear_int, u16 num_popped)
{
	u32 val = 0;
	val |= qid & DB_EQ_RING_ID_MASK;
	/* high-order ring-id bits live in a separate field */
	val |= ((qid & DB_EQ_RING_ID_EXT_MASK) <<
			DB_EQ_RING_ID_EXT_MASK_SHIFT);

	if (adapter->eeh_error)
		return;

	if (arm)
		val |= 1 << DB_EQ_REARM_SHIFT;
	if (clear_int)
		val |= 1 << DB_EQ_CLR_SHIFT;
	val |= 1 << DB_EQ_EVNT_SHIFT;
	val |= num_popped << DB_EQ_NUM_POPPED_SHIFT;
	iowrite32(val, adapter->db + DB_EQ_OFFSET);
}
216
/* Ring the CQ doorbell for 'qid': optionally re-arm the CQ and
 * acknowledge 'num_popped' consumed completion entries. Skipped when
 * the device is in EEH error state.
 */
void be_cq_notify(struct be_adapter *adapter, u16 qid, bool arm, u16 num_popped)
{
	u32 val = 0;
	val |= qid & DB_CQ_RING_ID_MASK;
	/* high-order ring-id bits live in a separate field */
	val |= ((qid & DB_CQ_RING_ID_EXT_MASK) <<
			DB_CQ_RING_ID_EXT_MASK_SHIFT);

	if (adapter->eeh_error)
		return;

	if (arm)
		val |= 1 << DB_CQ_REARM_SHIFT;
	val |= num_popped << DB_CQ_NUM_POPPED_SHIFT;
	iowrite32(val, adapter->db + DB_CQ_OFFSET);
}
232
/* ndo_set_mac_address handler. Validates the requested MAC, programs it
 * into the adapter (adding the new pmac before deleting the old one so a
 * valid MAC is present at all times), then updates netdev->dev_addr.
 * Returns 0 on success or a negative errno / firmware status on failure.
 */
static int be_mac_addr_set(struct net_device *netdev, void *p)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct sockaddr *addr = p;
	int status = 0;
	u8 current_mac[ETH_ALEN];
	u32 pmac_id = adapter->pmac_id[0];
	bool active_mac = true;

	if (!is_valid_ether_addr(addr->sa_data))
		return -EADDRNOTAVAIL;

	/* For BE VF, MAC address is already activated by PF.
	 * Hence only operation left is updating netdev->devaddr.
	 * Update it if user is passing the same MAC which was used
	 * during configuring VF MAC from PF(Hypervisor).
	 */
	if (!lancer_chip(adapter) && !be_physfn(adapter)) {
		status = be_cmd_mac_addr_query(adapter, current_mac,
					       false, adapter->if_handle, 0);
		if (!status && !memcmp(current_mac, addr->sa_data, ETH_ALEN))
			goto done;
		else
			goto err;	/* VF may not change its MAC */
	}

	/* Requested MAC is already the current one: nothing to program */
	if (!memcmp(addr->sa_data, netdev->dev_addr, ETH_ALEN))
		goto done;

	/* For Lancer check if any MAC is active.
	 * If active, get its mac id.
	 */
	if (lancer_chip(adapter) && !be_physfn(adapter))
		be_cmd_get_mac_from_list(adapter, current_mac, &active_mac,
					 &pmac_id, 0);

	/* Add the new MAC first so the interface never loses its address */
	status = be_cmd_pmac_add(adapter, (u8 *)addr->sa_data,
				 adapter->if_handle,
				 &adapter->pmac_id[0], 0);

	if (status)
		goto err;

	/* Now the new MAC is in place, retire the previously active one */
	if (active_mac)
		be_cmd_pmac_del(adapter, adapter->if_handle,
				pmac_id, 0);
done:
	memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
	return 0;
err:
	dev_err(&adapter->pdev->dev, "MAC %pM set Failed\n", addr->sa_data);
	return status;
}
286
/* Copy the BE2 (v0) firmware statistics block into the driver's
 * generation-independent drv_stats, converting from little-endian first.
 * Per-port counters come from the slot matching adapter->port_num;
 * jabber events are tracked per physical port in the rxf block.
 */
static void populate_be2_stats(struct be_adapter *adapter)
{
	struct be_hw_stats_v0 *hw_stats = hw_stats_from_cmd(adapter);
	struct be_pmem_stats *pmem_sts = &hw_stats->pmem;
	struct be_rxf_stats_v0 *rxf_stats = &hw_stats->rxf;
	struct be_port_rxf_stats_v0 *port_stats =
		&rxf_stats->port[adapter->port_num];
	struct be_drv_stats *drvs = &adapter->drv_stats;

	be_dws_le_to_cpu(hw_stats, sizeof(*hw_stats));
	drvs->rx_pause_frames = port_stats->rx_pause_frames;
	drvs->rx_crc_errors = port_stats->rx_crc_errors;
	drvs->rx_control_frames = port_stats->rx_control_frames;
	drvs->rx_in_range_errors = port_stats->rx_in_range_errors;
	drvs->rx_frame_too_long = port_stats->rx_frame_too_long;
	drvs->rx_dropped_runt = port_stats->rx_dropped_runt;
	drvs->rx_ip_checksum_errs = port_stats->rx_ip_checksum_errs;
	drvs->rx_tcp_checksum_errs = port_stats->rx_tcp_checksum_errs;
	drvs->rx_udp_checksum_errs = port_stats->rx_udp_checksum_errs;
	drvs->rxpp_fifo_overflow_drop = port_stats->rx_fifo_overflow;
	drvs->rx_dropped_tcp_length = port_stats->rx_dropped_tcp_length;
	drvs->rx_dropped_too_small = port_stats->rx_dropped_too_small;
	drvs->rx_dropped_too_short = port_stats->rx_dropped_too_short;
	drvs->rx_out_range_errors = port_stats->rx_out_range_errors;
	drvs->rx_input_fifo_overflow_drop = port_stats->rx_input_fifo_overflow;
	drvs->rx_dropped_header_too_small =
		port_stats->rx_dropped_header_too_small;
	/* v0 HW splits address/vlan mismatch drops; fold them together */
	drvs->rx_address_mismatch_drops =
		port_stats->rx_address_mismatch_drops +
		port_stats->rx_vlan_mismatch_drops;
	drvs->rx_alignment_symbol_errors =
		port_stats->rx_alignment_symbol_errors;

	drvs->tx_pauseframes = port_stats->tx_pauseframes;
	drvs->tx_controlframes = port_stats->tx_controlframes;

	if (adapter->port_num)
		drvs->jabber_events = rxf_stats->port1_jabber_events;
	else
		drvs->jabber_events = rxf_stats->port0_jabber_events;
	drvs->rx_drops_no_pbuf = rxf_stats->rx_drops_no_pbuf;
	drvs->rx_drops_no_erx_descr = rxf_stats->rx_drops_no_erx_descr;
	drvs->forwarded_packets = rxf_stats->forwarded_packets;
	drvs->rx_drops_mtu = rxf_stats->rx_drops_mtu;
	drvs->rx_drops_no_tpre_descr = rxf_stats->rx_drops_no_tpre_descr;
	drvs->rx_drops_too_many_frags = rxf_stats->rx_drops_too_many_frags;
	adapter->drv_stats.eth_red_drops = pmem_sts->eth_red_drops;
}
335
/* Copy the BE3 (v1) firmware statistics block into the driver's
 * generation-independent drv_stats, converting from little-endian first.
 * Unlike v0, jabber events and address-mismatch drops are reported
 * directly per port.
 */
static void populate_be3_stats(struct be_adapter *adapter)
{
	struct be_hw_stats_v1 *hw_stats = hw_stats_from_cmd(adapter);
	struct be_pmem_stats *pmem_sts = &hw_stats->pmem;
	struct be_rxf_stats_v1 *rxf_stats = &hw_stats->rxf;
	struct be_port_rxf_stats_v1 *port_stats =
		&rxf_stats->port[adapter->port_num];
	struct be_drv_stats *drvs = &adapter->drv_stats;

	be_dws_le_to_cpu(hw_stats, sizeof(*hw_stats));
	drvs->pmem_fifo_overflow_drop = port_stats->pmem_fifo_overflow_drop;
	drvs->rx_priority_pause_frames = port_stats->rx_priority_pause_frames;
	drvs->rx_pause_frames = port_stats->rx_pause_frames;
	drvs->rx_crc_errors = port_stats->rx_crc_errors;
	drvs->rx_control_frames = port_stats->rx_control_frames;
	drvs->rx_in_range_errors = port_stats->rx_in_range_errors;
	drvs->rx_frame_too_long = port_stats->rx_frame_too_long;
	drvs->rx_dropped_runt = port_stats->rx_dropped_runt;
	drvs->rx_ip_checksum_errs = port_stats->rx_ip_checksum_errs;
	drvs->rx_tcp_checksum_errs = port_stats->rx_tcp_checksum_errs;
	drvs->rx_udp_checksum_errs = port_stats->rx_udp_checksum_errs;
	drvs->rx_dropped_tcp_length = port_stats->rx_dropped_tcp_length;
	drvs->rx_dropped_too_small = port_stats->rx_dropped_too_small;
	drvs->rx_dropped_too_short = port_stats->rx_dropped_too_short;
	drvs->rx_out_range_errors = port_stats->rx_out_range_errors;
	drvs->rx_dropped_header_too_small =
		port_stats->rx_dropped_header_too_small;
	drvs->rx_input_fifo_overflow_drop =
		port_stats->rx_input_fifo_overflow_drop;
	drvs->rx_address_mismatch_drops = port_stats->rx_address_mismatch_drops;
	drvs->rx_alignment_symbol_errors =
		port_stats->rx_alignment_symbol_errors;
	drvs->rxpp_fifo_overflow_drop = port_stats->rxpp_fifo_overflow_drop;
	drvs->tx_pauseframes = port_stats->tx_pauseframes;
	drvs->tx_controlframes = port_stats->tx_controlframes;
	drvs->jabber_events = port_stats->jabber_events;
	drvs->rx_drops_no_pbuf = rxf_stats->rx_drops_no_pbuf;
	drvs->rx_drops_no_erx_descr = rxf_stats->rx_drops_no_erx_descr;
	drvs->forwarded_packets = rxf_stats->forwarded_packets;
	drvs->rx_drops_mtu = rxf_stats->rx_drops_mtu;
	drvs->rx_drops_no_tpre_descr = rxf_stats->rx_drops_no_tpre_descr;
	drvs->rx_drops_too_many_frags = rxf_stats->rx_drops_too_many_frags;
	adapter->drv_stats.eth_red_drops = pmem_sts->eth_red_drops;
}
380
/* Copy the Lancer per-physical-port (pport) statistics into the driver's
 * generation-independent drv_stats, converting from little-endian first.
 * Lancer exposes 64-bit counters; only the low 32 bits (_lo) are kept
 * for fields drv_stats stores as 32-bit.
 */
static void populate_lancer_stats(struct be_adapter *adapter)
{

	struct be_drv_stats *drvs = &adapter->drv_stats;
	struct lancer_pport_stats *pport_stats =
					pport_stats_from_cmd(adapter);

	be_dws_le_to_cpu(pport_stats, sizeof(*pport_stats));
	drvs->rx_pause_frames = pport_stats->rx_pause_frames_lo;
	drvs->rx_crc_errors = pport_stats->rx_crc_errors_lo;
	drvs->rx_control_frames = pport_stats->rx_control_frames_lo;
	drvs->rx_in_range_errors = pport_stats->rx_in_range_errors;
	drvs->rx_frame_too_long = pport_stats->rx_frames_too_long_lo;
	drvs->rx_dropped_runt = pport_stats->rx_dropped_runt;
	drvs->rx_ip_checksum_errs = pport_stats->rx_ip_checksum_errors;
	drvs->rx_tcp_checksum_errs = pport_stats->rx_tcp_checksum_errors;
	drvs->rx_udp_checksum_errs = pport_stats->rx_udp_checksum_errors;
	drvs->rx_dropped_tcp_length =
				pport_stats->rx_dropped_invalid_tcp_length;
	drvs->rx_dropped_too_small = pport_stats->rx_dropped_too_small;
	drvs->rx_dropped_too_short = pport_stats->rx_dropped_too_short;
	drvs->rx_out_range_errors = pport_stats->rx_out_of_range_errors;
	drvs->rx_dropped_header_too_small =
				pport_stats->rx_dropped_header_too_small;
	drvs->rx_input_fifo_overflow_drop = pport_stats->rx_fifo_overflow;
	/* fold address and vlan mismatch drops together, as on BE2 */
	drvs->rx_address_mismatch_drops =
					pport_stats->rx_address_mismatch_drops +
					pport_stats->rx_vlan_mismatch_drops;
	drvs->rx_alignment_symbol_errors = pport_stats->rx_symbol_errors_lo;
	drvs->rxpp_fifo_overflow_drop = pport_stats->rx_fifo_overflow;
	drvs->tx_pauseframes = pport_stats->tx_pause_frames_lo;
	drvs->tx_controlframes = pport_stats->tx_control_frames_lo;
	drvs->jabber_events = pport_stats->rx_jabbers;
	drvs->forwarded_packets = pport_stats->num_forwards_lo;
	drvs->rx_drops_mtu = pport_stats->rx_drops_mtu_lo;
	drvs->rx_drops_too_many_frags =
				pport_stats->rx_drops_too_many_frags_lo;
}
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000419
Sathya Perla09c1c682011-08-22 19:41:53 +0000420static void accumulate_16bit_val(u32 *acc, u16 val)
421{
422#define lo(x) (x & 0xFFFF)
423#define hi(x) (x & 0xFFFF0000)
424 bool wrapped = val < lo(*acc);
425 u32 newacc = hi(*acc) + val;
426
427 if (wrapped)
428 newacc += 65536;
429 ACCESS_ONCE(*acc) = newacc;
430}
431
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000432void be_parse_stats(struct be_adapter *adapter)
433{
Sathya Perlaac124ff2011-07-25 19:10:14 +0000434 struct be_erx_stats_v1 *erx = be_erx_stats_from_cmd(adapter);
435 struct be_rx_obj *rxo;
436 int i;
437
Selvin Xavier005d5692011-05-16 07:36:35 +0000438 if (adapter->generation == BE_GEN3) {
439 if (lancer_chip(adapter))
440 populate_lancer_stats(adapter);
441 else
442 populate_be3_stats(adapter);
443 } else {
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000444 populate_be2_stats(adapter);
Selvin Xavier005d5692011-05-16 07:36:35 +0000445 }
Sathya Perlaac124ff2011-07-25 19:10:14 +0000446
Padmanabh Ratnakard51ebd32012-04-25 01:46:52 +0000447 if (lancer_chip(adapter))
448 goto done;
449
Sathya Perlaac124ff2011-07-25 19:10:14 +0000450 /* as erx_v1 is longer than v0, ok to use v1 defn for v0 access */
Sathya Perla09c1c682011-08-22 19:41:53 +0000451 for_all_rx_queues(adapter, rxo, i) {
452 /* below erx HW counter can actually wrap around after
453 * 65535. Driver accumulates a 32-bit value
454 */
455 accumulate_16bit_val(&rx_stats(rxo)->rx_drops_no_frags,
456 (u16)erx->rx_drops_no_fragments[rxo->q.id]);
457 }
Padmanabh Ratnakard51ebd32012-04-25 01:46:52 +0000458done:
459 return;
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000460}
461
/* ndo_get_stats64: aggregate per-RX/TX-queue software counters and the
 * HW-derived drv_stats into 'stats'. The per-queue 64-bit packet/byte
 * counters are read inside u64_stats fetch/retry loops so the pair is
 * consistent even on 32-bit hosts.
 */
static struct rtnl_link_stats64 *be_get_stats64(struct net_device *netdev,
					struct rtnl_link_stats64 *stats)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_drv_stats *drvs = &adapter->drv_stats;
	struct be_rx_obj *rxo;
	struct be_tx_obj *txo;
	u64 pkts, bytes;
	unsigned int start;
	int i;

	for_all_rx_queues(adapter, rxo, i) {
		const struct be_rx_stats *rx_stats = rx_stats(rxo);
		do {
			start = u64_stats_fetch_begin_bh(&rx_stats->sync);
			pkts = rx_stats(rxo)->rx_pkts;
			bytes = rx_stats(rxo)->rx_bytes;
		} while (u64_stats_fetch_retry_bh(&rx_stats->sync, start));
		stats->rx_packets += pkts;
		stats->rx_bytes += bytes;
		stats->multicast += rx_stats(rxo)->rx_mcast_pkts;
		stats->rx_dropped += rx_stats(rxo)->rx_drops_no_skbs +
					rx_stats(rxo)->rx_drops_no_frags;
	}

	for_all_tx_queues(adapter, txo, i) {
		const struct be_tx_stats *tx_stats = tx_stats(txo);
		do {
			start = u64_stats_fetch_begin_bh(&tx_stats->sync);
			pkts = tx_stats(txo)->tx_pkts;
			bytes = tx_stats(txo)->tx_bytes;
		} while (u64_stats_fetch_retry_bh(&tx_stats->sync, start));
		stats->tx_packets += pkts;
		stats->tx_bytes += bytes;
	}

	/* bad pkts received */
	stats->rx_errors = drvs->rx_crc_errors +
		drvs->rx_alignment_symbol_errors +
		drvs->rx_in_range_errors +
		drvs->rx_out_range_errors +
		drvs->rx_frame_too_long +
		drvs->rx_dropped_too_small +
		drvs->rx_dropped_too_short +
		drvs->rx_dropped_header_too_small +
		drvs->rx_dropped_tcp_length +
		drvs->rx_dropped_runt;

	/* detailed rx errors */
	stats->rx_length_errors = drvs->rx_in_range_errors +
		drvs->rx_out_range_errors +
		drvs->rx_frame_too_long;

	stats->rx_crc_errors = drvs->rx_crc_errors;

	/* frame alignment errors */
	stats->rx_frame_errors = drvs->rx_alignment_symbol_errors;

	/* receiver fifo overrun */
	/* drops_no_pbuf is no per i/f, it's per BE card */
	stats->rx_fifo_errors = drvs->rxpp_fifo_overflow_drop +
				drvs->rx_input_fifo_overflow_drop +
				drvs->rx_drops_no_pbuf;
	return stats;
}
527
Ajit Khapardeb236916a2011-12-30 12:15:40 +0000528void be_link_status_update(struct be_adapter *adapter, u8 link_status)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700529{
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700530 struct net_device *netdev = adapter->netdev;
531
Ajit Khapardeb236916a2011-12-30 12:15:40 +0000532 if (!(adapter->flags & BE_FLAGS_LINK_STATUS_INIT)) {
Sathya Perlaea172a02011-08-02 19:57:42 +0000533 netif_carrier_off(netdev);
Ajit Khapardeb236916a2011-12-30 12:15:40 +0000534 adapter->flags |= BE_FLAGS_LINK_STATUS_INIT;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700535 }
Ajit Khapardeb236916a2011-12-30 12:15:40 +0000536
537 if ((link_status & LINK_STATUS_MASK) == LINK_UP)
538 netif_carrier_on(netdev);
539 else
540 netif_carrier_off(netdev);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700541}
542
/* Account one transmitted skb in the per-TX-queue software stats.
 * @wrb_cnt:  WRBs consumed by this skb
 * @copied:   bytes queued for transmit
 * @gso_segs: GSO segment count (0 when the skb is not GSO)
 * @stopped:  true if the TX queue was stopped as a result
 */
static void be_tx_stats_update(struct be_tx_obj *txo,
			u32 wrb_cnt, u32 copied, u32 gso_segs, bool stopped)
{
	struct be_tx_stats *stats = tx_stats(txo);

	u64_stats_update_begin(&stats->sync);
	stats->tx_reqs++;
	stats->tx_wrbs += wrb_cnt;
	stats->tx_bytes += copied;
	/* a GSO skb counts as gso_segs on-wire packets */
	stats->tx_pkts += (gso_segs ? gso_segs : 1);
	if (stopped)
		stats->tx_stops++;
	u64_stats_update_end(&stats->sync);
}
557
558/* Determine number of WRB entries needed to xmit data in an skb */
Sathya Perlafe6d2a32010-11-21 23:25:50 +0000559static u32 wrb_cnt_for_skb(struct be_adapter *adapter, struct sk_buff *skb,
560 bool *dummy)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700561{
David S. Millerebc8d2a2009-06-09 01:01:31 -0700562 int cnt = (skb->len > skb->data_len);
563
564 cnt += skb_shinfo(skb)->nr_frags;
565
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700566 /* to account for hdr wrb */
567 cnt++;
Sathya Perlafe6d2a32010-11-21 23:25:50 +0000568 if (lancer_chip(adapter) || !(cnt & 1)) {
569 *dummy = false;
570 } else {
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700571 /* add a dummy to make it an even num */
572 cnt++;
573 *dummy = true;
Sathya Perlafe6d2a32010-11-21 23:25:50 +0000574 }
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700575 BUG_ON(cnt > BE_MAX_TX_FRAG_COUNT);
576 return cnt;
577}
578
/* Fill a TX WRB with the DMA address and length of one buffer fragment. */
static inline void wrb_fill(struct be_eth_wrb *wrb, u64 addr, int len)
{
	wrb->frag_pa_hi = upper_32_bits(addr);
	wrb->frag_pa_lo = addr & 0xFFFFFFFF;
	wrb->frag_len = len & ETH_WRB_FRAG_LEN_MASK;
	wrb->rsvd0 = 0;		/* reserved field, cleared explicitly */
}
586
Ajit Khaparde1ded1322011-12-09 13:53:17 +0000587static inline u16 be_get_tx_vlan_tag(struct be_adapter *adapter,
588 struct sk_buff *skb)
589{
590 u8 vlan_prio;
591 u16 vlan_tag;
592
593 vlan_tag = vlan_tx_tag_get(skb);
594 vlan_prio = (vlan_tag & VLAN_PRIO_MASK) >> VLAN_PRIO_SHIFT;
595 /* If vlan priority provided by OS is NOT in available bmap */
596 if (!(adapter->vlan_prio_bmap & (1 << vlan_prio)))
597 vlan_tag = (vlan_tag & ~VLAN_PRIO_MASK) |
598 adapter->recommended_prio;
599
600 return vlan_tag;
601}
602
Somnath Kotur93040ae2012-06-26 22:32:10 +0000603static int be_vlan_tag_chk(struct be_adapter *adapter, struct sk_buff *skb)
604{
605 return vlan_tx_tag_present(skb) || adapter->pvid;
606}
607
/* Populate the TX header WRB for 'skb': CRC, LSO/checksum offload, VLAN
 * insertion and the event/completion bits. 'wrb_cnt' is the total WRB
 * count for this skb (incl. this header and any dummy); 'len' is the
 * total payload length.
 */
static void wrb_fill_hdr(struct be_adapter *adapter, struct be_eth_hdr_wrb *hdr,
		struct sk_buff *skb, u32 wrb_cnt, u32 len)
{
	u16 vlan_tag;

	memset(hdr, 0, sizeof(*hdr));

	AMAP_SET_BITS(struct amap_eth_hdr_wrb, crc, hdr, 1);

	if (skb_is_gso(skb)) {
		AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso, hdr, 1);
		AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso_mss,
			hdr, skb_shinfo(skb)->gso_size);
		/* lso6 is only set on BE; Lancer handles IPv6 LSO itself */
		if (skb_is_gso_v6(skb) && !lancer_chip(adapter))
			AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso6, hdr, 1);
		/* Lancer A0 silicon also needs explicit csum bits with LSO */
		if (lancer_chip(adapter) && adapter->sli_family ==
			LANCER_A0_SLI_FAMILY) {
			AMAP_SET_BITS(struct amap_eth_hdr_wrb, ipcs, hdr, 1);
			if (is_tcp_pkt(skb))
				AMAP_SET_BITS(struct amap_eth_hdr_wrb,
								tcpcs, hdr, 1);
			else if (is_udp_pkt(skb))
				AMAP_SET_BITS(struct amap_eth_hdr_wrb,
								udpcs, hdr, 1);
		}
	} else if (skb->ip_summed == CHECKSUM_PARTIAL) {
		if (is_tcp_pkt(skb))
			AMAP_SET_BITS(struct amap_eth_hdr_wrb, tcpcs, hdr, 1);
		else if (is_udp_pkt(skb))
			AMAP_SET_BITS(struct amap_eth_hdr_wrb, udpcs, hdr, 1);
	}

	if (vlan_tx_tag_present(skb)) {
		AMAP_SET_BITS(struct amap_eth_hdr_wrb, vlan, hdr, 1);
		vlan_tag = be_get_tx_vlan_tag(adapter, skb);
		AMAP_SET_BITS(struct amap_eth_hdr_wrb, vlan_tag, hdr, vlan_tag);
	}

	AMAP_SET_BITS(struct amap_eth_hdr_wrb, event, hdr, 1);
	AMAP_SET_BITS(struct amap_eth_hdr_wrb, complete, hdr, 1);
	AMAP_SET_BITS(struct amap_eth_hdr_wrb, num_wrb, hdr, wrb_cnt);
	AMAP_SET_BITS(struct amap_eth_hdr_wrb, len, hdr, len);
}
651
Ivan Vecera2b7bceb2011-02-02 08:05:12 +0000652static void unmap_tx_frag(struct device *dev, struct be_eth_wrb *wrb,
Sathya Perla7101e112010-03-22 20:41:12 +0000653 bool unmap_single)
654{
655 dma_addr_t dma;
656
657 be_dws_le_to_cpu(wrb, sizeof(*wrb));
658
659 dma = (u64)wrb->frag_pa_hi << 32 | (u64)wrb->frag_pa_lo;
FUJITA Tomonorib681ee72010-04-04 21:40:18 +0000660 if (wrb->frag_len) {
Sathya Perla7101e112010-03-22 20:41:12 +0000661 if (unmap_single)
Ivan Vecera2b7bceb2011-02-02 08:05:12 +0000662 dma_unmap_single(dev, dma, wrb->frag_len,
663 DMA_TO_DEVICE);
Sathya Perla7101e112010-03-22 20:41:12 +0000664 else
Ivan Vecera2b7bceb2011-02-02 08:05:12 +0000665 dma_unmap_page(dev, dma, wrb->frag_len, DMA_TO_DEVICE);
Sathya Perla7101e112010-03-22 20:41:12 +0000666 }
667}
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700668
/* Build the chain of TX WRBs for @skb in @txq: one header WRB, one WRB for
 * the linear (headlen) part, one per page fragment, and optionally a dummy
 * WRB to satisfy a HW alignment quirk.
 *
 * Returns the number of data bytes mapped, or 0 on DMA-mapping failure
 * (in which case all partial mappings are undone and the queue head is
 * restored, so the caller can simply drop the skb).
 */
static int make_tx_wrbs(struct be_adapter *adapter, struct be_queue_info *txq,
		struct sk_buff *skb, u32 wrb_cnt, bool dummy_wrb)
{
	dma_addr_t busaddr;
	int i, copied = 0;
	struct device *dev = &adapter->pdev->dev;
	struct sk_buff *first_skb = skb;
	struct be_eth_wrb *wrb;
	struct be_eth_hdr_wrb *hdr;
	bool map_single = false;
	u16 map_head;

	/* Reserve the first slot for the header WRB; it is filled last,
	 * once the total copied length is known.
	 */
	hdr = queue_head_node(txq);
	queue_head_inc(txq);
	/* Remember where the data WRBs start, for unwinding on error */
	map_head = txq->head;

	/* Map the linear part of the skb, if any */
	if (skb->len > skb->data_len) {
		int len = skb_headlen(skb);
		busaddr = dma_map_single(dev, skb->data, len, DMA_TO_DEVICE);
		if (dma_mapping_error(dev, busaddr))
			goto dma_err;
		map_single = true;
		wrb = queue_head_node(txq);
		wrb_fill(wrb, busaddr, len);
		be_dws_cpu_to_le(wrb, sizeof(*wrb));
		queue_head_inc(txq);
		copied += len;
	}

	/* One WRB per page fragment */
	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		const struct skb_frag_struct *frag =
			&skb_shinfo(skb)->frags[i];
		busaddr = skb_frag_dma_map(dev, frag, 0,
					   skb_frag_size(frag), DMA_TO_DEVICE);
		if (dma_mapping_error(dev, busaddr))
			goto dma_err;
		wrb = queue_head_node(txq);
		wrb_fill(wrb, busaddr, skb_frag_size(frag));
		be_dws_cpu_to_le(wrb, sizeof(*wrb));
		queue_head_inc(txq);
		copied += skb_frag_size(frag);
	}

	/* Pad with a zero-length WRB when the caller requests it */
	if (dummy_wrb) {
		wrb = queue_head_node(txq);
		wrb_fill(wrb, 0, 0);
		be_dws_cpu_to_le(wrb, sizeof(*wrb));
		queue_head_inc(txq);
	}

	/* Now fill the header WRB reserved above with the final totals */
	wrb_fill_hdr(adapter, hdr, first_skb, wrb_cnt, copied);
	be_dws_cpu_to_le(hdr, sizeof(*hdr));

	return copied;
dma_err:
	/* Unwind: rewind the queue head and unmap every WRB filled so far.
	 * Only the first unmapped entry can be a single mapping; the rest
	 * are page mappings, hence map_single is cleared after one pass.
	 */
	txq->head = map_head;
	while (copied) {
		wrb = queue_head_node(txq);
		unmap_tx_frag(dev, wrb, map_single);
		map_single = false;
		copied -= wrb->frag_len;
		queue_head_inc(txq);
	}
	return 0;
}
734
Somnath Kotur93040ae2012-06-26 22:32:10 +0000735static struct sk_buff *be_insert_vlan_in_pkt(struct be_adapter *adapter,
736 struct sk_buff *skb)
737{
738 u16 vlan_tag = 0;
739
740 skb = skb_share_check(skb, GFP_ATOMIC);
741 if (unlikely(!skb))
742 return skb;
743
744 if (vlan_tx_tag_present(skb)) {
745 vlan_tag = be_get_tx_vlan_tag(adapter, skb);
746 __vlan_put_tag(skb, vlan_tag);
747 skb->vlan_tci = 0;
748 }
749
750 return skb;
751}
752
/* ndo_start_xmit handler: applies two HW-bug workarounds, builds the WRB
 * chain, manages queue flow-control and rings the TX doorbell.
 * Always returns NETDEV_TX_OK; undeliverable skbs are freed here.
 */
static netdev_tx_t be_xmit(struct sk_buff *skb,
			struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_tx_obj *txo = &adapter->tx_obj[skb_get_queue_mapping(skb)];
	struct be_queue_info *txq = &txo->q;
	struct iphdr *ip = NULL;
	u32 wrb_cnt = 0, copied = 0;
	/* Save the current head so it can be restored if WRB setup fails */
	u32 start = txq->head, eth_hdr_len;
	bool dummy_wrb, stopped = false;

	eth_hdr_len = ntohs(skb->protocol) == ETH_P_8021Q ?
		VLAN_ETH_HLEN : ETH_HLEN;

	/* HW has a bug which considers padding bytes as legal
	 * and modifies the IPv4 hdr's 'tot_len' field.
	 * Workaround: trim short IPv4 VLAN pkts back to their true length
	 * so no padding bytes reach the HW.
	 */
	if (skb->len <= 60 && be_vlan_tag_chk(adapter, skb) &&
			is_ipv4_pkt(skb)) {
		ip = (struct iphdr *)ip_hdr(skb);
		pskb_trim(skb, eth_hdr_len + ntohs(ip->tot_len));
	}

	/* HW has a bug wherein it will calculate CSUM for VLAN
	 * pkts even though it is disabled.
	 * Manually insert VLAN in pkt.
	 */
	if (skb->ip_summed != CHECKSUM_PARTIAL &&
			be_vlan_tag_chk(adapter, skb)) {
		skb = be_insert_vlan_in_pkt(adapter, skb);
		if (unlikely(!skb))
			goto tx_drop;
	}

	wrb_cnt = wrb_cnt_for_skb(adapter, skb, &dummy_wrb);

	copied = make_tx_wrbs(adapter, txq, skb, wrb_cnt, dummy_wrb);
	if (copied) {
		int gso_segs = skb_shinfo(skb)->gso_segs;

		/* record the sent skb in the sent_skb table */
		BUG_ON(txo->sent_skb_list[start]);
		txo->sent_skb_list[start] = skb;

		/* Ensure txq has space for the next skb; Else stop the queue
		 * *BEFORE* ringing the tx doorbell, so that we serialze the
		 * tx compls of the current transmit which'll wake up the queue
		 */
		atomic_add(wrb_cnt, &txq->used);
		if ((BE_MAX_TX_FRAG_COUNT + atomic_read(&txq->used)) >=
								txq->len) {
			netif_stop_subqueue(netdev, skb_get_queue_mapping(skb));
			stopped = true;
		}

		be_txq_notify(adapter, txq->id, wrb_cnt);

		be_tx_stats_update(txo, wrb_cnt, copied, gso_segs, stopped);
	} else {
		/* DMA mapping failed: rewind the ring and drop the pkt */
		txq->head = start;
		dev_kfree_skb_any(skb);
	}
tx_drop:
	return NETDEV_TX_OK;
}
818
819static int be_change_mtu(struct net_device *netdev, int new_mtu)
820{
821 struct be_adapter *adapter = netdev_priv(netdev);
822 if (new_mtu < BE_MIN_MTU ||
Ajit Khaparde34a89b82010-02-09 01:32:43 +0000823 new_mtu > (BE_MAX_JUMBO_FRAME_SIZE -
824 (ETH_HLEN + ETH_FCS_LEN))) {
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700825 dev_info(&adapter->pdev->dev,
826 "MTU must be between %d and %d bytes\n",
Ajit Khaparde34a89b82010-02-09 01:32:43 +0000827 BE_MIN_MTU,
828 (BE_MAX_JUMBO_FRAME_SIZE - (ETH_HLEN + ETH_FCS_LEN)));
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700829 return -EINVAL;
830 }
831 dev_info(&adapter->pdev->dev, "MTU changed from %d to %d bytes\n",
832 netdev->mtu, new_mtu);
833 netdev->mtu = new_mtu;
834 return 0;
835}
836
837/*
Ajit Khaparde82903e42010-02-09 01:34:57 +0000838 * A max of 64 (BE_NUM_VLANS_SUPPORTED) vlans can be configured in BE.
839 * If the user configures more, place BE in vlan promiscuous mode.
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700840 */
static int be_vid_config(struct be_adapter *adapter)
{
	u16 vids[BE_NUM_VLANS_SUPPORTED];
	u16 num = 0, i;
	int status = 0;

	/* No need to further configure vids if in promiscuous mode */
	if (adapter->promiscuous)
		return 0;

	/* More vids configured than HW filters: fall back to vlan promisc */
	if (adapter->vlans_added > adapter->max_vlans)
		goto set_vlan_promisc;

	/* Construct VLAN Table to give to HW */
	for (i = 0; i < VLAN_N_VID; i++)
		if (adapter->vlan_tag[i])
			vids[num++] = cpu_to_le16(i);

	status = be_cmd_vlan_config(adapter, adapter->if_handle,
				    vids, num, 1, 0);

	/* Set to VLAN promisc mode as setting VLAN filter failed */
	if (status) {
		dev_info(&adapter->pdev->dev, "Exhausted VLAN HW filters.\n");
		dev_info(&adapter->pdev->dev, "Disabling HW VLAN filtering.\n");
		goto set_vlan_promisc;
	}

	return status;

set_vlan_promisc:
	/* NULL table + last arg 1 => enable vlan-promiscuous filtering */
	status = be_cmd_vlan_config(adapter, adapter->if_handle,
				    NULL, 0, 1, 1);
	return status;
}
876
/* ndo_vlan_rx_add_vid: mark @vid active and reprogram the HW VLAN table.
 * Only the PF may configure VLAN filters on this HW.
 */
static int be_vlan_add_vid(struct net_device *netdev, u16 vid)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	int status = 0;

	if (!be_physfn(adapter)) {
		status = -EINVAL;
		goto ret;
	}

	adapter->vlan_tag[vid] = 1;
	/* NOTE(review): the add path allows max_vlans + 1 while the remove
	 * path compares against max_vlans — looks like an off-by-one
	 * asymmetry; confirm against the firmware filter-count contract.
	 */
	if (adapter->vlans_added <= (adapter->max_vlans + 1))
		status = be_vid_config(adapter);

	/* Roll back the table entry if the HW update failed */
	if (!status)
		adapter->vlans_added++;
	else
		adapter->vlan_tag[vid] = 0;
ret:
	return status;
}
898
/* ndo_vlan_rx_kill_vid: clear @vid and reprogram the HW VLAN table.
 * Mirror image of be_vlan_add_vid(); PF-only.
 */
static int be_vlan_rem_vid(struct net_device *netdev, u16 vid)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	int status = 0;

	if (!be_physfn(adapter)) {
		status = -EINVAL;
		goto ret;
	}

	adapter->vlan_tag[vid] = 0;
	if (adapter->vlans_added <= adapter->max_vlans)
		status = be_vid_config(adapter);

	/* Restore the table entry if the HW update failed */
	if (!status)
		adapter->vlans_added--;
	else
		adapter->vlan_tag[vid] = 1;
ret:
	return status;
}
920
/* ndo_set_rx_mode: sync promiscuous / multicast / secondary-unicast filter
 * state to the HW. Falls back to (mcast-)promiscuous whenever the requested
 * address count exceeds the available HW filters.
 */
static void be_set_rx_mode(struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	int status;

	if (netdev->flags & IFF_PROMISC) {
		be_cmd_rx_filter(adapter, IFF_PROMISC, ON);
		adapter->promiscuous = true;
		goto done;
	}

	/* BE was previously in promiscuous mode; disable it */
	if (adapter->promiscuous) {
		adapter->promiscuous = false;
		be_cmd_rx_filter(adapter, IFF_PROMISC, OFF);

		/* Re-apply the vlan filters that promisc mode bypassed */
		if (adapter->vlans_added)
			be_vid_config(adapter);
	}

	/* Enable multicast promisc if num configured exceeds what we support */
	if (netdev->flags & IFF_ALLMULTI ||
	    netdev_mc_count(netdev) > adapter->max_mcast_mac) {
		be_cmd_rx_filter(adapter, IFF_ALLMULTI, ON);
		goto done;
	}

	/* Secondary unicast list changed: rebuild the pmac entries */
	if (netdev_uc_count(netdev) != adapter->uc_macs) {
		struct netdev_hw_addr *ha;
		int i = 1; /* First slot is claimed by the Primary MAC */

		/* Delete all currently-programmed secondary MACs */
		for (; adapter->uc_macs > 0; adapter->uc_macs--, i++) {
			be_cmd_pmac_del(adapter, adapter->if_handle,
					adapter->pmac_id[i], 0);
		}

		/* Too many UC addresses for the HW: go fully promiscuous */
		if (netdev_uc_count(netdev) > adapter->max_pmac_cnt) {
			be_cmd_rx_filter(adapter, IFF_PROMISC, ON);
			adapter->promiscuous = true;
			goto done;
		}

		netdev_for_each_uc_addr(ha, adapter->netdev) {
			adapter->uc_macs++; /* First slot is for Primary MAC */
			be_cmd_pmac_add(adapter, (u8 *)ha->addr,
					adapter->if_handle,
					&adapter->pmac_id[adapter->uc_macs], 0);
		}
	}

	status = be_cmd_rx_filter(adapter, IFF_MULTICAST, ON);

	/* Set to MCAST promisc mode if setting MULTICAST address fails */
	if (status) {
		dev_info(&adapter->pdev->dev, "Exhausted multicast HW filters.\n");
		dev_info(&adapter->pdev->dev, "Disabling HW multicast filtering.\n");
		be_cmd_rx_filter(adapter, IFF_ALLMULTI, ON);
	}
done:
	return;
}
982
/* ndo_set_vf_mac: program @mac as the MAC address of VF @vf.
 * Lancer chips use the mac-list interface; BE chips delete the old pmac
 * entry and add a new one.
 */
static int be_set_vf_mac(struct net_device *netdev, int vf, u8 *mac)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf];
	int status;
	bool active_mac = false;
	u32 pmac_id;
	u8 old_mac[ETH_ALEN];

	if (!sriov_enabled(adapter))
		return -EPERM;

	if (!is_valid_ether_addr(mac) || vf >= adapter->num_vfs)
		return -EINVAL;

	if (lancer_chip(adapter)) {
		/* Remove any currently active MAC before installing @mac */
		status = be_cmd_get_mac_from_list(adapter, old_mac, &active_mac,
						  &pmac_id, vf + 1);
		if (!status && active_mac)
			be_cmd_pmac_del(adapter, vf_cfg->if_handle,
					pmac_id, vf + 1);

		status = be_cmd_set_mac_list(adapter, mac, 1, vf + 1);
	} else {
		/* NOTE(review): the pmac_del status is overwritten by the
		 * pmac_add status below, so a failed delete is silently
		 * ignored — confirm this is intentional.
		 */
		status = be_cmd_pmac_del(adapter, vf_cfg->if_handle,
					 vf_cfg->pmac_id, vf + 1);

		status = be_cmd_pmac_add(adapter, mac, vf_cfg->if_handle,
					 &vf_cfg->pmac_id, vf + 1);
	}

	/* Cache the new MAC only on success */
	if (status)
		dev_err(&adapter->pdev->dev, "MAC %pM set on VF %d Failed\n",
				mac, vf);
	else
		memcpy(vf_cfg->mac_addr, mac, ETH_ALEN);

	return status;
}
1022
Ajit Khaparde64600ea2010-07-23 01:50:34 +00001023static int be_get_vf_config(struct net_device *netdev, int vf,
1024 struct ifla_vf_info *vi)
1025{
1026 struct be_adapter *adapter = netdev_priv(netdev);
Sathya Perla11ac75e2011-12-13 00:58:50 +00001027 struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf];
Ajit Khaparde64600ea2010-07-23 01:50:34 +00001028
Sathya Perla11ac75e2011-12-13 00:58:50 +00001029 if (!sriov_enabled(adapter))
Ajit Khaparde64600ea2010-07-23 01:50:34 +00001030 return -EPERM;
1031
Sathya Perla11ac75e2011-12-13 00:58:50 +00001032 if (vf >= adapter->num_vfs)
Ajit Khaparde64600ea2010-07-23 01:50:34 +00001033 return -EINVAL;
1034
1035 vi->vf = vf;
Sathya Perla11ac75e2011-12-13 00:58:50 +00001036 vi->tx_rate = vf_cfg->tx_rate;
1037 vi->vlan = vf_cfg->vlan_tag;
Ajit Khaparde64600ea2010-07-23 01:50:34 +00001038 vi->qos = 0;
Sathya Perla11ac75e2011-12-13 00:58:50 +00001039 memcpy(&vi->mac, vf_cfg->mac_addr, ETH_ALEN);
Ajit Khaparde64600ea2010-07-23 01:50:34 +00001040
1041 return 0;
1042}
1043
/* ndo_set_vf_vlan: configure transparent VLAN tagging for VF @vf via the
 * hardware switch config. vlan == 0 resets tagging back to the default vid.
 * @qos is accepted but not used by this HW.
 */
static int be_set_vf_vlan(struct net_device *netdev,
			int vf, u16 vlan, u8 qos)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	int status = 0;

	if (!sriov_enabled(adapter))
		return -EPERM;

	if (vf >= adapter->num_vfs || vlan > 4095)
		return -EINVAL;

	if (vlan) {
		if (adapter->vf_cfg[vf].vlan_tag != vlan) {
			/* If this is new value, program it. Else skip. */
			adapter->vf_cfg[vf].vlan_tag = vlan;

			status = be_cmd_set_hsw_config(adapter, vlan,
				vf + 1, adapter->vf_cfg[vf].if_handle);
		}
	} else {
		/* Reset Transparent Vlan Tagging. */
		adapter->vf_cfg[vf].vlan_tag = 0;
		vlan = adapter->vf_cfg[vf].def_vid;
		status = be_cmd_set_hsw_config(adapter, vlan, vf + 1,
			adapter->vf_cfg[vf].if_handle);
	}


	if (status)
		dev_info(&adapter->pdev->dev,
				"VLAN %d config on VF %d failed\n", vlan, vf);
	return status;
}
1078
Ajit Khapardee1d18732010-07-23 01:52:13 +00001079static int be_set_vf_tx_rate(struct net_device *netdev,
1080 int vf, int rate)
1081{
1082 struct be_adapter *adapter = netdev_priv(netdev);
1083 int status = 0;
1084
Sathya Perla11ac75e2011-12-13 00:58:50 +00001085 if (!sriov_enabled(adapter))
Ajit Khapardee1d18732010-07-23 01:52:13 +00001086 return -EPERM;
1087
Ajit Khaparde94f434c2011-12-30 12:15:30 +00001088 if (vf >= adapter->num_vfs)
Ajit Khapardee1d18732010-07-23 01:52:13 +00001089 return -EINVAL;
1090
Ajit Khaparde94f434c2011-12-30 12:15:30 +00001091 if (rate < 100 || rate > 10000) {
1092 dev_err(&adapter->pdev->dev,
1093 "tx rate must be between 100 and 10000 Mbps\n");
1094 return -EINVAL;
1095 }
Ajit Khapardee1d18732010-07-23 01:52:13 +00001096
Padmanabh Ratnakard5c18472012-10-20 06:01:53 +00001097 if (lancer_chip(adapter))
1098 status = be_cmd_set_profile_config(adapter, rate / 10, vf + 1);
1099 else
1100 status = be_cmd_set_qos(adapter, rate / 10, vf + 1);
Ajit Khapardee1d18732010-07-23 01:52:13 +00001101
1102 if (status)
Ajit Khaparde94f434c2011-12-30 12:15:30 +00001103 dev_err(&adapter->pdev->dev,
Ajit Khapardee1d18732010-07-23 01:52:13 +00001104 "tx rate %d on VF %d failed\n", rate, vf);
Ajit Khaparde94f434c2011-12-30 12:15:30 +00001105 else
1106 adapter->vf_cfg[vf].tx_rate = rate;
Ajit Khapardee1d18732010-07-23 01:52:13 +00001107 return status;
1108}
1109
/* Count this adapter's VFs by walking the PCI device list.
 * Returns the number of VFs whose physfn is our PF; with
 * vf_state == ASSIGNED, returns only those flagged as assigned to a guest.
 * Returns 0 if the PF has no SR-IOV capability.
 */
static int be_find_vfs(struct be_adapter *adapter, int vf_state)
{
	struct pci_dev *dev, *pdev = adapter->pdev;
	int vfs = 0, assigned_vfs = 0, pos;
	u16 offset, stride;

	pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_SRIOV);
	if (!pos)
		return 0;
	/* Read VF routing-ID offset/stride from the SR-IOV capability */
	pci_read_config_word(pdev, pos + PCI_SRIOV_VF_OFFSET, &offset);
	pci_read_config_word(pdev, pos + PCI_SRIOV_VF_STRIDE, &stride);

	/* Scan all devices of our vendor; pci_get_device() handles the
	 * refcounting of each candidate as the loop advances.
	 */
	dev = pci_get_device(pdev->vendor, PCI_ANY_ID, NULL);
	while (dev) {
		if (dev->is_virtfn && pci_physfn(dev) == pdev) {
			vfs++;
			if (dev->dev_flags & PCI_DEV_FLAGS_ASSIGNED)
				assigned_vfs++;
		}
		dev = pci_get_device(pdev->vendor, PCI_ANY_ID, dev);
	}
	return (vf_state == ASSIGNED) ? assigned_vfs : vfs;
}
1133
/* Adaptive interrupt coalescing: recompute the EQ delay for @eqo from the
 * observed RX packet rate and program it into HW when it changed.
 * With AIC disabled the statically configured eqd is applied instead.
 */
static void be_eqd_update(struct be_adapter *adapter, struct be_eq_obj *eqo)
{
	struct be_rx_stats *stats = rx_stats(&adapter->rx_obj[eqo->idx]);
	ulong now = jiffies;
	ulong delta = now - stats->rx_jiffies;
	u64 pkts;
	unsigned int start, eqd;

	if (!eqo->enable_aic) {
		eqd = eqo->eqd;
		goto modify_eqd;
	}

	/* EQs beyond the RX queues have no RX stats to adapt from */
	if (eqo->idx >= adapter->num_rx_qs)
		return;

	stats = rx_stats(&adapter->rx_obj[eqo->idx]);

	/* Wrapped around */
	if (time_before(now, stats->rx_jiffies)) {
		stats->rx_jiffies = now;
		return;
	}

	/* Update once a second */
	if (delta < HZ)
		return;

	/* Snapshot the pkt counter consistently w.r.t. concurrent writers */
	do {
		start = u64_stats_fetch_begin_bh(&stats->sync);
		pkts = stats->rx_pkts;
	} while (u64_stats_fetch_retry_bh(&stats->sync, start));

	stats->rx_pps = (unsigned long)(pkts - stats->rx_pkts_prev) / (delta / HZ);
	stats->rx_pkts_prev = pkts;
	stats->rx_jiffies = now;
	/* Scale pps to a delay value, clamped to the EQ's min/max bounds;
	 * very low rates get eqd 0 (no coalescing delay).
	 */
	eqd = (stats->rx_pps / 110000) << 3;
	eqd = min(eqd, eqo->max_eqd);
	eqd = max(eqd, eqo->min_eqd);
	if (eqd < 10)
		eqd = 0;

modify_eqd:
	/* Only issue the FW command when the delay actually changed */
	if (eqd != eqo->cur_eqd) {
		be_cmd_modify_eqd(adapter, eqo->q.id, eqd);
		eqo->cur_eqd = eqd;
	}
}
1182
/* Account one RX completion @rxcp into @rxo's per-queue stats, inside a
 * u64_stats update section so 64-bit readers see consistent values.
 */
static void be_rx_stats_update(struct be_rx_obj *rxo,
		struct be_rx_compl_info *rxcp)
{
	struct be_rx_stats *stats = rx_stats(rxo);

	u64_stats_update_begin(&stats->sync);
	stats->rx_compl++;
	stats->rx_bytes += rxcp->pkt_size;
	stats->rx_pkts++;
	if (rxcp->pkt_type == BE_MULTICAST_PACKET)
		stats->rx_mcast_pkts++;
	if (rxcp->err)
		stats->rx_compl_err++;
	u64_stats_update_end(&stats->sync);
}
1198
Sathya Perla2e588f82011-03-11 02:49:26 +00001199static inline bool csum_passed(struct be_rx_compl_info *rxcp)
Ajit Khaparde728a9972009-04-13 15:41:22 -07001200{
Padmanabh Ratnakar19fad862011-03-07 03:08:16 +00001201 /* L4 checksum is not reliable for non TCP/UDP packets.
1202 * Also ignore ipcksm for ipv6 pkts */
Sathya Perla2e588f82011-03-11 02:49:26 +00001203 return (rxcp->tcpf || rxcp->udpf) && rxcp->l4_csum &&
1204 (rxcp->ip_csum || rxcp->ipv6);
Ajit Khaparde728a9972009-04-13 15:41:22 -07001205}
1206
/* Fetch the page-info entry for RX-queue slot @frag_idx and release its
 * DMA mapping if this was the last user of the (possibly shared) page.
 * Decrements the queue's in-use count; caller owns the page reference.
 */
static struct be_rx_page_info *get_rx_page_info(struct be_rx_obj *rxo,
		u16 frag_idx)
{
	struct be_adapter *adapter = rxo->adapter;
	struct be_rx_page_info *rx_page_info;
	struct be_queue_info *rxq = &rxo->q;

	rx_page_info = &rxo->page_info_tbl[frag_idx];
	BUG_ON(!rx_page_info->page);

	/* Big pages are shared between consecutive slots; unmap only when
	 * the last slot referencing the page is consumed.
	 */
	if (rx_page_info->last_page_user) {
		dma_unmap_page(&adapter->pdev->dev,
			       dma_unmap_addr(rx_page_info, bus),
			       adapter->big_page_size, DMA_FROM_DEVICE);
		rx_page_info->last_page_user = false;
	}

	atomic_dec(&rxq->used);
	return rx_page_info;
}
1227
1228/* Throwaway the data in the Rx completion */
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001229static void be_rx_compl_discard(struct be_rx_obj *rxo,
1230 struct be_rx_compl_info *rxcp)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001231{
Sathya Perla3abcded2010-10-03 22:12:27 -07001232 struct be_queue_info *rxq = &rxo->q;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001233 struct be_rx_page_info *page_info;
Sathya Perla2e588f82011-03-11 02:49:26 +00001234 u16 i, num_rcvd = rxcp->num_rcvd;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001235
Padmanabh Ratnakare80d9da2011-03-07 03:07:58 +00001236 for (i = 0; i < num_rcvd; i++) {
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001237 page_info = get_rx_page_info(rxo, rxcp->rxq_idx);
Padmanabh Ratnakare80d9da2011-03-07 03:07:58 +00001238 put_page(page_info->page);
1239 memset(page_info, 0, sizeof(*page_info));
Sathya Perla2e588f82011-03-11 02:49:26 +00001240 index_inc(&rxcp->rxq_idx, rxq->len);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001241 }
1242}
1243
/*
 * skb_fill_rx_data forms a complete skb for an ether frame
 * indicated by rxcp.
 *
 * Tiny packets are copied entirely into the linear area; larger packets
 * copy only the header and attach the RX pages as skb fragments,
 * coalescing consecutive fragments that live on the same physical page.
 */
static void skb_fill_rx_data(struct be_rx_obj *rxo, struct sk_buff *skb,
			struct be_rx_compl_info *rxcp)
{
	struct be_queue_info *rxq = &rxo->q;
	struct be_rx_page_info *page_info;
	u16 i, j;
	u16 hdr_len, curr_frag_len, remaining;
	u8 *start;

	page_info = get_rx_page_info(rxo, rxcp->rxq_idx);
	start = page_address(page_info->page) + page_info->page_offset;
	prefetch(start);

	/* Copy data in the first descriptor of this completion */
	curr_frag_len = min(rxcp->pkt_size, rx_frag_size);

	skb->len = curr_frag_len;
	if (curr_frag_len <= BE_HDR_LEN) { /* tiny packet */
		memcpy(skb->data, start, curr_frag_len);
		/* Complete packet has now been moved to data */
		put_page(page_info->page);
		skb->data_len = 0;
		skb->tail += curr_frag_len;
	} else {
		/* Copy only the Ethernet header; the rest of the first
		 * fragment stays in the page and becomes frag[0].
		 */
		hdr_len = ETH_HLEN;
		memcpy(skb->data, start, hdr_len);
		skb_shinfo(skb)->nr_frags = 1;
		skb_frag_set_page(skb, 0, page_info->page);
		skb_shinfo(skb)->frags[0].page_offset =
					page_info->page_offset + hdr_len;
		skb_frag_size_set(&skb_shinfo(skb)->frags[0], curr_frag_len - hdr_len);
		skb->data_len = curr_frag_len - hdr_len;
		skb->truesize += rx_frag_size;
		skb->tail += hdr_len;
	}
	page_info->page = NULL;

	/* Single-fragment packet: done */
	if (rxcp->pkt_size <= rx_frag_size) {
		BUG_ON(rxcp->num_rcvd != 1);
		return;
	}

	/* More frags present for this completion */
	index_inc(&rxcp->rxq_idx, rxq->len);
	remaining = rxcp->pkt_size - curr_frag_len;
	for (i = 1, j = 0; i < rxcp->num_rcvd; i++) {
		page_info = get_rx_page_info(rxo, rxcp->rxq_idx);
		curr_frag_len = min(remaining, rx_frag_size);

		/* Coalesce all frags from the same physical page in one slot */
		if (page_info->page_offset == 0) {
			/* Fresh page */
			j++;
			skb_frag_set_page(skb, j, page_info->page);
			skb_shinfo(skb)->frags[j].page_offset =
							page_info->page_offset;
			skb_frag_size_set(&skb_shinfo(skb)->frags[j], 0);
			skb_shinfo(skb)->nr_frags++;
		} else {
			/* Same page as previous frag: drop the extra ref */
			put_page(page_info->page);
		}

		skb_frag_size_add(&skb_shinfo(skb)->frags[j], curr_frag_len);
		skb->len += curr_frag_len;
		skb->data_len += curr_frag_len;
		skb->truesize += rx_frag_size;
		remaining -= curr_frag_len;
		index_inc(&rxcp->rxq_idx, rxq->len);
		page_info->page = NULL;
	}
	BUG_ON(j > MAX_SKB_FRAGS);
}
1320
Ajit Khaparde5be93b92009-07-21 12:36:19 -07001321/* Process the RX completion indicated by rxcp when GRO is disabled */
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001322static void be_rx_compl_process(struct be_rx_obj *rxo,
1323 struct be_rx_compl_info *rxcp)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001324{
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001325 struct be_adapter *adapter = rxo->adapter;
Michał Mirosław6332c8d2011-04-07 02:43:48 +00001326 struct net_device *netdev = adapter->netdev;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001327 struct sk_buff *skb;
Sathya Perla89420422010-02-17 01:35:26 +00001328
Eric Dumazetbb349bb2012-01-25 03:56:30 +00001329 skb = netdev_alloc_skb_ip_align(netdev, BE_RX_SKB_ALLOC_SIZE);
Sathya Perlaa058a632010-02-17 01:34:22 +00001330 if (unlikely(!skb)) {
Sathya Perlaac124ff2011-07-25 19:10:14 +00001331 rx_stats(rxo)->rx_drops_no_skbs++;
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001332 be_rx_compl_discard(rxo, rxcp);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001333 return;
1334 }
1335
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001336 skb_fill_rx_data(rxo, skb, rxcp);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001337
Michał Mirosław6332c8d2011-04-07 02:43:48 +00001338 if (likely((netdev->features & NETIF_F_RXCSUM) && csum_passed(rxcp)))
Ajit Khaparde728a9972009-04-13 15:41:22 -07001339 skb->ip_summed = CHECKSUM_UNNECESSARY;
Somnath Koturc6ce2f42010-10-25 01:11:58 +00001340 else
1341 skb_checksum_none_assert(skb);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001342
Michał Mirosław6332c8d2011-04-07 02:43:48 +00001343 skb->protocol = eth_type_trans(skb, netdev);
Somnath Koturaaa6dae2012-05-02 03:40:49 +00001344 skb_record_rx_queue(skb, rxo - &adapter->rx_obj[0]);
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001345 if (netdev->features & NETIF_F_RXHASH)
Ajit Khaparde4b972912011-04-06 18:07:43 +00001346 skb->rxhash = rxcp->rss_hash;
1347
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001348
Jiri Pirko343e43c2011-08-25 02:50:51 +00001349 if (rxcp->vlanf)
Ajit Khaparde4c5102f2011-07-12 22:10:01 -07001350 __vlan_hwaccel_put_tag(skb, rxcp->vlan_tag);
1351
1352 netif_receive_skb(skb);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001353}
1354
Ajit Khaparde5be93b92009-07-21 12:36:19 -07001355/* Process the RX completion indicated by rxcp when GRO is enabled */
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001356void be_rx_compl_process_gro(struct be_rx_obj *rxo, struct napi_struct *napi,
1357 struct be_rx_compl_info *rxcp)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001358{
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001359 struct be_adapter *adapter = rxo->adapter;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001360 struct be_rx_page_info *page_info;
Ajit Khaparde5be93b92009-07-21 12:36:19 -07001361 struct sk_buff *skb = NULL;
Sathya Perla3abcded2010-10-03 22:12:27 -07001362 struct be_queue_info *rxq = &rxo->q;
Sathya Perla2e588f82011-03-11 02:49:26 +00001363 u16 remaining, curr_frag_len;
1364 u16 i, j;
Ajit Khaparde3968fa12011-02-20 11:41:53 +00001365
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001366 skb = napi_get_frags(napi);
Ajit Khaparde5be93b92009-07-21 12:36:19 -07001367 if (!skb) {
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001368 be_rx_compl_discard(rxo, rxcp);
Ajit Khaparde5be93b92009-07-21 12:36:19 -07001369 return;
1370 }
1371
Sathya Perla2e588f82011-03-11 02:49:26 +00001372 remaining = rxcp->pkt_size;
1373 for (i = 0, j = -1; i < rxcp->num_rcvd; i++) {
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001374 page_info = get_rx_page_info(rxo, rxcp->rxq_idx);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001375
1376 curr_frag_len = min(remaining, rx_frag_size);
1377
Ajit Khapardebd46cb62009-06-26 02:51:07 +00001378 /* Coalesce all frags from the same physical page in one slot */
1379 if (i == 0 || page_info->page_offset == 0) {
1380 /* First frag or Fresh page */
1381 j++;
Ian Campbellb061b392011-08-29 23:18:23 +00001382 skb_frag_set_page(skb, j, page_info->page);
Ajit Khaparde5be93b92009-07-21 12:36:19 -07001383 skb_shinfo(skb)->frags[j].page_offset =
1384 page_info->page_offset;
Eric Dumazet9e903e02011-10-18 21:00:24 +00001385 skb_frag_size_set(&skb_shinfo(skb)->frags[j], 0);
Ajit Khapardebd46cb62009-06-26 02:51:07 +00001386 } else {
1387 put_page(page_info->page);
1388 }
Eric Dumazet9e903e02011-10-18 21:00:24 +00001389 skb_frag_size_add(&skb_shinfo(skb)->frags[j], curr_frag_len);
Eric Dumazetbdb28a92011-10-13 06:31:02 +00001390 skb->truesize += rx_frag_size;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001391 remaining -= curr_frag_len;
Sathya Perla2e588f82011-03-11 02:49:26 +00001392 index_inc(&rxcp->rxq_idx, rxq->len);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001393 memset(page_info, 0, sizeof(*page_info));
1394 }
Ajit Khapardebd46cb62009-06-26 02:51:07 +00001395 BUG_ON(j > MAX_SKB_FRAGS);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001396
Ajit Khaparde5be93b92009-07-21 12:36:19 -07001397 skb_shinfo(skb)->nr_frags = j + 1;
Sathya Perla2e588f82011-03-11 02:49:26 +00001398 skb->len = rxcp->pkt_size;
1399 skb->data_len = rxcp->pkt_size;
Ajit Khaparde5be93b92009-07-21 12:36:19 -07001400 skb->ip_summed = CHECKSUM_UNNECESSARY;
Somnath Koturaaa6dae2012-05-02 03:40:49 +00001401 skb_record_rx_queue(skb, rxo - &adapter->rx_obj[0]);
Ajit Khaparde4b972912011-04-06 18:07:43 +00001402 if (adapter->netdev->features & NETIF_F_RXHASH)
1403 skb->rxhash = rxcp->rss_hash;
Ajit Khaparde5be93b92009-07-21 12:36:19 -07001404
Jiri Pirko343e43c2011-08-25 02:50:51 +00001405 if (rxcp->vlanf)
Ajit Khaparde4c5102f2011-07-12 22:10:01 -07001406 __vlan_hwaccel_put_tag(skb, rxcp->vlan_tag);
1407
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001408 napi_gro_frags(napi);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001409}
1410
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001411static void be_parse_rx_compl_v1(struct be_eth_rx_compl *compl,
1412 struct be_rx_compl_info *rxcp)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001413{
Sathya Perla2e588f82011-03-11 02:49:26 +00001414 rxcp->pkt_size =
1415 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, pktsize, compl);
1416 rxcp->vlanf = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, vtp, compl);
1417 rxcp->err = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, err, compl);
1418 rxcp->tcpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, tcpf, compl);
Padmanabh Ratnakar9ecb42f2011-03-15 14:57:09 -07001419 rxcp->udpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, udpf, compl);
Sathya Perla2e588f82011-03-11 02:49:26 +00001420 rxcp->ip_csum =
1421 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, ipcksm, compl);
1422 rxcp->l4_csum =
1423 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, l4_cksm, compl);
1424 rxcp->ipv6 =
1425 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, ip_version, compl);
1426 rxcp->rxq_idx =
1427 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, fragndx, compl);
1428 rxcp->num_rcvd =
1429 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, numfrags, compl);
1430 rxcp->pkt_type =
1431 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, cast_enc, compl);
Ajit Khaparde4b972912011-04-06 18:07:43 +00001432 rxcp->rss_hash =
Sarveshwar Bandic2979772012-07-25 21:29:50 +00001433 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, rsshash, compl);
Sathya Perla15d72182011-03-21 20:49:26 +00001434 if (rxcp->vlanf) {
1435 rxcp->vtm = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, vtm,
David S. Miller3c709f82011-05-11 14:26:15 -04001436 compl);
1437 rxcp->vlan_tag = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, vlan_tag,
1438 compl);
Sathya Perla15d72182011-03-21 20:49:26 +00001439 }
Sathya Perla12004ae2011-08-02 19:57:46 +00001440 rxcp->port = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, port, compl);
Sathya Perla2e588f82011-03-11 02:49:26 +00001441}
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001442
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001443static void be_parse_rx_compl_v0(struct be_eth_rx_compl *compl,
1444 struct be_rx_compl_info *rxcp)
Sathya Perla2e588f82011-03-11 02:49:26 +00001445{
1446 rxcp->pkt_size =
1447 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, pktsize, compl);
1448 rxcp->vlanf = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, vtp, compl);
1449 rxcp->err = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, err, compl);
1450 rxcp->tcpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, tcpf, compl);
Padmanabh Ratnakar9ecb42f2011-03-15 14:57:09 -07001451 rxcp->udpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, udpf, compl);
Sathya Perla2e588f82011-03-11 02:49:26 +00001452 rxcp->ip_csum =
1453 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, ipcksm, compl);
1454 rxcp->l4_csum =
1455 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, l4_cksm, compl);
1456 rxcp->ipv6 =
1457 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, ip_version, compl);
1458 rxcp->rxq_idx =
1459 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, fragndx, compl);
1460 rxcp->num_rcvd =
1461 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, numfrags, compl);
1462 rxcp->pkt_type =
1463 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, cast_enc, compl);
Ajit Khaparde4b972912011-04-06 18:07:43 +00001464 rxcp->rss_hash =
Sarveshwar Bandic2979772012-07-25 21:29:50 +00001465 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, rsshash, compl);
Sathya Perla15d72182011-03-21 20:49:26 +00001466 if (rxcp->vlanf) {
1467 rxcp->vtm = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, vtm,
David S. Miller3c709f82011-05-11 14:26:15 -04001468 compl);
1469 rxcp->vlan_tag = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, vlan_tag,
1470 compl);
Sathya Perla15d72182011-03-21 20:49:26 +00001471 }
Sathya Perla12004ae2011-08-02 19:57:46 +00001472 rxcp->port = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, port, compl);
Sathya Perla2e588f82011-03-11 02:49:26 +00001473}
1474
/* Fetch the next valid RX completion from rxo's CQ, or NULL if none.
 * The HW entry is parsed into rxo->rxcp (v0 or v1 layout), its valid bit
 * is cleared so it is consumed only once, and the CQ tail is advanced.
 * Returns a pointer to rxo->rxcp (reused across calls, not thread-safe).
 */
static struct be_rx_compl_info *be_rx_compl_get(struct be_rx_obj *rxo)
{
	struct be_eth_rx_compl *compl = queue_tail_node(&rxo->cq);
	struct be_rx_compl_info *rxcp = &rxo->rxcp;
	struct be_adapter *adapter = rxo->adapter;

	/* For checking the valid bit it is Ok to use either definition as the
	 * valid bit is at the same position in both v0 and v1 Rx compl */
	if (compl->dw[offsetof(struct amap_eth_rx_compl_v1, valid) / 32] == 0)
		return NULL;

	/* Read the rest of the entry only after seeing the valid bit set */
	rmb();
	be_dws_le_to_cpu(compl, sizeof(*compl));

	if (adapter->be3_native)
		be_parse_rx_compl_v1(compl, rxcp);
	else
		be_parse_rx_compl_v0(compl, rxcp);

	if (rxcp->vlanf) {
		/* vlanf could be wrongly set in some cards.
		 * ignore if vtm is not set */
		if ((adapter->function_mode & FLEX10_MODE) && !rxcp->vtm)
			rxcp->vlanf = 0;

		/* BE delivers the tag byte-swapped; Lancer does not */
		if (!lancer_chip(adapter))
			rxcp->vlan_tag = swab16(rxcp->vlan_tag);

		/* Hide the port-vid tag unless the vlan is configured */
		if (adapter->pvid == (rxcp->vlan_tag & VLAN_VID_MASK) &&
		    !adapter->vlan_tag[rxcp->vlan_tag])
			rxcp->vlanf = 0;
	}

	/* As the compl has been parsed, reset it; we wont touch it again */
	compl->dw[offsetof(struct amap_eth_rx_compl_v1, valid) / 32] = 0;

	queue_tail_inc(&rxo->cq);
	return rxcp;
}
1514
Eric Dumazet1829b082011-03-01 05:48:12 +00001515static inline struct page *be_alloc_pages(u32 size, gfp_t gfp)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001516{
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001517 u32 order = get_order(size);
Eric Dumazet1829b082011-03-01 05:48:12 +00001518
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001519 if (order > 0)
Eric Dumazet1829b082011-03-01 05:48:12 +00001520 gfp |= __GFP_COMP;
1521 return alloc_pages(gfp, order);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001522}
1523
1524/*
1525 * Allocate a page, split it to fragments of size rx_frag_size and post as
1526 * receive buffers to BE
1527 */
Eric Dumazet1829b082011-03-01 05:48:12 +00001528static void be_post_rx_frags(struct be_rx_obj *rxo, gfp_t gfp)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001529{
Sathya Perla3abcded2010-10-03 22:12:27 -07001530 struct be_adapter *adapter = rxo->adapter;
Sathya Perla26d92f92010-01-21 22:52:08 -08001531 struct be_rx_page_info *page_info = NULL, *prev_page_info = NULL;
Sathya Perla3abcded2010-10-03 22:12:27 -07001532 struct be_queue_info *rxq = &rxo->q;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001533 struct page *pagep = NULL;
1534 struct be_eth_rx_d *rxd;
1535 u64 page_dmaaddr = 0, frag_dmaaddr;
1536 u32 posted, page_offset = 0;
1537
Sathya Perla3abcded2010-10-03 22:12:27 -07001538 page_info = &rxo->page_info_tbl[rxq->head];
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001539 for (posted = 0; posted < MAX_RX_POST && !page_info->page; posted++) {
1540 if (!pagep) {
Eric Dumazet1829b082011-03-01 05:48:12 +00001541 pagep = be_alloc_pages(adapter->big_page_size, gfp);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001542 if (unlikely(!pagep)) {
Sathya Perlaac124ff2011-07-25 19:10:14 +00001543 rx_stats(rxo)->rx_post_fail++;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001544 break;
1545 }
Ivan Vecera2b7bceb2011-02-02 08:05:12 +00001546 page_dmaaddr = dma_map_page(&adapter->pdev->dev, pagep,
1547 0, adapter->big_page_size,
1548 DMA_FROM_DEVICE);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001549 page_info->page_offset = 0;
1550 } else {
1551 get_page(pagep);
1552 page_info->page_offset = page_offset + rx_frag_size;
1553 }
1554 page_offset = page_info->page_offset;
1555 page_info->page = pagep;
FUJITA Tomonorifac6da52010-04-01 16:53:22 +00001556 dma_unmap_addr_set(page_info, bus, page_dmaaddr);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001557 frag_dmaaddr = page_dmaaddr + page_info->page_offset;
1558
1559 rxd = queue_head_node(rxq);
1560 rxd->fragpa_lo = cpu_to_le32(frag_dmaaddr & 0xFFFFFFFF);
1561 rxd->fragpa_hi = cpu_to_le32(upper_32_bits(frag_dmaaddr));
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001562
1563 /* Any space left in the current big page for another frag? */
1564 if ((page_offset + rx_frag_size + rx_frag_size) >
1565 adapter->big_page_size) {
1566 pagep = NULL;
1567 page_info->last_page_user = true;
1568 }
Sathya Perla26d92f92010-01-21 22:52:08 -08001569
1570 prev_page_info = page_info;
1571 queue_head_inc(rxq);
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001572 page_info = &rxo->page_info_tbl[rxq->head];
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001573 }
1574 if (pagep)
Sathya Perla26d92f92010-01-21 22:52:08 -08001575 prev_page_info->last_page_user = true;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001576
1577 if (posted) {
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001578 atomic_add(posted, &rxq->used);
Sathya Perla8788fdc2009-07-27 22:52:03 +00001579 be_rxq_notify(adapter, rxq->id, posted);
Sathya Perlaea1dae12009-03-19 23:56:20 -07001580 } else if (atomic_read(&rxq->used) == 0) {
1581 /* Let be_worker replenish when memory is available */
Sathya Perla3abcded2010-10-03 22:12:27 -07001582 rxo->rx_post_starved = true;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001583 }
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001584}
1585
Sathya Perla5fb379e2009-06-18 00:02:59 +00001586static struct be_eth_tx_compl *be_tx_compl_get(struct be_queue_info *tx_cq)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001587{
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001588 struct be_eth_tx_compl *txcp = queue_tail_node(tx_cq);
1589
1590 if (txcp->dw[offsetof(struct amap_eth_tx_compl, valid) / 32] == 0)
1591 return NULL;
1592
Sathya Perlaf3eb62d2010-06-29 00:11:17 +00001593 rmb();
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001594 be_dws_le_to_cpu(txcp, sizeof(*txcp));
1595
1596 txcp->dw[offsetof(struct amap_eth_tx_compl, valid) / 32] = 0;
1597
1598 queue_tail_inc(tx_cq);
1599 return txcp;
1600}
1601
Sathya Perla3c8def92011-06-12 20:01:58 +00001602static u16 be_tx_compl_process(struct be_adapter *adapter,
1603 struct be_tx_obj *txo, u16 last_index)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001604{
Sathya Perla3c8def92011-06-12 20:01:58 +00001605 struct be_queue_info *txq = &txo->q;
Alexander Duycka73b7962009-12-02 16:48:18 +00001606 struct be_eth_wrb *wrb;
Sathya Perla3c8def92011-06-12 20:01:58 +00001607 struct sk_buff **sent_skbs = txo->sent_skb_list;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001608 struct sk_buff *sent_skb;
Sathya Perlaec43b1a2010-03-22 20:41:34 +00001609 u16 cur_index, num_wrbs = 1; /* account for hdr wrb */
1610 bool unmap_skb_hdr = true;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001611
Sathya Perlaec43b1a2010-03-22 20:41:34 +00001612 sent_skb = sent_skbs[txq->tail];
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001613 BUG_ON(!sent_skb);
Sathya Perlaec43b1a2010-03-22 20:41:34 +00001614 sent_skbs[txq->tail] = NULL;
1615
1616 /* skip header wrb */
Alexander Duycka73b7962009-12-02 16:48:18 +00001617 queue_tail_inc(txq);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001618
Sathya Perlaec43b1a2010-03-22 20:41:34 +00001619 do {
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001620 cur_index = txq->tail;
Alexander Duycka73b7962009-12-02 16:48:18 +00001621 wrb = queue_tail_node(txq);
Ivan Vecera2b7bceb2011-02-02 08:05:12 +00001622 unmap_tx_frag(&adapter->pdev->dev, wrb,
1623 (unmap_skb_hdr && skb_headlen(sent_skb)));
Sathya Perlaec43b1a2010-03-22 20:41:34 +00001624 unmap_skb_hdr = false;
1625
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001626 num_wrbs++;
1627 queue_tail_inc(txq);
Sathya Perlaec43b1a2010-03-22 20:41:34 +00001628 } while (cur_index != last_index);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001629
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001630 kfree_skb(sent_skb);
Padmanabh Ratnakar4d586b82011-05-10 05:13:57 +00001631 return num_wrbs;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001632}
1633
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001634/* Return the number of events in the event queue */
1635static inline int events_get(struct be_eq_obj *eqo)
Sathya Perla859b1e42009-08-10 03:43:51 +00001636{
1637 struct be_eq_entry *eqe;
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001638 int num = 0;
Sathya Perla859b1e42009-08-10 03:43:51 +00001639
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001640 do {
1641 eqe = queue_tail_node(&eqo->q);
1642 if (eqe->evt == 0)
1643 break;
1644
1645 rmb();
Sathya Perla859b1e42009-08-10 03:43:51 +00001646 eqe->evt = 0;
1647 num++;
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001648 queue_tail_inc(&eqo->q);
1649 } while (true);
Sathya Perla859b1e42009-08-10 03:43:51 +00001650
1651 return num;
1652}
1653
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001654static int event_handle(struct be_eq_obj *eqo)
Sathya Perla859b1e42009-08-10 03:43:51 +00001655{
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001656 bool rearm = false;
1657 int num = events_get(eqo);
Sathya Perla859b1e42009-08-10 03:43:51 +00001658
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001659 /* Deal with any spurious interrupts that come without events */
1660 if (!num)
1661 rearm = true;
Sathya Perla859b1e42009-08-10 03:43:51 +00001662
Padmanabh Ratnakaraf311fe2012-04-25 01:46:39 +00001663 if (num || msix_enabled(eqo->adapter))
1664 be_eq_notify(eqo->adapter, eqo->q.id, rearm, true, num);
1665
Sathya Perla859b1e42009-08-10 03:43:51 +00001666 if (num)
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001667 napi_schedule(&eqo->napi);
1668
1669 return num;
Sathya Perla859b1e42009-08-10 03:43:51 +00001670}
1671
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001672/* Leaves the EQ is disarmed state */
1673static void be_eq_clean(struct be_eq_obj *eqo)
1674{
1675 int num = events_get(eqo);
1676
1677 be_eq_notify(eqo->adapter, eqo->q.id, false, true, num);
1678}
1679
/* Drain rxo's completion queue and reclaim all posted-but-unused RX
 * buffers; resets the RX queue indices to 0. Called when the RX path is
 * being torn down, so no new completions can arrive concurrently.
 */
static void be_rx_cq_clean(struct be_rx_obj *rxo)
{
	struct be_rx_page_info *page_info;
	struct be_queue_info *rxq = &rxo->q;
	struct be_queue_info *rx_cq = &rxo->cq;
	struct be_rx_compl_info *rxcp;
	u16 tail;

	/* First cleanup pending rx completions */
	while ((rxcp = be_rx_compl_get(rxo)) != NULL) {
		be_rx_compl_discard(rxo, rxcp);
		be_cq_notify(rxo->adapter, rx_cq->id, false, 1);
	}

	/* Then free posted rx buffer that were not used */
	/* tail = index of the oldest still-posted buffer; NOTE(review):
	 * get_rx_page_info() appears to decrement rxq->used, which is what
	 * terminates this loop - confirm against its definition */
	tail = (rxq->head + rxq->len - atomic_read(&rxq->used)) % rxq->len;
	for (; atomic_read(&rxq->used) > 0; index_inc(&tail, rxq->len)) {
		page_info = get_rx_page_info(rxo, tail);
		put_page(page_info->page);
		memset(page_info, 0, sizeof(*page_info));
	}
	BUG_ON(atomic_read(&rxq->used));
	rxq->tail = rxq->head = 0;
}
1704
Sathya Perla0ae57bb2012-02-23 18:50:14 +00001705static void be_tx_compl_clean(struct be_adapter *adapter)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001706{
Sathya Perla0ae57bb2012-02-23 18:50:14 +00001707 struct be_tx_obj *txo;
1708 struct be_queue_info *txq;
Sathya Perlaa8e91792009-08-10 03:42:43 +00001709 struct be_eth_tx_compl *txcp;
Padmanabh Ratnakar4d586b82011-05-10 05:13:57 +00001710 u16 end_idx, cmpl = 0, timeo = 0, num_wrbs = 0;
Sathya Perlab03388d2010-02-18 00:37:17 +00001711 struct sk_buff *sent_skb;
1712 bool dummy_wrb;
Sathya Perla0ae57bb2012-02-23 18:50:14 +00001713 int i, pending_txqs;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001714
Sathya Perlaa8e91792009-08-10 03:42:43 +00001715 /* Wait for a max of 200ms for all the tx-completions to arrive. */
1716 do {
Sathya Perla0ae57bb2012-02-23 18:50:14 +00001717 pending_txqs = adapter->num_tx_qs;
1718
1719 for_all_tx_queues(adapter, txo, i) {
1720 txq = &txo->q;
1721 while ((txcp = be_tx_compl_get(&txo->cq))) {
1722 end_idx =
1723 AMAP_GET_BITS(struct amap_eth_tx_compl,
1724 wrb_index, txcp);
1725 num_wrbs += be_tx_compl_process(adapter, txo,
1726 end_idx);
1727 cmpl++;
1728 }
1729 if (cmpl) {
1730 be_cq_notify(adapter, txo->cq.id, false, cmpl);
1731 atomic_sub(num_wrbs, &txq->used);
1732 cmpl = 0;
1733 num_wrbs = 0;
1734 }
1735 if (atomic_read(&txq->used) == 0)
1736 pending_txqs--;
Sathya Perlaa8e91792009-08-10 03:42:43 +00001737 }
1738
Sathya Perla0ae57bb2012-02-23 18:50:14 +00001739 if (pending_txqs == 0 || ++timeo > 200)
Sathya Perlaa8e91792009-08-10 03:42:43 +00001740 break;
1741
1742 mdelay(1);
1743 } while (true);
1744
Sathya Perla0ae57bb2012-02-23 18:50:14 +00001745 for_all_tx_queues(adapter, txo, i) {
1746 txq = &txo->q;
1747 if (atomic_read(&txq->used))
1748 dev_err(&adapter->pdev->dev, "%d pending tx-compls\n",
1749 atomic_read(&txq->used));
Sathya Perlab03388d2010-02-18 00:37:17 +00001750
Sathya Perla0ae57bb2012-02-23 18:50:14 +00001751 /* free posted tx for which compls will never arrive */
1752 while (atomic_read(&txq->used)) {
1753 sent_skb = txo->sent_skb_list[txq->tail];
1754 end_idx = txq->tail;
1755 num_wrbs = wrb_cnt_for_skb(adapter, sent_skb,
1756 &dummy_wrb);
1757 index_adv(&end_idx, num_wrbs - 1, txq->len);
1758 num_wrbs = be_tx_compl_process(adapter, txo, end_idx);
1759 atomic_sub(num_wrbs, &txq->used);
1760 }
Sathya Perlab03388d2010-02-18 00:37:17 +00001761 }
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001762}
1763
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001764static void be_evt_queues_destroy(struct be_adapter *adapter)
1765{
1766 struct be_eq_obj *eqo;
1767 int i;
1768
1769 for_all_evt_queues(adapter, eqo, i) {
Padmanabh Ratnakar19d59aa2012-07-12 03:57:21 +00001770 if (eqo->q.created) {
1771 be_eq_clean(eqo);
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001772 be_cmd_q_destroy(adapter, &eqo->q, QTYPE_EQ);
Padmanabh Ratnakar19d59aa2012-07-12 03:57:21 +00001773 }
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001774 be_queue_free(adapter, &eqo->q);
1775 }
1776}
1777
/* Create one event queue per interrupt vector.
 * Returns 0 on success or the first failing status; on failure,
 * already-created EQs are left allocated - NOTE(review): presumably the
 * caller unwinds via be_evt_queues_destroy(), confirm at call sites.
 */
static int be_evt_queues_create(struct be_adapter *adapter)
{
	struct be_queue_info *eq;
	struct be_eq_obj *eqo;
	int i, rc;

	adapter->num_evt_qs = num_irqs(adapter);

	for_all_evt_queues(adapter, eqo, i) {
		eqo->adapter = adapter;
		eqo->tx_budget = BE_TX_BUDGET;
		eqo->idx = i;
		eqo->max_eqd = BE_MAX_EQD;
		eqo->enable_aic = true;	/* adaptive interrupt coalescing */

		eq = &eqo->q;
		rc = be_queue_alloc(adapter, eq, EVNT_Q_LEN,
					sizeof(struct be_eq_entry));
		if (rc)
			return rc;

		rc = be_cmd_eq_create(adapter, eq, eqo->cur_eqd);
		if (rc)
			return rc;
	}
	return 0;
}
1805
Sathya Perla5fb379e2009-06-18 00:02:59 +00001806static void be_mcc_queues_destroy(struct be_adapter *adapter)
1807{
1808 struct be_queue_info *q;
Sathya Perla5fb379e2009-06-18 00:02:59 +00001809
Sathya Perla8788fdc2009-07-27 22:52:03 +00001810 q = &adapter->mcc_obj.q;
Sathya Perla5fb379e2009-06-18 00:02:59 +00001811 if (q->created)
Sathya Perla8788fdc2009-07-27 22:52:03 +00001812 be_cmd_q_destroy(adapter, q, QTYPE_MCCQ);
Sathya Perla5fb379e2009-06-18 00:02:59 +00001813 be_queue_free(adapter, q);
1814
Sathya Perla8788fdc2009-07-27 22:52:03 +00001815 q = &adapter->mcc_obj.cq;
Sathya Perla5fb379e2009-06-18 00:02:59 +00001816 if (q->created)
Sathya Perla8788fdc2009-07-27 22:52:03 +00001817 be_cmd_q_destroy(adapter, q, QTYPE_CQ);
Sathya Perla5fb379e2009-06-18 00:02:59 +00001818 be_queue_free(adapter, q);
1819}
1820
/* Must be called only after TX qs are created as MCC shares TX EQ.
 * Creates the MCC completion queue and then the MCC queue itself.
 * Returns 0 on success, -1 on any failure; partially acquired resources
 * are released by the goto chain in reverse order of acquisition.
 */
static int be_mcc_queues_create(struct be_adapter *adapter)
{
	struct be_queue_info *q, *cq;

	cq = &adapter->mcc_obj.cq;
	if (be_queue_alloc(adapter, cq, MCC_CQ_LEN,
			sizeof(struct be_mcc_compl)))
		goto err;

	/* Use the default EQ for MCC completions */
	if (be_cmd_cq_create(adapter, cq, &mcc_eqo(adapter)->q, true, 0))
		goto mcc_cq_free;

	q = &adapter->mcc_obj.q;
	if (be_queue_alloc(adapter, q, MCC_Q_LEN, sizeof(struct be_mcc_wrb)))
		goto mcc_cq_destroy;

	if (be_cmd_mccq_create(adapter, q, cq))
		goto mcc_q_free;

	return 0;

mcc_q_free:
	be_queue_free(adapter, q);
mcc_cq_destroy:
	be_cmd_q_destroy(adapter, cq, QTYPE_CQ);
mcc_cq_free:
	be_queue_free(adapter, cq);
err:
	return -1;
}
1853
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001854static void be_tx_queues_destroy(struct be_adapter *adapter)
1855{
1856 struct be_queue_info *q;
Sathya Perla3c8def92011-06-12 20:01:58 +00001857 struct be_tx_obj *txo;
1858 u8 i;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001859
Sathya Perla3c8def92011-06-12 20:01:58 +00001860 for_all_tx_queues(adapter, txo, i) {
1861 q = &txo->q;
1862 if (q->created)
1863 be_cmd_q_destroy(adapter, q, QTYPE_TXQ);
1864 be_queue_free(adapter, q);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001865
Sathya Perla3c8def92011-06-12 20:01:58 +00001866 q = &txo->cq;
1867 if (q->created)
1868 be_cmd_q_destroy(adapter, q, QTYPE_CQ);
1869 be_queue_free(adapter, q);
1870 }
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001871}
1872
Sathya Perladafc0fe2011-10-24 02:45:02 +00001873static int be_num_txqs_want(struct be_adapter *adapter)
1874{
Padmanabh Ratnakarabb93952012-10-20 06:01:41 +00001875 if ((!lancer_chip(adapter) && sriov_want(adapter)) ||
1876 be_is_mc(adapter) ||
1877 (!lancer_chip(adapter) && !be_physfn(adapter)) ||
Sathya Perla39f1d942012-05-08 19:41:24 +00001878 adapter->generation == BE_GEN2)
Sathya Perladafc0fe2011-10-24 02:45:02 +00001879 return 1;
1880 else
Padmanabh Ratnakarabb93952012-10-20 06:01:41 +00001881 return adapter->max_tx_queues;
Sathya Perladafc0fe2011-10-24 02:45:02 +00001882}
1883
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001884static int be_tx_cqs_create(struct be_adapter *adapter)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001885{
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001886 struct be_queue_info *cq, *eq;
1887 int status;
Sathya Perla3c8def92011-06-12 20:01:58 +00001888 struct be_tx_obj *txo;
1889 u8 i;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001890
Sathya Perladafc0fe2011-10-24 02:45:02 +00001891 adapter->num_tx_qs = be_num_txqs_want(adapter);
Padmanabh Ratnakar3bb62f42011-11-25 05:48:06 +00001892 if (adapter->num_tx_qs != MAX_TX_QS) {
1893 rtnl_lock();
Sathya Perladafc0fe2011-10-24 02:45:02 +00001894 netif_set_real_num_tx_queues(adapter->netdev,
1895 adapter->num_tx_qs);
Padmanabh Ratnakar3bb62f42011-11-25 05:48:06 +00001896 rtnl_unlock();
1897 }
Sathya Perladafc0fe2011-10-24 02:45:02 +00001898
Sathya Perla3c8def92011-06-12 20:01:58 +00001899 for_all_tx_queues(adapter, txo, i) {
1900 cq = &txo->cq;
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001901 status = be_queue_alloc(adapter, cq, TX_CQ_LEN,
1902 sizeof(struct be_eth_tx_compl));
1903 if (status)
1904 return status;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001905
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001906 /* If num_evt_qs is less than num_tx_qs, then more than
1907 * one txq share an eq
1908 */
1909 eq = &adapter->eq_obj[i % adapter->num_evt_qs].q;
1910 status = be_cmd_cq_create(adapter, cq, eq, false, 3);
1911 if (status)
1912 return status;
Sathya Perla3c8def92011-06-12 20:01:58 +00001913 }
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001914 return 0;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001915}
1916
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001917static int be_tx_qs_create(struct be_adapter *adapter)
1918{
1919 struct be_tx_obj *txo;
1920 int i, status;
1921
1922 for_all_tx_queues(adapter, txo, i) {
1923 status = be_queue_alloc(adapter, &txo->q, TX_Q_LEN,
1924 sizeof(struct be_eth_wrb));
1925 if (status)
1926 return status;
1927
1928 status = be_cmd_txq_create(adapter, &txo->q, &txo->cq);
1929 if (status)
1930 return status;
1931 }
1932
Sathya Perlad3791422012-09-28 04:39:44 +00001933 dev_info(&adapter->pdev->dev, "created %d TX queue(s)\n",
1934 adapter->num_tx_qs);
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001935 return 0;
1936}
1937
1938static void be_rx_cqs_destroy(struct be_adapter *adapter)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001939{
1940 struct be_queue_info *q;
Sathya Perla3abcded2010-10-03 22:12:27 -07001941 struct be_rx_obj *rxo;
1942 int i;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001943
Sathya Perla3abcded2010-10-03 22:12:27 -07001944 for_all_rx_queues(adapter, rxo, i) {
Sathya Perla3abcded2010-10-03 22:12:27 -07001945 q = &rxo->cq;
1946 if (q->created)
1947 be_cmd_q_destroy(adapter, q, QTYPE_CQ);
1948 be_queue_free(adapter, q);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001949 }
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001950}
1951
/* Decide how many RX queues to use, then allocate and create a CQ for
 * each, binding the CQs round-robin to the available event queues.
 * Returns 0 on success or a negative error.
 */
static int be_rx_cqs_create(struct be_adapter *adapter)
{
	struct be_queue_info *eq, *cq;
	struct be_rx_obj *rxo;
	int rc, i;

	/* We'll create as many RSS rings as there are irqs.
	 * But when there's only one irq there's no use creating RSS rings
	 */
	adapter->num_rx_qs = (num_irqs(adapter) > 1) ?
				num_irqs(adapter) + 1 : 1;
	if (adapter->num_rx_qs != MAX_RX_QS) {
		/* rtnl lock is required when changing the real queue count */
		rtnl_lock();
		netif_set_real_num_rx_queues(adapter->netdev,
			adapter->num_rx_qs);
		rtnl_unlock();
	}

	adapter->big_page_size = (1 << get_order(rx_frag_size)) * PAGE_SIZE;
	for_all_rx_queues(adapter, rxo, i) {
		rxo->adapter = adapter;
		cq = &rxo->cq;
		rc = be_queue_alloc(adapter, cq, RX_CQ_LEN,
				sizeof(struct be_eth_rx_compl));
		if (rc)
			return rc;

		/* Multiple RXQs may share an EQ when EQs are scarce */
		eq = &adapter->eq_obj[i % adapter->num_evt_qs].q;
		rc = be_cmd_cq_create(adapter, cq, eq, false, 3);
		if (rc)
			return rc;
	}

	dev_info(&adapter->pdev->dev,
		 "created %d RSS queue(s) and 1 default RX queue\n",
		 adapter->num_rx_qs - 1);
	return 0;
}
1990
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001991static irqreturn_t be_intx(int irq, void *dev)
1992{
1993 struct be_adapter *adapter = dev;
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001994 int num_evts;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001995
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001996 /* With INTx only one EQ is used */
1997 num_evts = event_handle(&adapter->eq_obj[0]);
1998 if (num_evts)
1999 return IRQ_HANDLED;
2000 else
2001 return IRQ_NONE;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002002}
2003
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002004static irqreturn_t be_msix(int irq, void *dev)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002005{
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002006 struct be_eq_obj *eqo = dev;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002007
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002008 event_handle(eqo);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002009 return IRQ_HANDLED;
2010}
2011
Sathya Perla2e588f82011-03-11 02:49:26 +00002012static inline bool do_gro(struct be_rx_compl_info *rxcp)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002013{
Sathya Perla2e588f82011-03-11 02:49:26 +00002014 return (rxcp->tcpf && !rxcp->err) ? true : false;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002015}
2016
/* Reap up to @budget RX completions from @rxo's CQ, delivering good
 * frames to the stack (via GRO when do_gro() says so) and discarding
 * flush, partial-DMA and misdirected completions. Re-arms the CQ and
 * replenishes RX buffers when any work was done.
 * Returns the number of completions processed (<= @budget).
 */
static int be_process_rx(struct be_rx_obj *rxo, struct napi_struct *napi,
			int budget)
{
	struct be_adapter *adapter = rxo->adapter;
	struct be_queue_info *rx_cq = &rxo->cq;
	struct be_rx_compl_info *rxcp;
	u32 work_done;

	for (work_done = 0; work_done < budget; work_done++) {
		rxcp = be_rx_compl_get(rxo);
		if (!rxcp)
			break;

		/* Is it a flush compl that has no data */
		if (unlikely(rxcp->num_rcvd == 0))
			goto loop_continue;

		/* Discard compl with partial DMA Lancer B0 */
		if (unlikely(!rxcp->pkt_size)) {
			be_rx_compl_discard(rxo, rxcp);
			goto loop_continue;
		}

		/* On BE drop pkts that arrive due to imperfect filtering in
		 * promiscuous mode on some skews
		 */
		if (unlikely(rxcp->port != adapter->port_num &&
				!lancer_chip(adapter))) {
			be_rx_compl_discard(rxo, rxcp);
			goto loop_continue;
		}

		if (do_gro(rxcp))
			be_rx_compl_process_gro(rxo, napi, rxcp);
		else
			be_rx_compl_process(rxo, rxcp);
loop_continue:
		/* stats are updated even for discarded completions */
		be_rx_stats_update(rxo, rxcp);
	}

	if (work_done) {
		be_cq_notify(adapter, rx_cq->id, true, work_done);

		/* refill the RX ring before it runs dry */
		if (atomic_read(&rxo->q.used) < RX_FRAGS_REFILL_WM)
			be_post_rx_frags(rxo, GFP_ATOMIC);
	}

	return work_done;
}
2066
/* Reap up to @budget TX completions for @txo (tx queue index @idx),
 * free the consumed wrbs and wake the netdev subqueue if it had been
 * flow-stopped and enough ring space is now available.
 * Returns true when the CQ was fully drained within the budget.
 */
static bool be_process_tx(struct be_adapter *adapter, struct be_tx_obj *txo,
			int budget, int idx)
{
	struct be_eth_tx_compl *txcp;
	int num_wrbs = 0, work_done;

	for (work_done = 0; work_done < budget; work_done++) {
		txcp = be_tx_compl_get(&txo->cq);
		if (!txcp)
			break;
		num_wrbs += be_tx_compl_process(adapter, txo,
				AMAP_GET_BITS(struct amap_eth_tx_compl,
					wrb_index, txcp));
	}

	if (work_done) {
		be_cq_notify(adapter, txo->cq.id, true, work_done);
		atomic_sub(num_wrbs, &txo->q.used);

		/* As Tx wrbs have been freed up, wake up netdev queue
		 * if it was stopped due to lack of tx wrbs. */
		if (__netif_subqueue_stopped(adapter->netdev, idx) &&
			atomic_read(&txo->q.used) < txo->q.len / 2) {
			netif_wake_subqueue(adapter->netdev, idx);
		}

		/* u64 stats are updated under a seqcount for 32-bit safety */
		u64_stats_update_begin(&tx_stats(txo)->sync_compl);
		tx_stats(txo)->tx_compl += work_done;
		u64_stats_update_end(&tx_stats(txo)->sync_compl);
	}
	return (work_done < budget); /* Done */
}
Sathya Perla3c8def92011-06-12 20:01:58 +00002099
/* NAPI poll handler — one instance per event queue. Services every TXQ
 * and RXQ mapped to this EQ (queues are striped across EQs by index),
 * plus the MCC queue when this is the MCC EQ. Re-arms the EQ when all
 * work fit in the budget; otherwise stays in polling mode.
 */
int be_poll(struct napi_struct *napi, int budget)
{
	struct be_eq_obj *eqo = container_of(napi, struct be_eq_obj, napi);
	struct be_adapter *adapter = eqo->adapter;
	int max_work = 0, work, i;
	bool tx_done;

	/* Process all TXQs serviced by this EQ */
	for (i = eqo->idx; i < adapter->num_tx_qs; i += adapter->num_evt_qs) {
		tx_done = be_process_tx(adapter, &adapter->tx_obj[i],
					eqo->tx_budget, i);
		if (!tx_done)
			max_work = budget; /* TX not drained: keep polling */
	}

	/* This loop will iterate twice for EQ0 in which
	 * completions of the last RXQ (default one) are also processed
	 * For other EQs the loop iterates only once
	 */
	for (i = eqo->idx; i < adapter->num_rx_qs; i += adapter->num_evt_qs) {
		work = be_process_rx(&adapter->rx_obj[i], napi, budget);
		max_work = max(work, max_work);
	}

	if (is_mcc_eqo(eqo))
		be_process_mcc(adapter);

	if (max_work < budget) {
		napi_complete(napi);
		be_eq_notify(adapter, eqo->q.id, true, false, 0);
	} else {
		/* As we'll continue in polling mode, count and clear events */
		be_eq_notify(adapter, eqo->q.id, false, false, events_get(eqo));
	}
	return max_work;
}
2136
Padmanabh Ratnakarf67ef7b2012-07-12 03:57:09 +00002137void be_detect_error(struct be_adapter *adapter)
Ajit Khaparde7c185272010-07-29 06:16:33 +00002138{
Padmanabh Ratnakare1cfb672011-11-03 01:50:08 +00002139 u32 ue_lo = 0, ue_hi = 0, ue_lo_mask = 0, ue_hi_mask = 0;
2140 u32 sliport_status = 0, sliport_err1 = 0, sliport_err2 = 0;
Ajit Khaparde7c185272010-07-29 06:16:33 +00002141 u32 i;
2142
Padmanabh Ratnakarf67ef7b2012-07-12 03:57:09 +00002143 if (be_crit_error(adapter))
Sathya Perla72f02482011-11-10 19:17:58 +00002144 return;
2145
Padmanabh Ratnakare1cfb672011-11-03 01:50:08 +00002146 if (lancer_chip(adapter)) {
2147 sliport_status = ioread32(adapter->db + SLIPORT_STATUS_OFFSET);
2148 if (sliport_status & SLIPORT_STATUS_ERR_MASK) {
2149 sliport_err1 = ioread32(adapter->db +
2150 SLIPORT_ERROR1_OFFSET);
2151 sliport_err2 = ioread32(adapter->db +
2152 SLIPORT_ERROR2_OFFSET);
2153 }
2154 } else {
2155 pci_read_config_dword(adapter->pdev,
2156 PCICFG_UE_STATUS_LOW, &ue_lo);
2157 pci_read_config_dword(adapter->pdev,
2158 PCICFG_UE_STATUS_HIGH, &ue_hi);
2159 pci_read_config_dword(adapter->pdev,
2160 PCICFG_UE_STATUS_LOW_MASK, &ue_lo_mask);
2161 pci_read_config_dword(adapter->pdev,
2162 PCICFG_UE_STATUS_HI_MASK, &ue_hi_mask);
Ajit Khaparde7c185272010-07-29 06:16:33 +00002163
Padmanabh Ratnakarf67ef7b2012-07-12 03:57:09 +00002164 ue_lo = (ue_lo & ~ue_lo_mask);
2165 ue_hi = (ue_hi & ~ue_hi_mask);
Padmanabh Ratnakare1cfb672011-11-03 01:50:08 +00002166 }
Ajit Khaparde7c185272010-07-29 06:16:33 +00002167
Ajit Khaparde1451ae62012-10-08 18:18:21 +00002168 /* On certain platforms BE hardware can indicate spurious UEs.
2169 * Allow the h/w to stop working completely in case of a real UE.
2170 * Hence not setting the hw_error for UE detection.
2171 */
2172 if (sliport_status & SLIPORT_STATUS_ERR_MASK) {
Padmanabh Ratnakarf67ef7b2012-07-12 03:57:09 +00002173 adapter->hw_error = true;
Sathya Perla434b3642011-11-10 19:17:59 +00002174 dev_err(&adapter->pdev->dev,
Padmanabh Ratnakarf67ef7b2012-07-12 03:57:09 +00002175 "Error detected in the card\n");
2176 }
2177
2178 if (sliport_status & SLIPORT_STATUS_ERR_MASK) {
2179 dev_err(&adapter->pdev->dev,
2180 "ERR: sliport status 0x%x\n", sliport_status);
2181 dev_err(&adapter->pdev->dev,
2182 "ERR: sliport error1 0x%x\n", sliport_err1);
2183 dev_err(&adapter->pdev->dev,
2184 "ERR: sliport error2 0x%x\n", sliport_err2);
Ajit Khaparded053de92010-09-03 06:23:30 +00002185 }
2186
Padmanabh Ratnakare1cfb672011-11-03 01:50:08 +00002187 if (ue_lo) {
2188 for (i = 0; ue_lo; ue_lo >>= 1, i++) {
2189 if (ue_lo & 1)
Ajit Khaparde7c185272010-07-29 06:16:33 +00002190 dev_err(&adapter->pdev->dev,
2191 "UE: %s bit set\n", ue_status_low_desc[i]);
2192 }
2193 }
Padmanabh Ratnakarf67ef7b2012-07-12 03:57:09 +00002194
Padmanabh Ratnakare1cfb672011-11-03 01:50:08 +00002195 if (ue_hi) {
2196 for (i = 0; ue_hi; ue_hi >>= 1, i++) {
2197 if (ue_hi & 1)
Ajit Khaparde7c185272010-07-29 06:16:33 +00002198 dev_err(&adapter->pdev->dev,
2199 "UE: %s bit set\n", ue_status_hi_desc[i]);
2200 }
2201 }
2202
2203}
2204
Sathya Perla8d56ff12009-11-22 22:02:26 +00002205static void be_msix_disable(struct be_adapter *adapter)
2206{
Sathya Perlaac6a0c42011-03-21 20:49:25 +00002207 if (msix_enabled(adapter)) {
Sathya Perla8d56ff12009-11-22 22:02:26 +00002208 pci_disable_msix(adapter->pdev);
Sathya Perlaac6a0c42011-03-21 20:49:25 +00002209 adapter->num_msix_vec = 0;
Sathya Perla3abcded2010-10-03 22:12:27 -07002210 }
2211}
2212
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002213static uint be_num_rss_want(struct be_adapter *adapter)
2214{
Yuval Mintz30e80b52012-07-01 03:19:00 +00002215 u32 num = 0;
Padmanabh Ratnakarabb93952012-10-20 06:01:41 +00002216
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002217 if ((adapter->function_caps & BE_FUNCTION_CAPS_RSS) &&
Padmanabh Ratnakarabb93952012-10-20 06:01:41 +00002218 (lancer_chip(adapter) ||
2219 (!sriov_want(adapter) && be_physfn(adapter)))) {
2220 num = adapter->max_rss_queues;
Yuval Mintz30e80b52012-07-01 03:19:00 +00002221 num = min_t(u32, num, (u32)netif_get_num_default_rss_queues());
2222 }
2223 return num;
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002224}
2225
/* Try to enable MSI-x with enough vectors for the desired RSS rings
 * (plus RoCE vectors when supported). Falls back to the vector count
 * the OS actually grants; on total failure MSI-x stays disabled and the
 * driver will use INTx. Fills in num_msix_vec / num_msix_roce_vec.
 */
static void be_msix_enable(struct be_adapter *adapter)
{
#define BE_MIN_MSIX_VECTORS		1
	int i, status, num_vec, num_roce_vec = 0;
	struct device *dev = &adapter->pdev->dev;

	/* If RSS queues are not used, need a vec for default RX Q */
	num_vec = min(be_num_rss_want(adapter), num_online_cpus());
	if (be_roce_supported(adapter)) {
		/* reserve additional vectors for the RoCE function */
		num_roce_vec = min_t(u32, MAX_ROCE_MSIX_VECTORS,
					(num_online_cpus() + 1));
		num_roce_vec = min(num_roce_vec, MAX_ROCE_EQS);
		num_vec += num_roce_vec;
		num_vec = min(num_vec, MAX_MSIX_VECTORS);
	}
	num_vec = max(num_vec, BE_MIN_MSIX_VECTORS);

	for (i = 0; i < num_vec; i++)
		adapter->msix_entries[i].entry = i;

	status = pci_enable_msix(adapter->pdev, adapter->msix_entries, num_vec);
	if (status == 0) {
		goto done;
	} else if (status >= BE_MIN_MSIX_VECTORS) {
		/* a positive return is the number of vectors actually
		 * available; retry once with that smaller count */
		num_vec = status;
		if (pci_enable_msix(adapter->pdev, adapter->msix_entries,
				num_vec) == 0)
			goto done;
	}

	dev_warn(dev, "MSIx enable failed\n");
	return;
done:
	if (be_roce_supported(adapter)) {
		/* split the granted vectors between NIC and RoCE */
		if (num_vec > num_roce_vec) {
			adapter->num_msix_vec = num_vec - num_roce_vec;
			adapter->num_msix_roce_vec =
				num_vec - adapter->num_msix_vec;
		} else {
			adapter->num_msix_vec = num_vec;
			adapter->num_msix_roce_vec = 0;
		}
	} else
		adapter->num_msix_vec = num_vec;
	dev_info(dev, "enabled %d MSI-x vector(s)\n", adapter->num_msix_vec);
	return;
}
2273
Sathya Perlafe6d2a32010-11-21 23:25:50 +00002274static inline int be_msix_vec_get(struct be_adapter *adapter,
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002275 struct be_eq_obj *eqo)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002276{
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002277 return adapter->msix_entries[eqo->idx].vector;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002278}
2279
/* Request one MSI-x irq per event queue, naming each "<netdev>-q<i>".
 * On failure, free the irqs already requested (in reverse order) and
 * disable MSI-x entirely so the caller can fall back to INTx.
 */
static int be_msix_register(struct be_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	struct be_eq_obj *eqo;
	int status, i, vec;

	for_all_evt_queues(adapter, eqo, i) {
		sprintf(eqo->desc, "%s-q%d", netdev->name, i);
		vec = be_msix_vec_get(adapter, eqo);
		status = request_irq(vec, be_msix, 0, eqo->desc, eqo);
		if (status)
			goto err_msix;
	}

	return 0;
err_msix:
	/* unwind: free irqs i-1 .. 0 that were successfully requested */
	for (i--, eqo = &adapter->eq_obj[i]; i >= 0; i--, eqo--)
		free_irq(be_msix_vec_get(adapter, eqo), eqo);
	dev_warn(&adapter->pdev->dev, "MSIX Request IRQ failed - err %d\n",
		status);
	be_msix_disable(adapter);
	return status;
}
2303
2304static int be_irq_register(struct be_adapter *adapter)
2305{
2306 struct net_device *netdev = adapter->netdev;
2307 int status;
2308
Sathya Perlaac6a0c42011-03-21 20:49:25 +00002309 if (msix_enabled(adapter)) {
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002310 status = be_msix_register(adapter);
2311 if (status == 0)
2312 goto done;
Sarveshwar Bandiba343c72010-03-31 02:56:12 +00002313 /* INTx is not supported for VF */
2314 if (!be_physfn(adapter))
2315 return status;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002316 }
2317
2318 /* INTx */
2319 netdev->irq = adapter->pdev->irq;
2320 status = request_irq(netdev->irq, be_intx, IRQF_SHARED, netdev->name,
2321 adapter);
2322 if (status) {
2323 dev_err(&adapter->pdev->dev,
2324 "INTx request IRQ failed - err %d\n", status);
2325 return status;
2326 }
2327done:
2328 adapter->isr_registered = true;
2329 return 0;
2330}
2331
2332static void be_irq_unregister(struct be_adapter *adapter)
2333{
2334 struct net_device *netdev = adapter->netdev;
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002335 struct be_eq_obj *eqo;
Sathya Perla3abcded2010-10-03 22:12:27 -07002336 int i;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002337
2338 if (!adapter->isr_registered)
2339 return;
2340
2341 /* INTx */
Sathya Perlaac6a0c42011-03-21 20:49:25 +00002342 if (!msix_enabled(adapter)) {
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002343 free_irq(netdev->irq, adapter);
2344 goto done;
2345 }
2346
2347 /* MSIx */
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002348 for_all_evt_queues(adapter, eqo, i)
2349 free_irq(be_msix_vec_get(adapter, eqo), eqo);
Sathya Perla3abcded2010-10-03 22:12:27 -07002350
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002351done:
2352 adapter->isr_registered = false;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002353}
2354
/* Destroy all RX queues: ask the FW to tear each rxq down, give DMA a
 * grace period, drain leftover completions, then free ring memory.
 */
static void be_rx_qs_destroy(struct be_adapter *adapter)
{
	struct be_queue_info *q;
	struct be_rx_obj *rxo;
	int i;

	for_all_rx_queues(adapter, rxo, i) {
		q = &rxo->q;
		if (q->created) {
			be_cmd_rxq_destroy(adapter, q);
			/* After the rxq is invalidated, wait for a grace time
			 * of 1ms for all dma to end and the flush compl to
			 * arrive
			 */
			mdelay(1);
			be_rx_cq_clean(rxo);
		}
		be_queue_free(adapter, q);
	}
}
2375
/* netdev close (ndo_stop) handler. Quiesces the device in careful
 * order: RoCE first, then async MCC, interrupts, NAPI, irq handlers,
 * TX drain and finally RX queue teardown. Always returns 0.
 */
static int be_close(struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_eq_obj *eqo;
	int i;

	be_roce_dev_close(adapter);

	be_async_mcc_disable(adapter);

	/* Lancer does not use the host interrupt-enable bit */
	if (!lancer_chip(adapter))
		be_intr_set(adapter, false);

	/* stop NAPI and make sure no irq handler is still running */
	for_all_evt_queues(adapter, eqo, i) {
		napi_disable(&eqo->napi);
		if (msix_enabled(adapter))
			synchronize_irq(be_msix_vec_get(adapter, eqo));
		else
			synchronize_irq(netdev->irq);
		be_eq_clean(eqo);
	}

	be_irq_unregister(adapter);

	/* Wait for all pending tx completions to arrive so that
	 * all tx skbs are freed.
	 */
	be_tx_compl_clean(adapter);

	be_rx_qs_destroy(adapter);
	return 0;
}
2408
/* Allocate the RX rings, create them in the FW (the default RXQ must
 * be created first), program the 128-entry RSS indirection table when
 * multiple RX queues exist, and post the initial receive buffers.
 * Returns 0 on success or a negative error.
 */
static int be_rx_qs_create(struct be_adapter *adapter)
{
	struct be_rx_obj *rxo;
	int rc, i, j;
	u8 rsstable[128];

	for_all_rx_queues(adapter, rxo, i) {
		rc = be_queue_alloc(adapter, &rxo->q, RX_Q_LEN,
				    sizeof(struct be_eth_rx_d));
		if (rc)
			return rc;
	}

	/* The FW would like the default RXQ to be created first */
	rxo = default_rxo(adapter);
	rc = be_cmd_rxq_create(adapter, &rxo->q, rxo->cq.id, rx_frag_size,
			       adapter->if_handle, false, &rxo->rss_id);
	if (rc)
		return rc;

	for_all_rss_queues(adapter, rxo, i) {
		rc = be_cmd_rxq_create(adapter, &rxo->q, rxo->cq.id,
				       rx_frag_size, adapter->if_handle,
				       true, &rxo->rss_id);
		if (rc)
			return rc;
	}

	if (be_multi_rxq(adapter)) {
		/* fill the indirection table by cycling through the RSS
		 * ring ids until all 128 slots are populated */
		for (j = 0; j < 128; j += adapter->num_rx_qs - 1) {
			for_all_rss_queues(adapter, rxo, i) {
				if ((j + i) >= 128)
					break;
				rsstable[j + i] = rxo->rss_id;
			}
		}
		rc = be_cmd_rss_config(adapter, rsstable, 128);
		if (rc)
			return rc;
	}

	/* First time posting */
	for_all_rx_queues(adapter, rxo, i)
		be_post_rx_frags(rxo, GFP_KERNEL);
	return 0;
}
2455
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002456static int be_open(struct net_device *netdev)
2457{
2458 struct be_adapter *adapter = netdev_priv(netdev);
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002459 struct be_eq_obj *eqo;
Sathya Perla3abcded2010-10-03 22:12:27 -07002460 struct be_rx_obj *rxo;
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002461 struct be_tx_obj *txo;
Ajit Khapardeb236916a2011-12-30 12:15:40 +00002462 u8 link_status;
Sathya Perla3abcded2010-10-03 22:12:27 -07002463 int status, i;
Sathya Perla5fb379e2009-06-18 00:02:59 +00002464
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002465 status = be_rx_qs_create(adapter);
Sathya Perla482c9e72011-06-29 23:33:17 +00002466 if (status)
2467 goto err;
2468
Sathya Perla5fb379e2009-06-18 00:02:59 +00002469 be_irq_register(adapter);
2470
Sathya Perlafe6d2a32010-11-21 23:25:50 +00002471 if (!lancer_chip(adapter))
2472 be_intr_set(adapter, true);
Sathya Perla5fb379e2009-06-18 00:02:59 +00002473
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002474 for_all_rx_queues(adapter, rxo, i)
Sathya Perla3abcded2010-10-03 22:12:27 -07002475 be_cq_notify(adapter, rxo->cq.id, true, 0);
Sathya Perla5fb379e2009-06-18 00:02:59 +00002476
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002477 for_all_tx_queues(adapter, txo, i)
2478 be_cq_notify(adapter, txo->cq.id, true, 0);
2479
Sathya Perla7a1e9b22010-02-17 01:35:11 +00002480 be_async_mcc_enable(adapter);
2481
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002482 for_all_evt_queues(adapter, eqo, i) {
2483 napi_enable(&eqo->napi);
2484 be_eq_notify(adapter, eqo->q.id, true, false, 0);
2485 }
2486
Sathya Perla323ff712012-09-28 04:39:43 +00002487 status = be_cmd_link_status_query(adapter, NULL, &link_status, 0);
Ajit Khapardeb236916a2011-12-30 12:15:40 +00002488 if (!status)
2489 be_link_status_update(adapter, link_status);
2490
Parav Pandit045508a2012-03-26 14:27:13 +00002491 be_roce_dev_open(adapter);
Sathya Perla889cd4b2010-05-30 23:33:45 +00002492 return 0;
2493err:
2494 be_close(adapter->netdev);
2495 return -EIO;
Sathya Perla5fb379e2009-06-18 00:02:59 +00002496}
2497
Ajit Khaparde71d8d1b2009-12-03 06:16:59 +00002498static int be_setup_wol(struct be_adapter *adapter, bool enable)
2499{
2500 struct be_dma_mem cmd;
2501 int status = 0;
2502 u8 mac[ETH_ALEN];
2503
2504 memset(mac, 0, ETH_ALEN);
2505
2506 cmd.size = sizeof(struct be_cmd_req_acpi_wol_magic_config);
Ivan Vecera2b7bceb2011-02-02 08:05:12 +00002507 cmd.va = dma_alloc_coherent(&adapter->pdev->dev, cmd.size, &cmd.dma,
2508 GFP_KERNEL);
Ajit Khaparde71d8d1b2009-12-03 06:16:59 +00002509 if (cmd.va == NULL)
2510 return -1;
2511 memset(cmd.va, 0, cmd.size);
2512
2513 if (enable) {
2514 status = pci_write_config_dword(adapter->pdev,
2515 PCICFG_PM_CONTROL_OFFSET, PCICFG_PM_CONTROL_MASK);
2516 if (status) {
2517 dev_err(&adapter->pdev->dev,
Frans Pop2381a552010-03-24 07:57:36 +00002518 "Could not enable Wake-on-lan\n");
Ivan Vecera2b7bceb2011-02-02 08:05:12 +00002519 dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va,
2520 cmd.dma);
Ajit Khaparde71d8d1b2009-12-03 06:16:59 +00002521 return status;
2522 }
2523 status = be_cmd_enable_magic_wol(adapter,
2524 adapter->netdev->dev_addr, &cmd);
2525 pci_enable_wake(adapter->pdev, PCI_D3hot, 1);
2526 pci_enable_wake(adapter->pdev, PCI_D3cold, 1);
2527 } else {
2528 status = be_cmd_enable_magic_wol(adapter, mac, &cmd);
2529 pci_enable_wake(adapter->pdev, PCI_D3hot, 0);
2530 pci_enable_wake(adapter->pdev, PCI_D3cold, 0);
2531 }
2532
Ivan Vecera2b7bceb2011-02-02 08:05:12 +00002533 dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va, cmd.dma);
Ajit Khaparde71d8d1b2009-12-03 06:16:59 +00002534 return status;
2535}
2536
/*
 * Generate a seed MAC address from the PF MAC Address using jhash.
 * MAC Address for VFs are assigned incrementally starting from the seed.
 * These addresses are programmed in the ASIC by the PF and the VF driver
 * queries for the MAC address during its probe.
 * Returns the status of the last FW command issued (0 on full success).
 */
static inline int be_vf_eth_addr_config(struct be_adapter *adapter)
{
	u32 vf;
	int status = 0;
	u8 mac[ETH_ALEN];
	struct be_vf_cfg *vf_cfg;

	be_vf_eth_addr_generate(adapter, mac);

	for_all_vfs(adapter, vf_cfg, vf) {
		/* Lancer programs the MAC via a mac-list command; BE chips
		 * use pmac_add on the VF's interface */
		if (lancer_chip(adapter)) {
			status = be_cmd_set_mac_list(adapter, mac, 1, vf + 1);
		} else {
			status = be_cmd_pmac_add(adapter, mac,
						 vf_cfg->if_handle,
						 &vf_cfg->pmac_id, vf + 1);
		}

		if (status)
			dev_err(&adapter->pdev->dev,
			"Mac address assignment failed for VF %d\n", vf);
		else
			/* remember the address actually programmed */
			memcpy(vf_cfg->mac_addr, mac, ETH_ALEN);

		/* next VF gets the next consecutive address */
		mac[5] += 1;
	}
	return status;
}
2571
/* Undo VF setup: delete each VF's MAC and interface in the FW and
 * disable SR-IOV. FW cleanup is skipped when the VFs are still
 * assigned to guest VMs. Always frees the per-VF config array.
 */
static void be_vf_clear(struct be_adapter *adapter)
{
	struct be_vf_cfg *vf_cfg;
	u32 vf;

	if (be_find_vfs(adapter, ASSIGNED)) {
		dev_warn(&adapter->pdev->dev, "VFs are assigned to VMs\n");
		goto done;
	}

	for_all_vfs(adapter, vf_cfg, vf) {
		/* MAC removal mirrors how it was programmed per chip family */
		if (lancer_chip(adapter))
			be_cmd_set_mac_list(adapter, NULL, 0, vf + 1);
		else
			be_cmd_pmac_del(adapter, vf_cfg->if_handle,
					vf_cfg->pmac_id, vf + 1);

		be_cmd_if_destroy(adapter, vf_cfg->if_handle, vf + 1);
	}
	pci_disable_sriov(adapter->pdev);
done:
	kfree(adapter->vf_cfg);
	adapter->num_vfs = 0;
}
2596
/* Tear down everything be_setup() created: the periodic worker, VFs,
 * additional unicast MACs, the main interface and all queues; finally
 * release the MSI-x vectors. Always returns 0.
 */
static int be_clear(struct be_adapter *adapter)
{
	int i = 1;

	if (adapter->flags & BE_FLAGS_WORKER_SCHEDULED) {
		cancel_delayed_work_sync(&adapter->work);
		adapter->flags &= ~BE_FLAGS_WORKER_SCHEDULED;
	}

	if (sriov_enabled(adapter))
		be_vf_clear(adapter);

	/* delete the additional uc MACs; pmac_id[0] is the primary MAC */
	for (; adapter->uc_macs > 0; adapter->uc_macs--, i++)
		be_cmd_pmac_del(adapter, adapter->if_handle,
			adapter->pmac_id[i], 0);

	be_cmd_if_destroy(adapter, adapter->if_handle, 0);

	be_mcc_queues_destroy(adapter);
	be_rx_cqs_destroy(adapter);
	be_tx_queues_destroy(adapter);
	be_evt_queues_destroy(adapter);

	kfree(adapter->pmac_id);
	adapter->pmac_id = NULL;

	be_msix_disable(adapter);
	return 0;
}
2626
Padmanabh Ratnakarabb93952012-10-20 06:01:41 +00002627static void be_get_vf_if_cap_flags(struct be_adapter *adapter,
2628 u32 *cap_flags, u8 domain)
2629{
2630 bool profile_present = false;
2631 int status;
2632
2633 if (lancer_chip(adapter)) {
2634 status = be_cmd_get_profile_config(adapter, cap_flags, domain);
2635 if (!status)
2636 profile_present = true;
2637 }
2638
2639 if (!profile_present)
2640 *cap_flags = BE_IF_FLAGS_UNTAGGED | BE_IF_FLAGS_BROADCAST |
2641 BE_IF_FLAGS_MULTICAST;
2642}
2643
Sathya Perla39f1d942012-05-08 19:41:24 +00002644static int be_vf_setup_init(struct be_adapter *adapter)
Sathya Perla30128032011-11-10 19:17:57 +00002645{
Sathya Perla11ac75e2011-12-13 00:58:50 +00002646 struct be_vf_cfg *vf_cfg;
Sathya Perla30128032011-11-10 19:17:57 +00002647 int vf;
2648
Sathya Perla39f1d942012-05-08 19:41:24 +00002649 adapter->vf_cfg = kcalloc(adapter->num_vfs, sizeof(*vf_cfg),
2650 GFP_KERNEL);
2651 if (!adapter->vf_cfg)
2652 return -ENOMEM;
2653
Sathya Perla11ac75e2011-12-13 00:58:50 +00002654 for_all_vfs(adapter, vf_cfg, vf) {
2655 vf_cfg->if_handle = -1;
2656 vf_cfg->pmac_id = -1;
Sathya Perla30128032011-11-10 19:17:57 +00002657 }
Sathya Perla39f1d942012-05-08 19:41:24 +00002658 return 0;
Sathya Perla30128032011-11-10 19:17:57 +00002659}
2660
/* Enable SR-IOV and provision each VF from the PF: create the VF's vNIC
 * interface, assign MAC addresses, program a QoS rate limit, and record the
 * VF's default VLAN.  Called from be_setup() when the num_vfs module
 * parameter is set.  Returns 0 on success and also when SR-IOV cannot be
 * enabled at all (treated as non-fatal); returns a negative status if
 * per-VF provisioning fails part-way (caller unwinds via be_clear()).
 */
static int be_vf_setup(struct be_adapter *adapter)
{
	struct be_vf_cfg *vf_cfg;
	struct device *dev = &adapter->pdev->dev;
	u32 cap_flags, en_flags, vf;
	u16 def_vlan, lnk_speed;
	int status, enabled_vfs;

	/* If VFs were left enabled by a previous driver load, reuse them
	 * as-is and ignore the module parameter.
	 */
	enabled_vfs = be_find_vfs(adapter, ENABLED);
	if (enabled_vfs) {
		dev_warn(dev, "%d VFs are already enabled\n", enabled_vfs);
		dev_warn(dev, "Ignoring num_vfs=%d setting\n", num_vfs);
		return 0;
	}

	/* Clamp the request to what the device advertises */
	if (num_vfs > adapter->dev_num_vfs) {
		dev_warn(dev, "Device supports %d VFs and not %d\n",
			 adapter->dev_num_vfs, num_vfs);
		num_vfs = adapter->dev_num_vfs;
	}

	status = pci_enable_sriov(adapter->pdev, num_vfs);
	if (!status) {
		adapter->num_vfs = num_vfs;
	} else {
		/* Platform doesn't support SRIOV though device supports it */
		dev_warn(dev, "SRIOV enable failed\n");
		return 0;
	}

	status = be_vf_setup_init(adapter);
	if (status)
		goto err;

	/* Create a vNIC interface for each VF.  Domain numbers are 1-based
	 * (domain 0 is the PF), hence vf + 1 everywhere below.
	 */
	for_all_vfs(adapter, vf_cfg, vf) {
		be_get_vf_if_cap_flags(adapter, &cap_flags, vf + 1);

		en_flags = cap_flags & (BE_IF_FLAGS_UNTAGGED |
					BE_IF_FLAGS_BROADCAST |
					BE_IF_FLAGS_MULTICAST);

		status = be_cmd_if_create(adapter, cap_flags, en_flags,
					  &vf_cfg->if_handle, vf + 1);
		if (status)
			goto err;
	}

	/* Only assign fresh MAC addresses when the VFs were created by us
	 * just now (enabled_vfs was zero above).
	 */
	if (!enabled_vfs) {
		status = be_vf_eth_addr_config(adapter);
		if (status)
			goto err;
	}

	for_all_vfs(adapter, vf_cfg, vf) {
		/* Cap each VF at 1Gbps; tx_rate is kept in multiples of
		 * 10 Mb/s.
		 */
		lnk_speed = 1000;
		status = be_cmd_set_qos(adapter, lnk_speed, vf + 1);
		if (status)
			goto err;
		vf_cfg->tx_rate = lnk_speed * 10;

		/* Cache the VF's default (pvid) VLAN from the HW switch */
		status = be_cmd_get_hsw_config(adapter, &def_vlan,
					       vf + 1, vf_cfg->if_handle);
		if (status)
			goto err;
		vf_cfg->def_vid = def_vlan;
	}
	return 0;
err:
	return status;
}
2731
Sathya Perla30128032011-11-10 19:17:57 +00002732static void be_setup_init(struct be_adapter *adapter)
2733{
2734 adapter->vlan_prio_bmap = 0xff;
Ajit Khaparde42f11cf2012-04-21 18:53:22 +00002735 adapter->phy.link_speed = -1;
Sathya Perla30128032011-11-10 19:17:57 +00002736 adapter->if_handle = -1;
2737 adapter->be3_native = false;
2738 adapter->promiscuous = false;
2739 adapter->eq_next_idx = 0;
2740}
2741
/* Determine the MAC address this function should use, and report via
 * *active_mac whether that address is already programmed on the interface
 * (so the caller knows if a pmac_add is still needed).  On Lancer the MAC
 * comes from the FW MAC list (pmac id returned via *pmac_id); on BE3 the PF
 * queries its permanent MAC while a VF queries the soft MAC its PF assigned.
 * Returns 0 on success or the FW command status.
 */
static int be_get_mac_addr(struct be_adapter *adapter, u8 *mac, u32 if_handle,
			   bool *active_mac, u32 *pmac_id)
{
	int status = 0;

	/* A non-zero perm_addr means a MAC was already established earlier
	 * (presumably a re-initialization path) - reuse the current dev_addr.
	 */
	if (!is_zero_ether_addr(adapter->netdev->perm_addr)) {
		memcpy(mac, adapter->netdev->dev_addr, ETH_ALEN);
		/* Only a BE3 VF's MAC is already active (set by its PF) */
		if (!lancer_chip(adapter) && !be_physfn(adapter))
			*active_mac = true;
		else
			*active_mac = false;

		return status;
	}

	if (lancer_chip(adapter)) {
		status = be_cmd_get_mac_from_list(adapter, mac,
						  active_mac, pmac_id, 0);
		if (*active_mac) {
			status = be_cmd_mac_addr_query(adapter, mac, false,
						       if_handle, *pmac_id);
		}
	} else if (be_physfn(adapter)) {
		/* For BE3, for PF get permanent MAC */
		status = be_cmd_mac_addr_query(adapter, mac, true, 0, 0);
		*active_mac = false;
	} else {
		/* For BE3, for VF get soft MAC assigned by PF*/
		status = be_cmd_mac_addr_query(adapter, mac, false,
					       if_handle, 0);
		*active_mac = true;
	}
	return status;
}
2776
/* Populate the adapter's per-function resource limits (max MACs, VLANs,
 * multicast entries, queue counts, interface capability flags).  On Lancer
 * the limits come from the FW function-config profile and are then clamped
 * to driver maximums; otherwise chip-generation defaults are used.
 */
static void be_get_resources(struct be_adapter *adapter)
{
	int status;
	bool profile_present = false;

	if (lancer_chip(adapter)) {
		status = be_cmd_get_func_config(adapter);

		if (!status)
			profile_present = true;
	}

	if (profile_present) {
		/* Sanity fixes for Lancer */
		adapter->max_pmac_cnt = min_t(u16, adapter->max_pmac_cnt,
					      BE_UC_PMAC_COUNT);
		adapter->max_vlans = min_t(u16, adapter->max_vlans,
					   BE_NUM_VLANS_SUPPORTED);
		adapter->max_mcast_mac = min_t(u16, adapter->max_mcast_mac,
					       BE_MAX_MC);
		adapter->max_tx_queues = min_t(u16, adapter->max_tx_queues,
					       MAX_TX_QS);
		adapter->max_rss_queues = min_t(u16, adapter->max_rss_queues,
						BE3_MAX_RSS_QS);
		adapter->max_event_queues = min_t(u16,
						  adapter->max_event_queues,
						  BE3_MAX_RSS_QS);

		/* If RSS would consume every RX queue, keep one back
		 * (presumably for the default non-RSS RX queue - the profile
		 * counts are FW-provided; TODO confirm against FW spec)
		 */
		if (adapter->max_rss_queues &&
		    adapter->max_rss_queues == adapter->max_rx_queues)
			adapter->max_rss_queues -= 1;

		/* Each RSS queue needs its own event queue */
		if (adapter->max_event_queues < adapter->max_rss_queues)
			adapter->max_rss_queues = adapter->max_event_queues;

	} else {
		/* No FW profile: use chip-generation defaults */
		if (be_physfn(adapter))
			adapter->max_pmac_cnt = BE_UC_PMAC_COUNT;
		else
			adapter->max_pmac_cnt = BE_VF_UC_PMAC_COUNT;

		/* FLEX10 partitions share the VLAN table across functions */
		if (adapter->function_mode & FLEX10_MODE)
			adapter->max_vlans = BE_NUM_VLANS_SUPPORTED/8;
		else
			adapter->max_vlans = BE_NUM_VLANS_SUPPORTED;

		adapter->max_mcast_mac = BE_MAX_MC;
		adapter->max_tx_queues = MAX_TX_QS;
		adapter->max_rss_queues = (adapter->be3_native) ?
			BE3_MAX_RSS_QS : BE2_MAX_RSS_QS;
		adapter->max_event_queues = BE3_MAX_RSS_QS;

		adapter->if_cap_flags = BE_IF_FLAGS_UNTAGGED |
					BE_IF_FLAGS_BROADCAST |
					BE_IF_FLAGS_MULTICAST |
					BE_IF_FLAGS_PASS_L3L4_ERRORS |
					BE_IF_FLAGS_MCAST_PROMISCUOUS |
					BE_IF_FLAGS_VLAN_PROMISCUOUS |
					BE_IF_FLAGS_PROMISCUOUS;

		if (adapter->function_caps & BE_FUNCTION_CAPS_RSS)
			adapter->if_cap_flags |= BE_IF_FLAGS_RSS;
	}
}
2841
Sathya Perla39f1d942012-05-08 19:41:24 +00002842/* Routine to query per function resource limits */
2843static int be_get_config(struct be_adapter *adapter)
2844{
Padmanabh Ratnakarabb93952012-10-20 06:01:41 +00002845 int pos, status;
Sathya Perla39f1d942012-05-08 19:41:24 +00002846 u16 dev_num_vfs;
2847
Padmanabh Ratnakarabb93952012-10-20 06:01:41 +00002848 status = be_cmd_query_fw_cfg(adapter, &adapter->port_num,
2849 &adapter->function_mode,
2850 &adapter->function_caps);
2851 if (status)
2852 goto err;
2853
2854 be_get_resources(adapter);
2855
2856 /* primary mac needs 1 pmac entry */
2857 adapter->pmac_id = kcalloc(adapter->max_pmac_cnt + 1,
2858 sizeof(u32), GFP_KERNEL);
2859 if (!adapter->pmac_id) {
2860 status = -ENOMEM;
2861 goto err;
2862 }
2863
Sathya Perla39f1d942012-05-08 19:41:24 +00002864 pos = pci_find_ext_capability(adapter->pdev, PCI_EXT_CAP_ID_SRIOV);
2865 if (pos) {
2866 pci_read_config_word(adapter->pdev, pos + PCI_SRIOV_TOTAL_VF,
2867 &dev_num_vfs);
Vasundhara Volam7c5a5242012-08-28 20:37:41 +00002868 if (!lancer_chip(adapter))
2869 dev_num_vfs = min_t(u16, dev_num_vfs, MAX_VFS);
Sathya Perla39f1d942012-05-08 19:41:24 +00002870 adapter->dev_num_vfs = dev_num_vfs;
2871 }
Padmanabh Ratnakarabb93952012-10-20 06:01:41 +00002872err:
2873 return status;
Sathya Perla39f1d942012-05-08 19:41:24 +00002874}
2875
/* Bring the function up: query FW limits, enable MSI-x, create event/CQ/MCC
 * queues, create the vNIC interface, establish the MAC address, create TX
 * queues, restore VLAN/RX-mode/flow-control settings, optionally provision
 * VFs, and finally start the periodic worker.  The steps are strictly
 * ordered (e.g. MCC queues must exist before most FW cmds that follow).
 * On any failure everything done so far is unwound via be_clear().
 */
static int be_setup(struct be_adapter *adapter)
{
	struct device *dev = &adapter->pdev->dev;
	u32 en_flags;
	u32 tx_fc, rx_fc;
	int status;
	u8 mac[ETH_ALEN];
	bool active_mac;

	be_setup_init(adapter);

	if (!lancer_chip(adapter))
		be_cmd_req_native_mode(adapter);

	/* Learn resource limits and allocate the pmac-id table */
	status = be_get_config(adapter);
	if (status)
		goto err;

	be_msix_enable(adapter);

	status = be_evt_queues_create(adapter);
	if (status)
		goto err;

	status = be_tx_cqs_create(adapter);
	if (status)
		goto err;

	status = be_rx_cqs_create(adapter);
	if (status)
		goto err;

	status = be_mcc_queues_create(adapter);
	if (status)
		goto err;

	/* Request capabilities, but only enable the subset we need now,
	 * restricted to what the function actually supports.
	 */
	en_flags = BE_IF_FLAGS_UNTAGGED | BE_IF_FLAGS_BROADCAST |
		BE_IF_FLAGS_MULTICAST | BE_IF_FLAGS_PASS_L3L4_ERRORS;

	if (adapter->function_caps & BE_FUNCTION_CAPS_RSS)
		en_flags |= BE_IF_FLAGS_RSS;

	en_flags = en_flags & adapter->if_cap_flags;

	status = be_cmd_if_create(adapter, adapter->if_cap_flags, en_flags,
				  &adapter->if_handle, 0);
	if (status != 0)
		goto err;

	/* Establish the primary MAC; program it only if it is not already
	 * active on the interface (see be_get_mac_addr()).
	 */
	memset(mac, 0, ETH_ALEN);
	active_mac = false;
	status = be_get_mac_addr(adapter, mac, adapter->if_handle,
				 &active_mac, &adapter->pmac_id[0]);
	if (status != 0)
		goto err;

	if (!active_mac) {
		status = be_cmd_pmac_add(adapter, mac, adapter->if_handle,
					 &adapter->pmac_id[0], 0);
		if (status != 0)
			goto err;
	}

	/* First-time setup: publish the MAC to the net_device */
	if (is_zero_ether_addr(adapter->netdev->dev_addr)) {
		memcpy(adapter->netdev->dev_addr, mac, ETH_ALEN);
		memcpy(adapter->netdev->perm_addr, mac, ETH_ALEN);
	}

	status = be_tx_qs_create(adapter);
	if (status)
		goto err;

	be_cmd_get_fw_ver(adapter, adapter->fw_ver, NULL);

	/* Re-program VLAN filters that were configured before this setup */
	if (adapter->vlans_added)
		be_vid_config(adapter);

	be_set_rx_mode(adapter->netdev);

	/* Sync flow control with the user-requested settings */
	be_cmd_get_flow_control(adapter, &tx_fc, &rx_fc);

	if (rx_fc != adapter->rx_fc || tx_fc != adapter->tx_fc)
		be_cmd_set_flow_control(adapter, adapter->tx_fc,
					adapter->rx_fc);

	/* PF only: provision VFs if requested and supported */
	if (be_physfn(adapter) && num_vfs) {
		if (adapter->dev_num_vfs)
			be_vf_setup(adapter);
		else
			dev_warn(dev, "device doesn't support SRIOV\n");
	}

	be_cmd_get_phy_info(adapter);
	if (be_pause_supported(adapter))
		adapter->phy.fc_autoneg = 1;

	schedule_delayed_work(&adapter->work, msecs_to_jiffies(1000));
	adapter->flags |= BE_FLAGS_WORKER_SCHEDULED;
	return 0;
err:
	be_clear(adapter);
	return status;
}
2979
#ifdef CONFIG_NET_POLL_CONTROLLER
/* netconsole/netpoll hook: service every event queue by hand, the way the
 * interrupt handler would, so RX/TX completions are still processed when
 * interrupts cannot be taken.
 */
static void be_netpoll(struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_eq_obj *eqo;
	int i;

	for_all_evt_queues(adapter, eqo, i)
		event_handle(eqo);
}
#endif
2993
#define FW_FILE_HDR_SIGN 	"ServerEngines Corp. "

/* Cookie that marks the start of a flash_section_info block inside a UFI
 * image; split into two 16-byte halves to match the on-flash layout.
 * It is only ever read (memcmp() in get_fsec_info()), so give it internal
 * linkage and const-qualify it instead of exporting a writable global
 * named "flash_cookie" into the kernel-wide namespace.
 */
static const char flash_cookie[2][16] = {"*** SE FLAS", "H DIRECTORY *** "};
2996
Sarveshwar Bandifa9a6fe2009-11-20 14:23:47 -08002997static bool be_flash_redboot(struct be_adapter *adapter,
Ajit Khaparde3f0d4562010-02-09 01:30:35 +00002998 const u8 *p, u32 img_start, int image_size,
2999 int hdr_size)
Sarveshwar Bandifa9a6fe2009-11-20 14:23:47 -08003000{
3001 u32 crc_offset;
3002 u8 flashed_crc[4];
3003 int status;
Ajit Khaparde3f0d4562010-02-09 01:30:35 +00003004
3005 crc_offset = hdr_size + img_start + image_size - 4;
3006
Sarveshwar Bandifa9a6fe2009-11-20 14:23:47 -08003007 p += crc_offset;
Ajit Khaparde3f0d4562010-02-09 01:30:35 +00003008
3009 status = be_cmd_get_flash_crc(adapter, flashed_crc,
Ajit Khapardef510fc62010-03-31 01:47:45 +00003010 (image_size - 4));
Sarveshwar Bandifa9a6fe2009-11-20 14:23:47 -08003011 if (status) {
3012 dev_err(&adapter->pdev->dev,
3013 "could not get crc from flash, not flashing redboot\n");
3014 return false;
3015 }
3016
3017 /*update redboot only if crc does not match*/
3018 if (!memcmp(flashed_crc, p, 4))
3019 return false;
3020 else
3021 return true;
Sarveshwar Bandifa9a6fe2009-11-20 14:23:47 -08003022}
3023
Sathya Perla306f1342011-08-02 19:57:45 +00003024static bool phy_flashing_required(struct be_adapter *adapter)
3025{
Ajit Khaparde42f11cf2012-04-21 18:53:22 +00003026 return (adapter->phy.phy_type == TN_8022 &&
3027 adapter->phy.interface_type == PHY_TYPE_BASET_10GB);
Sathya Perla306f1342011-08-02 19:57:45 +00003028}
3029
Padmanabh Ratnakarc165541e2012-04-25 01:47:15 +00003030static bool is_comp_in_ufi(struct be_adapter *adapter,
3031 struct flash_section_info *fsec, int type)
3032{
3033 int i = 0, img_type = 0;
3034 struct flash_section_info_g2 *fsec_g2 = NULL;
3035
3036 if (adapter->generation != BE_GEN3)
3037 fsec_g2 = (struct flash_section_info_g2 *)fsec;
3038
3039 for (i = 0; i < MAX_FLASH_COMP; i++) {
3040 if (fsec_g2)
3041 img_type = le32_to_cpu(fsec_g2->fsec_entry[i].type);
3042 else
3043 img_type = le32_to_cpu(fsec->fsec_entry[i].type);
3044
3045 if (img_type == type)
3046 return true;
3047 }
3048 return false;
3049
3050}
3051
3052struct flash_section_info *get_fsec_info(struct be_adapter *adapter,
3053 int header_size,
3054 const struct firmware *fw)
3055{
3056 struct flash_section_info *fsec = NULL;
3057 const u8 *p = fw->data;
3058
3059 p += header_size;
3060 while (p < (fw->data + fw->size)) {
3061 fsec = (struct flash_section_info *)p;
3062 if (!memcmp(flash_cookie, fsec->cookie, sizeof(flash_cookie)))
3063 return fsec;
3064 p += 32;
3065 }
3066 return NULL;
3067}
3068
/* Flash every eligible firmware component found in the UFI image.
 * For each component listed in the generation-specific table that is also
 * present in the UFI's section table, the image bytes are copied through
 * the pre-allocated DMA buffer (flash_cmd) to the adapter in 32KB chunks:
 * intermediate chunks use a SAVE op and the final chunk a FLASH op, which
 * commits the component.  Returns 0 on success, -1 on any failure.
 */
static int be_flash_data(struct be_adapter *adapter,
			const struct firmware *fw,
			struct be_dma_mem *flash_cmd,
			int num_of_images)

{
	int status = 0, i, filehdr_size = 0;
	int img_hdrs_size = (num_of_images * sizeof(struct image_hdr));
	u32 total_bytes = 0, flash_op;
	int num_bytes;
	const u8 *p = fw->data;
	struct be_cmd_write_flashrom *req = flash_cmd->va;
	const struct flash_comp *pflashcomp;
	int num_comp, hdr_size;
	struct flash_section_info *fsec = NULL;

	/* Per-component table for GEN3 chips:
	 * { file offset, flash optype, max size, UFI image type }
	 */
	struct flash_comp gen3_flash_types[] = {
		{ FLASH_iSCSI_PRIMARY_IMAGE_START_g3, OPTYPE_ISCSI_ACTIVE,
			FLASH_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_iSCSI},
		{ FLASH_REDBOOT_START_g3, OPTYPE_REDBOOT,
			FLASH_REDBOOT_IMAGE_MAX_SIZE_g3, IMAGE_BOOT_CODE},
		{ FLASH_iSCSI_BIOS_START_g3, OPTYPE_BIOS,
			FLASH_BIOS_IMAGE_MAX_SIZE_g3, IMAGE_OPTION_ROM_ISCSI},
		{ FLASH_PXE_BIOS_START_g3, OPTYPE_PXE_BIOS,
			FLASH_BIOS_IMAGE_MAX_SIZE_g3, IMAGE_OPTION_ROM_PXE},
		{ FLASH_FCoE_BIOS_START_g3, OPTYPE_FCOE_BIOS,
			FLASH_BIOS_IMAGE_MAX_SIZE_g3, IMAGE_OPTION_ROM_FCoE},
		{ FLASH_iSCSI_BACKUP_IMAGE_START_g3, OPTYPE_ISCSI_BACKUP,
			FLASH_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_BACKUP_iSCSI},
		{ FLASH_FCoE_PRIMARY_IMAGE_START_g3, OPTYPE_FCOE_FW_ACTIVE,
			FLASH_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_FCoE},
		{ FLASH_FCoE_BACKUP_IMAGE_START_g3, OPTYPE_FCOE_FW_BACKUP,
			FLASH_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_BACKUP_FCoE},
		{ FLASH_NCSI_START_g3, OPTYPE_NCSI_FW,
			FLASH_NCSI_IMAGE_MAX_SIZE_g3, IMAGE_NCSI},
		{ FLASH_PHY_FW_START_g3, OPTYPE_PHY_FW,
			FLASH_PHY_FW_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_PHY}
	};

	/* GEN2 table: same layout, no NCSI / PHY firmware components */
	struct flash_comp gen2_flash_types[] = {
		{ FLASH_iSCSI_PRIMARY_IMAGE_START_g2, OPTYPE_ISCSI_ACTIVE,
			FLASH_IMAGE_MAX_SIZE_g2, IMAGE_FIRMWARE_iSCSI},
		{ FLASH_REDBOOT_START_g2, OPTYPE_REDBOOT,
			FLASH_REDBOOT_IMAGE_MAX_SIZE_g2, IMAGE_BOOT_CODE},
		{ FLASH_iSCSI_BIOS_START_g2, OPTYPE_BIOS,
			FLASH_BIOS_IMAGE_MAX_SIZE_g2, IMAGE_OPTION_ROM_ISCSI},
		{ FLASH_PXE_BIOS_START_g2, OPTYPE_PXE_BIOS,
			FLASH_BIOS_IMAGE_MAX_SIZE_g2, IMAGE_OPTION_ROM_PXE},
		{ FLASH_FCoE_BIOS_START_g2, OPTYPE_FCOE_BIOS,
			FLASH_BIOS_IMAGE_MAX_SIZE_g2, IMAGE_OPTION_ROM_FCoE},
		{ FLASH_iSCSI_BACKUP_IMAGE_START_g2, OPTYPE_ISCSI_BACKUP,
			FLASH_IMAGE_MAX_SIZE_g2, IMAGE_FIRMWARE_BACKUP_iSCSI},
		{ FLASH_FCoE_PRIMARY_IMAGE_START_g2, OPTYPE_FCOE_FW_ACTIVE,
			FLASH_IMAGE_MAX_SIZE_g2, IMAGE_FIRMWARE_FCoE},
		{ FLASH_FCoE_BACKUP_IMAGE_START_g2, OPTYPE_FCOE_FW_BACKUP,
			FLASH_IMAGE_MAX_SIZE_g2, IMAGE_FIRMWARE_BACKUP_FCoE}
	};

	if (adapter->generation == BE_GEN3) {
		pflashcomp = gen3_flash_types;
		filehdr_size = sizeof(struct flash_file_hdr_g3);
		num_comp = ARRAY_SIZE(gen3_flash_types);
	} else {
		pflashcomp = gen2_flash_types;
		filehdr_size = sizeof(struct flash_file_hdr_g2);
		num_comp = ARRAY_SIZE(gen2_flash_types);
	}
	/* Get flash section info*/
	fsec = get_fsec_info(adapter, filehdr_size + img_hdrs_size, fw);
	if (!fsec) {
		dev_err(&adapter->pdev->dev,
			"Invalid Cookie. UFI corrupted ?\n");
		return -1;
	}
	for (i = 0; i < num_comp; i++) {
		/* Skip components the UFI image does not carry */
		if (!is_comp_in_ufi(adapter, fsec, pflashcomp[i].img_type))
			continue;

		/* NCSI firmware needs a minimum running FW version */
		if ((pflashcomp[i].optype == OPTYPE_NCSI_FW) &&
		    memcmp(adapter->fw_ver, "3.102.148.0", 11) < 0)
			continue;

		if (pflashcomp[i].optype == OPTYPE_PHY_FW) {
			if (!phy_flashing_required(adapter))
				continue;
		}

		hdr_size = filehdr_size +
			   (num_of_images * sizeof(struct image_hdr));

		/* Boot code is flashed only when its CRC differs */
		if ((pflashcomp[i].optype == OPTYPE_REDBOOT) &&
		    (!be_flash_redboot(adapter, fw->data, pflashcomp[i].offset,
			pflashcomp[i].size, hdr_size)))
			continue;

		/* Flash the component */
		p = fw->data;
		p += filehdr_size + pflashcomp[i].offset + img_hdrs_size;
		/* Guard against a component that runs past the file end */
		if (p + pflashcomp[i].size > fw->data + fw->size)
			return -1;
		total_bytes = pflashcomp[i].size;
		while (total_bytes) {
			if (total_bytes > 32*1024)
				num_bytes = 32*1024;
			else
				num_bytes = total_bytes;
			total_bytes -= num_bytes;
			/* Final chunk uses FLASH (commit); earlier chunks
			 * use SAVE (accumulate)
			 */
			if (!total_bytes) {
				if (pflashcomp[i].optype == OPTYPE_PHY_FW)
					flash_op = FLASHROM_OPER_PHY_FLASH;
				else
					flash_op = FLASHROM_OPER_FLASH;
			} else {
				if (pflashcomp[i].optype == OPTYPE_PHY_FW)
					flash_op = FLASHROM_OPER_PHY_SAVE;
				else
					flash_op = FLASHROM_OPER_SAVE;
			}
			/* Stage the chunk in the DMA-able request buffer */
			memcpy(req->params.data_buf, p, num_bytes);
			p += num_bytes;
			status = be_cmd_write_flashrom(adapter, flash_cmd,
				pflashcomp[i].optype, flash_op, num_bytes);
			if (status) {
				/* FW rejecting a PHY-FW op is tolerated
				 * (presumably FW without PHY-flash support -
				 * TODO confirm); skip to the next component
				 */
				if ((status == ILLEGAL_IOCTL_REQ) &&
					(pflashcomp[i].optype ==
						OPTYPE_PHY_FW))
					break;
				dev_err(&adapter->pdev->dev,
					"cmd to write to flash rom failed.\n");
				return -1;
			}
		}
	}
	return 0;
}
3204
Ajit Khaparde3f0d4562010-02-09 01:30:35 +00003205static int get_ufigen_type(struct flash_file_hdr_g2 *fhdr)
3206{
3207 if (fhdr == NULL)
3208 return 0;
3209 if (fhdr->build[0] == '3')
3210 return BE_GEN3;
3211 else if (fhdr->build[0] == '2')
3212 return BE_GEN2;
3213 else
3214 return 0;
3215}
3216
Padmanabh Ratnakarf67ef7b2012-07-12 03:57:09 +00003217static int lancer_wait_idle(struct be_adapter *adapter)
3218{
3219#define SLIPORT_IDLE_TIMEOUT 30
3220 u32 reg_val;
3221 int status = 0, i;
3222
3223 for (i = 0; i < SLIPORT_IDLE_TIMEOUT; i++) {
3224 reg_val = ioread32(adapter->db + PHYSDEV_CONTROL_OFFSET);
3225 if ((reg_val & PHYSDEV_CONTROL_INP_MASK) == 0)
3226 break;
3227
3228 ssleep(1);
3229 }
3230
3231 if (i == SLIPORT_IDLE_TIMEOUT)
3232 status = -1;
3233
3234 return status;
3235}
3236
3237static int lancer_fw_reset(struct be_adapter *adapter)
3238{
3239 int status = 0;
3240
3241 status = lancer_wait_idle(adapter);
3242 if (status)
3243 return status;
3244
3245 iowrite32(PHYSDEV_CONTROL_FW_RESET_MASK, adapter->db +
3246 PHYSDEV_CONTROL_OFFSET);
3247
3248 return status;
3249}
3250
/* Download a firmware image to a Lancer chip: stream the image through a
 * DMA-coherent buffer in 32KB write-object commands, then issue a
 * zero-length write to commit it.  Depending on the FW's reported
 * change_status, either reset the FW in place or tell the user a reboot is
 * needed.  Returns 0 on success or a negative/FW status on failure.
 */
static int lancer_fw_download(struct be_adapter *adapter,
				const struct firmware *fw)
{
#define LANCER_FW_DOWNLOAD_CHUNK (32 * 1024)
#define LANCER_FW_DOWNLOAD_LOCATION "/prg"
	struct be_dma_mem flash_cmd;
	const u8 *data_ptr = NULL;
	u8 *dest_image_ptr = NULL;
	size_t image_size = 0;
	u32 chunk_size = 0;
	u32 data_written = 0;
	u32 offset = 0;
	int status = 0;
	u8 add_status = 0;
	u8 change_status;

	/* The FW write-object interface works in 4-byte units */
	if (!IS_ALIGNED(fw->size, sizeof(u32))) {
		dev_err(&adapter->pdev->dev,
			"FW Image not properly aligned. "
			"Length must be 4 byte aligned.\n");
		status = -EINVAL;
		goto lancer_fw_exit;
	}

	/* One buffer holds the cmd header followed by a full data chunk */
	flash_cmd.size = sizeof(struct lancer_cmd_req_write_object)
				+ LANCER_FW_DOWNLOAD_CHUNK;
	flash_cmd.va = dma_alloc_coherent(&adapter->pdev->dev, flash_cmd.size,
						&flash_cmd.dma, GFP_KERNEL);
	if (!flash_cmd.va) {
		status = -ENOMEM;
		dev_err(&adapter->pdev->dev,
			"Memory allocation failure while flashing\n");
		goto lancer_fw_exit;
	}

	dest_image_ptr = flash_cmd.va +
				sizeof(struct lancer_cmd_req_write_object);
	image_size = fw->size;
	data_ptr = fw->data;

	/* Stream the image; advance by what the FW reports as written,
	 * which may be less than the chunk submitted.
	 */
	while (image_size) {
		chunk_size = min_t(u32, image_size, LANCER_FW_DOWNLOAD_CHUNK);

		/* Copy the image chunk content. */
		memcpy(dest_image_ptr, data_ptr, chunk_size);

		status = lancer_cmd_write_object(adapter, &flash_cmd,
						 chunk_size, offset,
						 LANCER_FW_DOWNLOAD_LOCATION,
						 &data_written, &change_status,
						 &add_status);
		if (status)
			break;

		offset += data_written;
		data_ptr += data_written;
		image_size -= data_written;
	}

	if (!status) {
		/* Commit the FW written: a zero-length write finalizes it */
		status = lancer_cmd_write_object(adapter, &flash_cmd,
						 0, offset,
						 LANCER_FW_DOWNLOAD_LOCATION,
						 &data_written, &change_status,
						 &add_status);
	}

	/* DMA buffer is no longer needed whatever the outcome */
	dma_free_coherent(&adapter->pdev->dev, flash_cmd.size, flash_cmd.va,
				flash_cmd.dma);
	if (status) {
		dev_err(&adapter->pdev->dev,
			"Firmware load error. "
			"Status code: 0x%x Additional Status: 0x%x\n",
			status, add_status);
		goto lancer_fw_exit;
	}

	/* Activate the new FW: in-band reset if the FW allows it,
	 * otherwise tell the user a full reboot is required.
	 */
	if (change_status == LANCER_FW_RESET_NEEDED) {
		status = lancer_fw_reset(adapter);
		if (status) {
			dev_err(&adapter->pdev->dev,
				"Adapter busy for FW reset.\n"
				"New FW will not be active.\n");
			goto lancer_fw_exit;
		}
	} else if (change_status != LANCER_NO_RESET_NEEDED) {
		dev_err(&adapter->pdev->dev,
			"System reboot required for new FW"
			" to be active\n");
	}

	dev_info(&adapter->pdev->dev, "Firmware flashed successfully\n");
lancer_fw_exit:
	return status;
}
3347
/* Download a UFI firmware image to a BE2/BE3 chip.  Allocates the shared
 * DMA request buffer, checks that the UFI's generation (from its file
 * header) matches the adapter's, and hands off to be_flash_data() - once
 * per image for GEN3 (only imageid 1 is flashed), once with no image
 * headers for GEN2.  Returns 0 on success, -ENOMEM/-1 on failure.
 */
static int be_fw_download(struct be_adapter *adapter, const struct firmware* fw)
{
	struct flash_file_hdr_g2 *fhdr;
	struct flash_file_hdr_g3 *fhdr3;
	struct image_hdr *img_hdr_ptr = NULL;
	struct be_dma_mem flash_cmd;
	const u8 *p;
	int status = 0, i = 0, num_imgs = 0;

	p = fw->data;
	fhdr = (struct flash_file_hdr_g2 *) p;

	/* Buffer = flashrom cmd header + one 32KB data chunk */
	flash_cmd.size = sizeof(struct be_cmd_write_flashrom) + 32*1024;
	flash_cmd.va = dma_alloc_coherent(&adapter->pdev->dev, flash_cmd.size,
					  &flash_cmd.dma, GFP_KERNEL);
	if (!flash_cmd.va) {
		status = -ENOMEM;
		dev_err(&adapter->pdev->dev,
			"Memory allocation failure while flashing\n");
		goto be_fw_exit;
	}

	if ((adapter->generation == BE_GEN3) &&
			(get_ufigen_type(fhdr) == BE_GEN3)) {
		fhdr3 = (struct flash_file_hdr_g3 *) fw->data;
		num_imgs = le32_to_cpu(fhdr3->num_imgs);
		for (i = 0; i < num_imgs; i++) {
			img_hdr_ptr = (struct image_hdr *) (fw->data +
					(sizeof(struct flash_file_hdr_g3) +
					 i * sizeof(struct image_hdr)));
			/* Only image id 1 is flashable on GEN3 */
			if (le32_to_cpu(img_hdr_ptr->imageid) == 1)
				status = be_flash_data(adapter, fw, &flash_cmd,
							num_imgs);
		}
	} else if ((adapter->generation == BE_GEN2) &&
			(get_ufigen_type(fhdr) == BE_GEN2)) {
		status = be_flash_data(adapter, fw, &flash_cmd, 0);
	} else {
		dev_err(&adapter->pdev->dev,
			"UFI and Interface are not compatible for flashing\n");
		status = -1;
	}

	dma_free_coherent(&adapter->pdev->dev, flash_cmd.size, flash_cmd.va,
			  flash_cmd.dma);
	if (status) {
		dev_err(&adapter->pdev->dev, "Firmware load error\n");
		goto be_fw_exit;
	}

	dev_info(&adapter->pdev->dev, "Firmware flashed successfully\n");

be_fw_exit:
	return status;
}
3403
3404int be_load_fw(struct be_adapter *adapter, u8 *fw_file)
3405{
3406 const struct firmware *fw;
3407 int status;
3408
3409 if (!netif_running(adapter->netdev)) {
3410 dev_err(&adapter->pdev->dev,
3411 "Firmware load not allowed (interface is down)\n");
3412 return -1;
3413 }
3414
3415 status = request_firmware(&fw, fw_file, &adapter->pdev->dev);
3416 if (status)
3417 goto fw_exit;
3418
3419 dev_info(&adapter->pdev->dev, "Flashing firmware file %s\n", fw_file);
3420
3421 if (lancer_chip(adapter))
3422 status = lancer_fw_download(adapter, fw);
3423 else
3424 status = be_fw_download(adapter, fw);
3425
Ajit Khaparde84517482009-09-04 03:12:16 +00003426fw_exit:
3427 release_firmware(fw);
3428 return status;
3429}
3430
/* net_device callbacks implemented by this driver; installed on the
 * netdev in be_netdev_init().
 */
static const struct net_device_ops be_netdev_ops = {
	.ndo_open		= be_open,
	.ndo_stop		= be_close,
	.ndo_start_xmit		= be_xmit,
	.ndo_set_rx_mode	= be_set_rx_mode,
	.ndo_set_mac_address	= be_mac_addr_set,
	.ndo_change_mtu		= be_change_mtu,
	.ndo_get_stats64	= be_get_stats64,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_vlan_rx_add_vid	= be_vlan_add_vid,
	.ndo_vlan_rx_kill_vid	= be_vlan_rem_vid,
	.ndo_set_vf_mac		= be_set_vf_mac,
	.ndo_set_vf_vlan	= be_set_vf_vlan,
	.ndo_set_vf_tx_rate	= be_set_vf_tx_rate,
	.ndo_get_vf_config	= be_get_vf_config,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	= be_netpoll,
#endif
};
3450
/* One-time netdev setup: advertise offload features, hook up the
 * driver's netdev and ethtool ops, and register one NAPI context per
 * event queue.
 */
static void be_netdev_init(struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_eq_obj *eqo;
	int i;

	netdev->hw_features |= NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6 |
		NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_RXCSUM |
		NETIF_F_HW_VLAN_TX;
	/* RX hashing is only meaningful with multiple RX queues */
	if (be_multi_rxq(adapter))
		netdev->hw_features |= NETIF_F_RXHASH;

	netdev->features |= netdev->hw_features |
		NETIF_F_HW_VLAN_RX | NETIF_F_HW_VLAN_FILTER;

	netdev->vlan_features |= NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6 |
		NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;

	netdev->priv_flags |= IFF_UNICAST_FLT;

	netdev->flags |= IFF_MULTICAST;

	/* cap GSO payload so payload + Ethernet header fits in 64K */
	netif_set_gso_max_size(netdev, 65535 - ETH_HLEN);

	netdev->netdev_ops = &be_netdev_ops;

	SET_ETHTOOL_OPS(netdev, &be_ethtool_ops);

	for_all_evt_queues(adapter, eqo, i)
		netif_napi_add(netdev, &eqo->napi, be_poll, BE_NAPI_WEIGHT);
}
3482
3483static void be_unmap_pci_bars(struct be_adapter *adapter)
3484{
Sathya Perla8788fdc2009-07-27 22:52:03 +00003485 if (adapter->csr)
3486 iounmap(adapter->csr);
3487 if (adapter->db)
3488 iounmap(adapter->db);
Parav Pandit045508a2012-03-26 14:27:13 +00003489 if (adapter->roce_db.base)
3490 pci_iounmap(adapter->pdev, adapter->roce_db.base);
3491}
3492
3493static int lancer_roce_map_pci_bars(struct be_adapter *adapter)
3494{
3495 struct pci_dev *pdev = adapter->pdev;
3496 u8 __iomem *addr;
3497
3498 addr = pci_iomap(pdev, 2, 0);
3499 if (addr == NULL)
3500 return -ENOMEM;
3501
3502 adapter->roce_db.base = addr;
3503 adapter->roce_db.io_addr = pci_resource_start(pdev, 2);
3504 adapter->roce_db.size = 8192;
3505 adapter->roce_db.total_size = pci_resource_len(pdev, 2);
3506 return 0;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003507}
3508
/* Map the PCI BARs this function needs.
 *
 * Lancer: BAR0 is the doorbell area (for SLI type 2/3 interfaces); SLI
 * type 3 functions additionally map the RoCE doorbell BAR.
 *
 * BE2/BE3: the PF maps BAR2 as the CSR area.  The doorbell BAR number
 * depends on generation and PF/VF: GEN2 and GEN3-PF use BAR4, GEN3-VF
 * uses BAR0.  On Skyhawk the doorbell BAR range is also recorded for
 * the RoCE driver.
 *
 * Returns 0 on success, -ENOMEM on any mapping failure (already-made
 * mappings are torn down before returning).
 */
static int be_map_pci_bars(struct be_adapter *adapter)
{
	u8 __iomem *addr;
	int db_reg;

	if (lancer_chip(adapter)) {
		if (be_type_2_3(adapter)) {
			addr = ioremap_nocache(
					pci_resource_start(adapter->pdev, 0),
					pci_resource_len(adapter->pdev, 0));
			if (addr == NULL)
				return -ENOMEM;
			adapter->db = addr;
		}
		if (adapter->if_type == SLI_INTF_TYPE_3) {
			if (lancer_roce_map_pci_bars(adapter))
				goto pci_map_err;
		}
		return 0;
	}

	if (be_physfn(adapter)) {
		addr = ioremap_nocache(pci_resource_start(adapter->pdev, 2),
				pci_resource_len(adapter->pdev, 2));
		if (addr == NULL)
			return -ENOMEM;
		adapter->csr = addr;
	}

	/* select the doorbell BAR: GEN2 and GEN3-PF use BAR4, GEN3-VF BAR0 */
	if (adapter->generation == BE_GEN2) {
		db_reg = 4;
	} else {
		if (be_physfn(adapter))
			db_reg = 4;
		else
			db_reg = 0;
	}
	addr = ioremap_nocache(pci_resource_start(adapter->pdev, db_reg),
			       pci_resource_len(adapter->pdev, db_reg));
	if (addr == NULL)
		goto pci_map_err;
	adapter->db = addr;
	if (adapter->sli_family == SKYHAWK_SLI_FAMILY) {
		adapter->roce_db.size = 4096;
		adapter->roce_db.io_addr =
				pci_resource_start(adapter->pdev, db_reg);
		adapter->roce_db.total_size =
				pci_resource_len(adapter->pdev, db_reg);
	}
	return 0;
pci_map_err:
	be_unmap_pci_bars(adapter);
	return -ENOMEM;
}
3563
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003564static void be_ctrl_cleanup(struct be_adapter *adapter)
3565{
Sathya Perla8788fdc2009-07-27 22:52:03 +00003566 struct be_dma_mem *mem = &adapter->mbox_mem_alloced;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003567
3568 be_unmap_pci_bars(adapter);
3569
3570 if (mem->va)
Ivan Vecera2b7bceb2011-02-02 08:05:12 +00003571 dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
3572 mem->dma);
Sathya Perlae7b909a2009-11-22 22:01:10 +00003573
Sathya Perla5b8821b2011-08-02 19:57:44 +00003574 mem = &adapter->rx_filter;
Sathya Perlae7b909a2009-11-22 22:01:10 +00003575 if (mem->va)
Ivan Vecera2b7bceb2011-02-02 08:05:12 +00003576 dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
3577 mem->dma);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003578}
3579
/* Set up everything needed to issue fw commands: map the PCI BARs,
 * allocate the (16-byte aligned) mailbox and the rx_filter command DMA
 * buffers, and initialize the mailbox/MCC locks.  Unwinds via gotos in
 * reverse order on failure.
 *
 * Returns 0 on success or a negative errno.
 */
static int be_ctrl_init(struct be_adapter *adapter)
{
	struct be_dma_mem *mbox_mem_alloc = &adapter->mbox_mem_alloced;
	struct be_dma_mem *mbox_mem_align = &adapter->mbox_mem;
	struct be_dma_mem *rx_filter = &adapter->rx_filter;
	int status;

	status = be_map_pci_bars(adapter);
	if (status)
		goto done;

	/* over-allocate by 16 bytes so the used region can be aligned */
	mbox_mem_alloc->size = sizeof(struct be_mcc_mailbox) + 16;
	mbox_mem_alloc->va = dma_alloc_coherent(&adapter->pdev->dev,
						mbox_mem_alloc->size,
						&mbox_mem_alloc->dma,
						GFP_KERNEL);
	if (!mbox_mem_alloc->va) {
		status = -ENOMEM;
		goto unmap_pci_bars;
	}
	mbox_mem_align->size = sizeof(struct be_mcc_mailbox);
	mbox_mem_align->va = PTR_ALIGN(mbox_mem_alloc->va, 16);
	mbox_mem_align->dma = PTR_ALIGN(mbox_mem_alloc->dma, 16);
	memset(mbox_mem_align->va, 0, sizeof(struct be_mcc_mailbox));

	rx_filter->size = sizeof(struct be_cmd_req_rx_filter);
	rx_filter->va = dma_alloc_coherent(&adapter->pdev->dev, rx_filter->size,
					&rx_filter->dma, GFP_KERNEL);
	if (rx_filter->va == NULL) {
		status = -ENOMEM;
		goto free_mbox;
	}
	memset(rx_filter->va, 0, rx_filter->size);
	mutex_init(&adapter->mbox_lock);
	spin_lock_init(&adapter->mcc_lock);
	spin_lock_init(&adapter->mcc_cq_lock);

	init_completion(&adapter->flash_compl);
	/* saved state is restored after EEH/suspend recovery */
	pci_save_state(adapter->pdev);
	return 0;

free_mbox:
	dma_free_coherent(&adapter->pdev->dev, mbox_mem_alloc->size,
			  mbox_mem_alloc->va, mbox_mem_alloc->dma);

unmap_pci_bars:
	be_unmap_pci_bars(adapter);

done:
	return status;
}
3631
3632static void be_stats_cleanup(struct be_adapter *adapter)
3633{
Sathya Perla3abcded2010-10-03 22:12:27 -07003634 struct be_dma_mem *cmd = &adapter->stats_cmd;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003635
3636 if (cmd->va)
Ivan Vecera2b7bceb2011-02-02 08:05:12 +00003637 dma_free_coherent(&adapter->pdev->dev, cmd->size,
3638 cmd->va, cmd->dma);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003639}
3640
3641static int be_stats_init(struct be_adapter *adapter)
3642{
Sathya Perla3abcded2010-10-03 22:12:27 -07003643 struct be_dma_mem *cmd = &adapter->stats_cmd;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003644
Selvin Xavier005d5692011-05-16 07:36:35 +00003645 if (adapter->generation == BE_GEN2) {
Ajit Khaparde89a88ab2011-05-16 07:36:18 +00003646 cmd->size = sizeof(struct be_cmd_req_get_stats_v0);
Selvin Xavier005d5692011-05-16 07:36:35 +00003647 } else {
3648 if (lancer_chip(adapter))
3649 cmd->size = sizeof(struct lancer_cmd_req_pport_stats);
3650 else
3651 cmd->size = sizeof(struct be_cmd_req_get_stats_v1);
3652 }
Ivan Vecera2b7bceb2011-02-02 08:05:12 +00003653 cmd->va = dma_alloc_coherent(&adapter->pdev->dev, cmd->size, &cmd->dma,
3654 GFP_KERNEL);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003655 if (cmd->va == NULL)
3656 return -1;
David S. Millerd291b9a2010-01-28 21:36:21 -08003657 memset(cmd->va, 0, cmd->size);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003658 return 0;
3659}
3660
/* PCI remove callback: tear down the adapter in the reverse order of
 * be_probe().  The RoCE sibling and the recovery worker are stopped
 * before the netdev is unregistered so neither touches the device
 * mid-teardown.
 */
static void __devexit be_remove(struct pci_dev *pdev)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);

	if (!adapter)
		return;

	be_roce_dev_remove(adapter);

	cancel_delayed_work_sync(&adapter->func_recovery_work);

	unregister_netdev(adapter->netdev);

	be_clear(adapter);

	/* tell fw we're done with firing cmds */
	be_cmd_fw_clean(adapter);

	be_stats_cleanup(adapter);

	be_ctrl_cleanup(adapter);

	pci_disable_pcie_error_reporting(pdev);

	pci_set_drvdata(pdev, NULL);
	pci_release_regions(pdev);
	pci_disable_device(pdev);

	free_netdev(adapter->netdev);
}
3691
Ajit Khaparde4762f6c2012-03-18 06:23:11 +00003692bool be_is_wol_supported(struct be_adapter *adapter)
3693{
3694 return ((adapter->wol_cap & BE_WOL_CAP) &&
3695 !be_is_wol_excluded(adapter)) ? true : false;
3696}
3697
/* Query the fw's extended FAT capabilities and return the debug level
 * configured for the UART trace mode of module 0.  Returns 0 if the
 * query or the DMA allocation fails (i.e. "no logging").
 */
u32 be_get_fw_log_level(struct be_adapter *adapter)
{
	struct be_dma_mem extfat_cmd;
	struct be_fat_conf_params *cfgs;
	int status;
	u32 level = 0;
	int j;

	memset(&extfat_cmd, 0, sizeof(struct be_dma_mem));
	extfat_cmd.size = sizeof(struct be_cmd_resp_get_ext_fat_caps);
	/* NOTE(review): uses the legacy pci_alloc_consistent() API while
	 * the rest of the file uses dma_alloc_coherent() — candidate for
	 * a follow-up cleanup
	 */
	extfat_cmd.va = pci_alloc_consistent(adapter->pdev, extfat_cmd.size,
					     &extfat_cmd.dma);

	if (!extfat_cmd.va) {
		dev_err(&adapter->pdev->dev, "%s: Memory allocation failure\n",
			__func__);
		goto err;
	}

	status = be_cmd_get_ext_fat_capabilites(adapter, &extfat_cmd);
	if (!status) {
		/* the config params follow the generic response header */
		cfgs = (struct be_fat_conf_params *)(extfat_cmd.va +
						sizeof(struct be_cmd_resp_hdr));
		for (j = 0; j < le32_to_cpu(cfgs->module[0].num_modes); j++) {
			if (cfgs->module[0].trace_lvl[j].mode == MODE_UART)
				level = cfgs->module[0].trace_lvl[j].dbg_lvl;
		}
	}
	pci_free_consistent(adapter->pdev, extfat_cmd.size, extfat_cmd.va,
			    extfat_cmd.dma);
err:
	return level;
}
Padmanabh Ratnakarabb93952012-10-20 06:01:41 +00003731
/* Fetch one-time configuration from the fw at probe time: controller
 * attributes, WoL capability, die-temperature polling frequency and the
 * fw log level (which seeds msg_enable).
 *
 * Returns 0 on success or the error from the attributes query.
 */
static int be_get_initial_config(struct be_adapter *adapter)
{
	int status;
	u32 level;

	status = be_cmd_get_cntl_attributes(adapter);
	if (status)
		return status;

	status = be_cmd_get_acpi_wol_cap(adapter);
	if (status) {
		/* in case of a failure to get wol capabilities
		 * check the exclusion list to determine WOL capability */
		if (!be_is_wol_excluded(adapter))
			adapter->wol_cap |= BE_WOL_CAP;
	}

	if (be_is_wol_supported(adapter))
		adapter->wol = true;

	/* Must be a power of 2 or else MODULO will BUG_ON */
	adapter->be_get_temp_freq = 64;

	level = be_get_fw_log_level(adapter);
	adapter->msg_enable = level <= FW_LOG_LEVEL_DEFAULT ? NETIF_MSG_HW : 0;

	return 0;
}
3760
Sathya Perla39f1d942012-05-08 19:41:24 +00003761static int be_dev_type_check(struct be_adapter *adapter)
Sathya Perlafe6d2a32010-11-21 23:25:50 +00003762{
3763 struct pci_dev *pdev = adapter->pdev;
3764 u32 sli_intf = 0, if_type;
3765
3766 switch (pdev->device) {
3767 case BE_DEVICE_ID1:
3768 case OC_DEVICE_ID1:
3769 adapter->generation = BE_GEN2;
3770 break;
3771 case BE_DEVICE_ID2:
3772 case OC_DEVICE_ID2:
3773 adapter->generation = BE_GEN3;
3774 break;
3775 case OC_DEVICE_ID3:
Mammatha Edhala12f4d0a2011-05-18 03:26:22 +00003776 case OC_DEVICE_ID4:
Sathya Perlafe6d2a32010-11-21 23:25:50 +00003777 pci_read_config_dword(pdev, SLI_INTF_REG_OFFSET, &sli_intf);
Parav Pandit045508a2012-03-26 14:27:13 +00003778 adapter->if_type = (sli_intf & SLI_INTF_IF_TYPE_MASK) >>
3779 SLI_INTF_IF_TYPE_SHIFT;
Sathya Perlafe6d2a32010-11-21 23:25:50 +00003780 if_type = (sli_intf & SLI_INTF_IF_TYPE_MASK) >>
3781 SLI_INTF_IF_TYPE_SHIFT;
Sathya Perlafe6d2a32010-11-21 23:25:50 +00003782 if (((sli_intf & SLI_INTF_VALID_MASK) != SLI_INTF_VALID) ||
Parav Pandit045508a2012-03-26 14:27:13 +00003783 !be_type_2_3(adapter)) {
3784 dev_err(&pdev->dev, "SLI_INTF reg val is not valid\n");
3785 return -EINVAL;
3786 }
3787 adapter->sli_family = ((sli_intf & SLI_INTF_FAMILY_MASK) >>
3788 SLI_INTF_FAMILY_SHIFT);
3789 adapter->generation = BE_GEN3;
3790 break;
3791 case OC_DEVICE_ID5:
3792 pci_read_config_dword(pdev, SLI_INTF_REG_OFFSET, &sli_intf);
3793 if ((sli_intf & SLI_INTF_VALID_MASK) != SLI_INTF_VALID) {
Sathya Perlafe6d2a32010-11-21 23:25:50 +00003794 dev_err(&pdev->dev, "SLI_INTF reg val is not valid\n");
3795 return -EINVAL;
3796 }
Sathya Perlafe6d2a32010-11-21 23:25:50 +00003797 adapter->sli_family = ((sli_intf & SLI_INTF_FAMILY_MASK) >>
3798 SLI_INTF_FAMILY_SHIFT);
3799 adapter->generation = BE_GEN3;
3800 break;
3801 default:
3802 adapter->generation = 0;
3803 }
Sathya Perla39f1d942012-05-08 19:41:24 +00003804
3805 pci_read_config_dword(adapter->pdev, SLI_INTF_REG_OFFSET, &sli_intf);
3806 adapter->virtfn = (sli_intf & SLI_INTF_FT_MASK) ? 1 : 0;
Sathya Perlafe6d2a32010-11-21 23:25:50 +00003807 return 0;
3808}
3809
/* Attempt SLIPORT error recovery on a Lancer adapter: wait for the chip
 * to report ready, tear the function down, clear the error flags, and
 * bring it back up (reopening the interface if it was running).
 *
 * Returns 0 on success or the first failing step's status.
 */
static int lancer_recover_func(struct be_adapter *adapter)
{
	int status;

	status = lancer_test_and_set_rdy_state(adapter);
	if (status)
		goto err;

	if (netif_running(adapter->netdev))
		be_close(adapter->netdev);

	be_clear(adapter);

	adapter->hw_error = false;
	adapter->fw_timeout = false;

	status = be_setup(adapter);
	if (status)
		goto err;

	if (netif_running(adapter->netdev)) {
		status = be_open(adapter->netdev);
		if (status)
			goto err;
	}

	/* NOTE(review): success is logged at dev_err level, and the
	 * failure message below is printed only when eeh_error is set —
	 * both look inverted; confirm intended log semantics
	 */
	dev_err(&adapter->pdev->dev,
		"Adapter SLIPORT recovery succeeded\n");
	return 0;
err:
	if (adapter->eeh_error)
		dev_err(&adapter->pdev->dev,
			"Adapter SLIPORT recovery failed\n");

	return status;
}
3846
/* Periodic (1s) worker that polls for hardware errors and, on Lancer
 * chips, kicks off SLIPORT recovery.  The netdev is detached while the
 * recovery runs and re-attached only if it succeeds.  Always
 * reschedules itself.
 */
static void be_func_recovery_task(struct work_struct *work)
{
	struct be_adapter *adapter =
		container_of(work, struct be_adapter, func_recovery_work.work);
	int status;

	be_detect_error(adapter);

	if (adapter->hw_error && lancer_chip(adapter)) {

		/* EEH recovery owns the device; don't interfere */
		if (adapter->eeh_error)
			goto out;

		rtnl_lock();
		netif_device_detach(adapter->netdev);
		rtnl_unlock();

		status = lancer_recover_func(adapter);

		if (!status)
			netif_device_attach(adapter->netdev);
	}

out:
	schedule_delayed_work(&adapter->func_recovery_work,
			      msecs_to_jiffies(1000));
}
3874
/* Periodic (1s) housekeeping worker: reaps MCC completions while the
 * interface is down; otherwise issues the stats query, polls die
 * temperature every be_get_temp_freq iterations, replenishes starved RX
 * rings and updates EQ delay settings.  Always reschedules itself.
 */
static void be_worker(struct work_struct *work)
{
	struct be_adapter *adapter =
		container_of(work, struct be_adapter, work.work);
	struct be_rx_obj *rxo;
	struct be_eq_obj *eqo;
	int i;

	/* when interrupts are not yet enabled, just reap any pending
	 * mcc completions */
	if (!netif_running(adapter->netdev)) {
		local_bh_disable();
		be_process_mcc(adapter);
		local_bh_enable();
		goto reschedule;
	}

	/* don't pile up stats requests; wait for the previous reply */
	if (!adapter->stats_cmd_sent) {
		if (lancer_chip(adapter))
			lancer_cmd_get_pport_stats(adapter,
						&adapter->stats_cmd);
		else
			be_cmd_get_stats(adapter, &adapter->stats_cmd);
	}

	if (MODULO(adapter->work_counter, adapter->be_get_temp_freq) == 0)
		be_cmd_get_die_temperature(adapter);

	for_all_rx_queues(adapter, rxo, i) {
		if (rxo->rx_post_starved) {
			rxo->rx_post_starved = false;
			be_post_rx_frags(rxo, GFP_KERNEL);
		}
	}

	for_all_evt_queues(adapter, eqo, i)
		be_eqd_update(adapter, eqo);

reschedule:
	adapter->work_counter++;
	schedule_delayed_work(&adapter->work, msecs_to_jiffies(1000));
}
3917
Sathya Perla39f1d942012-05-08 19:41:24 +00003918static bool be_reset_required(struct be_adapter *adapter)
3919{
Sathya Perlad79c0a22012-06-05 19:37:22 +00003920 return be_find_vfs(adapter, ENABLED) > 0 ? false : true;
Sathya Perla39f1d942012-05-08 19:41:24 +00003921}
3922
Sathya Perlad3791422012-09-28 04:39:44 +00003923static char *mc_name(struct be_adapter *adapter)
3924{
3925 if (adapter->function_mode & FLEX10_MODE)
3926 return "FLEX10";
3927 else if (adapter->function_mode & VNIC_MODE)
3928 return "vNIC";
3929 else if (adapter->function_mode & UMC_ENABLED)
3930 return "UMC";
3931 else
3932 return "";
3933}
3934
/* "PF" for the physical function, "VF" for a virtual function. */
static inline char *func_name(struct be_adapter *adapter)
{
	if (be_physfn(adapter))
		return "PF";

	return "VF";
}
3939
/* PCI probe callback: bring up a benet function from scratch.
 *
 * Order matters throughout: PCI enable/regions -> netdev allocation ->
 * device-type detection -> DMA mask -> control structures -> fw
 * handshake -> optional function reset -> stats/config -> be_setup()
 * -> netdev registration.  Failures unwind through the labels at the
 * bottom in reverse order of setup.
 */
static int __devinit be_probe(struct pci_dev *pdev,
			const struct pci_device_id *pdev_id)
{
	int status = 0;
	struct be_adapter *adapter;
	struct net_device *netdev;
	char port_name;

	status = pci_enable_device(pdev);
	if (status)
		goto do_none;

	status = pci_request_regions(pdev, DRV_NAME);
	if (status)
		goto disable_dev;
	pci_set_master(pdev);

	netdev = alloc_etherdev_mqs(sizeof(*adapter), MAX_TX_QS, MAX_RX_QS);
	if (netdev == NULL) {
		status = -ENOMEM;
		goto rel_reg;
	}
	adapter = netdev_priv(netdev);
	adapter->pdev = pdev;
	pci_set_drvdata(pdev, adapter);

	status = be_dev_type_check(adapter);
	if (status)
		goto free_netdev;

	adapter->netdev = netdev;
	SET_NETDEV_DEV(netdev, &pdev->dev);

	/* prefer 64-bit DMA; fall back to 32-bit */
	status = dma_set_mask(&pdev->dev, DMA_BIT_MASK(64));
	if (!status) {
		netdev->features |= NETIF_F_HIGHDMA;
	} else {
		status = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
		if (status) {
			dev_err(&pdev->dev, "Could not set PCI DMA Mask\n");
			goto free_netdev;
		}
	}

	/* AER is best-effort; a failure here is only logged */
	status = pci_enable_pcie_error_reporting(pdev);
	if (status)
		dev_err(&pdev->dev, "Could not use PCIe error reporting\n");

	status = be_ctrl_init(adapter);
	if (status)
		goto free_netdev;

	/* sync up with fw's ready state */
	if (be_physfn(adapter)) {
		status = be_fw_wait_ready(adapter);
		if (status)
			goto ctrl_clean;
	}

	/* tell fw we're ready to fire cmds */
	status = be_cmd_fw_init(adapter);
	if (status)
		goto ctrl_clean;

	if (be_reset_required(adapter)) {
		status = be_cmd_reset_function(adapter);
		if (status)
			goto ctrl_clean;
	}

	/* The INTR bit may be set in the card when probed by a kdump kernel
	 * after a crash.
	 */
	if (!lancer_chip(adapter))
		be_intr_set(adapter, false);

	status = be_stats_init(adapter);
	if (status)
		goto ctrl_clean;

	status = be_get_initial_config(adapter);
	if (status)
		goto stats_clean;

	INIT_DELAYED_WORK(&adapter->work, be_worker);
	INIT_DELAYED_WORK(&adapter->func_recovery_work, be_func_recovery_task);
	adapter->rx_fc = adapter->tx_fc = true;

	status = be_setup(adapter);
	if (status)
		goto stats_clean;

	be_netdev_init(netdev);
	status = register_netdev(netdev);
	if (status != 0)
		goto unsetup;

	be_roce_dev_add(adapter);

	schedule_delayed_work(&adapter->func_recovery_work,
			      msecs_to_jiffies(1000));

	be_cmd_query_port_name(adapter, &port_name);

	dev_info(&pdev->dev, "%s: %s %s port %c\n", nic_name(pdev),
		 func_name(adapter), mc_name(adapter), port_name);

	return 0;

unsetup:
	be_clear(adapter);
stats_clean:
	be_stats_cleanup(adapter);
ctrl_clean:
	be_ctrl_cleanup(adapter);
free_netdev:
	free_netdev(netdev);
	pci_set_drvdata(pdev, NULL);
rel_reg:
	pci_release_regions(pdev);
disable_dev:
	pci_disable_device(pdev);
do_none:
	dev_err(&pdev->dev, "%s initialization failed\n", nic_name(pdev));
	return status;
}
4066
/* PM suspend callback: arm WoL if enabled, stop the recovery worker,
 * close the interface, tear down h/w resources and put the device into
 * the requested low-power state.
 */
static int be_suspend(struct pci_dev *pdev, pm_message_t state)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);
	struct net_device *netdev = adapter->netdev;

	if (adapter->wol)
		be_setup_wol(adapter, true);

	cancel_delayed_work_sync(&adapter->func_recovery_work);

	netif_device_detach(netdev);
	if (netif_running(netdev)) {
		rtnl_lock();
		be_close(netdev);
		rtnl_unlock();
	}
	be_clear(adapter);

	pci_save_state(pdev);
	pci_disable_device(pdev);
	pci_set_power_state(pdev, pci_choose_state(pdev, state));
	return 0;
}
4090
/* PM resume callback: re-enable the device, redo the fw handshake,
 * rebuild h/w resources with be_setup(), reopen the interface if it was
 * running and restart the recovery worker.  Disarms WoL at the end.
 */
static int be_resume(struct pci_dev *pdev)
{
	int status = 0;
	struct be_adapter *adapter = pci_get_drvdata(pdev);
	struct net_device *netdev = adapter->netdev;

	netif_device_detach(netdev);

	status = pci_enable_device(pdev);
	if (status)
		return status;

	pci_set_power_state(pdev, 0);
	pci_restore_state(pdev);

	/* tell fw we're ready to fire cmds */
	status = be_cmd_fw_init(adapter);
	if (status)
		return status;

	be_setup(adapter);
	if (netif_running(netdev)) {
		rtnl_lock();
		be_open(netdev);
		rtnl_unlock();
	}

	schedule_delayed_work(&adapter->func_recovery_work,
			      msecs_to_jiffies(1000));
	netif_device_attach(netdev);

	if (adapter->wol)
		be_setup_wol(adapter, false);

	return 0;
}
4127
Sathya Perla82456b02010-02-17 01:35:37 +00004128/*
4129 * An FLR will stop BE from DMAing any data.
4130 */
4131static void be_shutdown(struct pci_dev *pdev)
4132{
4133 struct be_adapter *adapter = pci_get_drvdata(pdev);
Sathya Perla82456b02010-02-17 01:35:37 +00004134
Ajit Khaparde2d5d4152011-04-06 05:53:13 +00004135 if (!adapter)
4136 return;
Sathya Perla82456b02010-02-17 01:35:37 +00004137
Sathya Perla0f4a6822011-03-21 20:49:28 +00004138 cancel_delayed_work_sync(&adapter->work);
Padmanabh Ratnakarf67ef7b2012-07-12 03:57:09 +00004139 cancel_delayed_work_sync(&adapter->func_recovery_work);
Ajit Khapardea4ca0552011-02-11 13:38:03 +00004140
Ajit Khaparde2d5d4152011-04-06 05:53:13 +00004141 netif_device_detach(adapter->netdev);
Sathya Perla82456b02010-02-17 01:35:37 +00004142
Sathya Perla82456b02010-02-17 01:35:37 +00004143 if (adapter->wol)
4144 be_setup_wol(adapter, true);
4145
Ajit Khaparde57841862011-04-06 18:08:43 +00004146 be_cmd_reset_function(adapter);
4147
Sathya Perla82456b02010-02-17 01:35:37 +00004148 pci_disable_device(pdev);
Sathya Perla82456b02010-02-17 01:35:37 +00004149}
4150
/* EEH callback: the PCI layer detected an I/O channel error on this
 * device.  Quiesce the driver (detach, close, free resources) and tell
 * the EEH core whether a slot reset is worth attempting.
 */
static pci_ers_result_t be_eeh_err_detected(struct pci_dev *pdev,
				pci_channel_state_t state)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);
	struct net_device *netdev = adapter->netdev;

	dev_err(&adapter->pdev->dev, "EEH error detected\n");

	/* flag the error so other paths stop issuing FW cmds */
	adapter->eeh_error = true;

	/* recovery worker must not run concurrently with EEH recovery */
	cancel_delayed_work_sync(&adapter->func_recovery_work);

	rtnl_lock();
	netif_device_detach(netdev);
	rtnl_unlock();

	if (netif_running(netdev)) {
		rtnl_lock();
		be_close(netdev);
		rtnl_unlock();
	}
	be_clear(adapter);

	/* permanent failure: a slot reset cannot help */
	if (state == pci_channel_io_perm_failure)
		return PCI_ERS_RESULT_DISCONNECT;

	pci_disable_device(pdev);

	/* The error could cause the FW to trigger a flash debug dump.
	 * Resetting the card while flash dump is in progress
	 * can cause it not to recover; wait for it to finish
	 */
	ssleep(30);
	return PCI_ERS_RESULT_NEED_RESET;
}
4186
/* EEH callback: the slot reset completed.  Re-enable the device and
 * verify the card and its firmware came back to a usable state.
 */
static pci_ers_result_t be_eeh_reset(struct pci_dev *pdev)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);
	int status;

	dev_info(&adapter->pdev->dev, "EEH reset\n");
	/* clear the sticky error flags set in be_eeh_err_detected() */
	be_clear_all_error(adapter);

	status = pci_enable_device(pdev);
	if (status)
		return PCI_ERS_RESULT_DISCONNECT;

	pci_set_master(pdev);
	pci_set_power_state(pdev, 0);
	pci_restore_state(pdev);

	/* Check if card is ok and fw is ready */
	status = be_fw_wait_ready(adapter);
	if (status)
		return PCI_ERS_RESULT_DISCONNECT;

	/* clear any logged AER uncorrectable status before resuming */
	pci_cleanup_aer_uncorrect_error_status(pdev);
	return PCI_ERS_RESULT_RECOVERED;
}
4211
/* EEH callback: traffic may flow again.  Rebuild the driver state torn
 * down in be_eeh_err_detected() and restart the interface.  On any
 * failure the device is left detached and an error is logged.
 */
static void be_eeh_resume(struct pci_dev *pdev)
{
	int status = 0;
	struct be_adapter *adapter = pci_get_drvdata(pdev);
	struct net_device *netdev = adapter->netdev;

	dev_info(&adapter->pdev->dev, "EEH resume\n");

	pci_save_state(pdev);

	/* tell fw we're ready to fire cmds */
	status = be_cmd_fw_init(adapter);
	if (status)
		goto err;

	/* reset the function to get a clean slate after the EEH event */
	status = be_cmd_reset_function(adapter);
	if (status)
		goto err;

	status = be_setup(adapter);
	if (status)
		goto err;

	if (netif_running(netdev)) {
		status = be_open(netdev);
		if (status)
			goto err;
	}

	/* restart the periodic error-recovery poller */
	schedule_delayed_work(&adapter->func_recovery_work,
			      msecs_to_jiffies(1000));
	netif_device_attach(netdev);
	return;
err:
	dev_err(&adapter->pdev->dev, "EEH resume failed\n");
}
4248
/* PCI error-recovery (EEH/AER) entry points for this driver */
static const struct pci_error_handlers be_eeh_handlers = {
	.error_detected = be_eeh_err_detected,
	.slot_reset = be_eeh_reset,
	.resume = be_eeh_resume,
};
4254
/* PCI driver registration table: probe/remove, PM and error hooks */
static struct pci_driver be_driver = {
	.name = DRV_NAME,
	.id_table = be_dev_ids,
	.probe = be_probe,
	.remove = be_remove,
	.suspend = be_suspend,
	.resume = be_resume,
	.shutdown = be_shutdown,
	.err_handler = &be_eeh_handlers
};
4265
4266static int __init be_init_module(void)
4267{
Joe Perches8e95a202009-12-03 07:58:21 +00004268 if (rx_frag_size != 8192 && rx_frag_size != 4096 &&
4269 rx_frag_size != 2048) {
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004270 printk(KERN_WARNING DRV_NAME
4271 " : Module param rx_frag_size must be 2048/4096/8192."
4272 " Using 2048\n");
4273 rx_frag_size = 2048;
4274 }
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004275
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004276 return pci_register_driver(&be_driver);
4277}
4278module_init(be_init_module);
4279
/* Module exit: unregister the PCI driver; the PCI core invokes
 * be_remove() for every bound device.
 */
static void __exit be_exit_module(void)
{
	pci_unregister_driver(&be_driver);
}
module_exit(be_exit_module);