blob: 9e7dbd59b57e3721622b070aec420ceda4743f32 [file] [log] [blame]
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001/*
Ajit Khaparded2145cd2011-03-16 08:20:46 +00002 * Copyright (C) 2005 - 2011 Emulex
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003 * All rights reserved.
4 *
5 * This program is free software; you can redistribute it and/or
6 * modify it under the terms of the GNU General Public License version 2
7 * as published by the Free Software Foundation. The full GNU General
8 * Public License is included in this distribution in the file called COPYING.
9 *
10 * Contact Information:
Ajit Khaparded2145cd2011-03-16 08:20:46 +000011 * linux-drivers@emulex.com
Sathya Perla6b7c5b92009-03-11 23:32:03 -070012 *
Ajit Khaparded2145cd2011-03-16 08:20:46 +000013 * Emulex
14 * 3333 Susan Street
15 * Costa Mesa, CA 92626
Sathya Perla6b7c5b92009-03-11 23:32:03 -070016 */
17
Paul Gortmaker70c71602011-05-22 16:47:17 -040018#include <linux/prefetch.h>
Paul Gortmaker9d9779e2011-07-03 15:21:01 -040019#include <linux/module.h>
Sathya Perla6b7c5b92009-03-11 23:32:03 -070020#include "be.h"
Sathya Perla8788fdc2009-07-27 22:52:03 +000021#include "be_cmds.h"
Stephen Hemminger65f71b82009-03-27 00:25:24 -070022#include <asm/div64.h>
Sathya Perla6b7c5b92009-03-11 23:32:03 -070023
24MODULE_VERSION(DRV_VER);
25MODULE_DEVICE_TABLE(pci, be_dev_ids);
26MODULE_DESCRIPTION(DRV_DESC " " DRV_VER);
27MODULE_AUTHOR("ServerEngines Corporation");
28MODULE_LICENSE("GPL");
29
/* Number of PCI virtual functions to initialize (SR-IOV); defaults to 0. */
static unsigned int num_vfs;
module_param(num_vfs, uint, S_IRUGO);
MODULE_PARM_DESC(num_vfs, "Number of PCI VFs to initialize");

/* Size of each RX buffer fragment posted to the hardware, in bytes. */
static ushort rx_frag_size = 2048;
module_param(rx_frag_size, ushort, S_IRUGO);
MODULE_PARM_DESC(rx_frag_size, "Size of a fragment that holds rcvd data.");
37
/* PCI IDs claimed by this driver: BE devices under the ServerEngines
 * vendor ID and OneConnect devices under the Emulex vendor ID.
 */
static DEFINE_PCI_DEVICE_TABLE(be_dev_ids) = {
	{ PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID1) },
	{ PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID2) },
	{ PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID1) },
	{ PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID2) },
	{ PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID3)},
	{ PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID4)},
	{ PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID5)},
	{ 0 }
};
MODULE_DEVICE_TABLE(pci, be_dev_ids);
/* UE Status Low CSR */
/* Human-readable names for the bits of the Unrecoverable Error status
 * low register, indexed by bit position (used when logging UEs).
 * NOTE(review): trailing spaces in some entries are preserved as-is;
 * they appear in log output.
 */
static const char * const ue_status_low_desc[] = {
	"CEV",
	"CTX",
	"DBUF",
	"ERX",
	"Host",
	"MPU",
	"NDMA",
	"PTC ",
	"RDMA ",
	"RXF ",
	"RXIPS ",
	"RXULP0 ",
	"RXULP1 ",
	"RXULP2 ",
	"TIM ",
	"TPOST ",
	"TPRE ",
	"TXIPS ",
	"TXULP0 ",
	"TXULP1 ",
	"UC ",
	"WDMA ",
	"TXULP2 ",
	"HOST1 ",
	"P0_OB_LINK ",
	"P1_OB_LINK ",
	"HOST_GPIO ",
	"MBOX ",
	"AXGMAC0",
	"AXGMAC1",
	"JTAG",
	"MPU_INTPEND"
};
/* UE Status High CSR */
/* Bit names for the Unrecoverable Error status high register, indexed by
 * bit position; unassigned bits report as "Unknown".
 */
static const char * const ue_status_hi_desc[] = {
	"LPCMEMHOST",
	"MGMT_MAC",
	"PCS0ONLINE",
	"MPU_IRAM",
	"PCS1ONLINE",
	"PCTL0",
	"PCTL1",
	"PMEM",
	"RR",
	"TXPB",
	"RXPP",
	"XAUI",
	"TXP",
	"ARM",
	"IPC",
	"HOST2",
	"HOST3",
	"HOST4",
	"HOST5",
	"HOST6",
	"HOST7",
	"HOST8",
	"HOST9",
	"NETC",
	"Unknown",
	"Unknown",
	"Unknown",
	"Unknown",
	"Unknown",
	"Unknown",
	"Unknown",
	"Unknown"
};
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700119
Sathya Perla752961a2011-10-24 02:45:03 +0000120/* Is BE in a multi-channel mode */
121static inline bool be_is_mc(struct be_adapter *adapter) {
122 return (adapter->function_mode & FLEX10_MODE ||
123 adapter->function_mode & VNIC_MODE ||
124 adapter->function_mode & UMC_ENABLED);
125}
126
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700127static void be_queue_free(struct be_adapter *adapter, struct be_queue_info *q)
128{
129 struct be_dma_mem *mem = &q->dma_mem;
Sathya Perla1cfafab2012-02-23 18:50:15 +0000130 if (mem->va) {
Ivan Vecera2b7bceb2011-02-02 08:05:12 +0000131 dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
132 mem->dma);
Sathya Perla1cfafab2012-02-23 18:50:15 +0000133 mem->va = NULL;
134 }
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700135}
136
137static int be_queue_alloc(struct be_adapter *adapter, struct be_queue_info *q,
138 u16 len, u16 entry_size)
139{
140 struct be_dma_mem *mem = &q->dma_mem;
141
142 memset(q, 0, sizeof(*q));
143 q->len = len;
144 q->entry_size = entry_size;
145 mem->size = len * entry_size;
Ivan Vecera2b7bceb2011-02-02 08:05:12 +0000146 mem->va = dma_alloc_coherent(&adapter->pdev->dev, mem->size, &mem->dma,
147 GFP_KERNEL);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700148 if (!mem->va)
Sathya Perla10ef9ab2012-02-09 18:05:27 +0000149 return -ENOMEM;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700150 memset(mem->va, 0, mem->size);
151 return 0;
152}
153
/* Enable or disable host interrupts by toggling the HOSTINTR bit in the
 * membar control register (accessed through PCI config space).
 * Skipped entirely while an EEH/PCI error is pending, and also when the
 * bit already has the requested value (avoids a redundant config write).
 */
static void be_intr_set(struct be_adapter *adapter, bool enable)
{
	u32 reg, enabled;

	/* don't touch the device during EEH error recovery */
	if (adapter->eeh_error)
		return;

	pci_read_config_dword(adapter->pdev, PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET,
				&reg);
	enabled = reg & MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;

	if (!enabled && enable)
		reg |= MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
	else if (enabled && !enable)
		reg &= ~MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
	else
		return;	/* already in the requested state */

	pci_write_config_dword(adapter->pdev,
			PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET, reg);
}
175
Sathya Perla8788fdc2009-07-27 22:52:03 +0000176static void be_rxq_notify(struct be_adapter *adapter, u16 qid, u16 posted)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700177{
178 u32 val = 0;
179 val |= qid & DB_RQ_RING_ID_MASK;
180 val |= posted << DB_RQ_NUM_POSTED_SHIFT;
Sathya Perlaf3eb62d2010-06-29 00:11:17 +0000181
182 wmb();
Sathya Perla8788fdc2009-07-27 22:52:03 +0000183 iowrite32(val, adapter->db + DB_RQ_OFFSET);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700184}
185
Sathya Perla8788fdc2009-07-27 22:52:03 +0000186static void be_txq_notify(struct be_adapter *adapter, u16 qid, u16 posted)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700187{
188 u32 val = 0;
189 val |= qid & DB_TXULP_RING_ID_MASK;
190 val |= (posted & DB_TXULP_NUM_POSTED_MASK) << DB_TXULP_NUM_POSTED_SHIFT;
Sathya Perlaf3eb62d2010-06-29 00:11:17 +0000191
192 wmb();
Sathya Perla8788fdc2009-07-27 22:52:03 +0000193 iowrite32(val, adapter->db + DB_TXULP1_OFFSET);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700194}
195
/* Notify the event queue doorbell: acknowledge @num_popped consumed
 * event entries on EQ @qid, optionally re-arming the EQ (@arm) and
 * clearing the interrupt (@clear_int).  No-op during EEH recovery.
 */
static void be_eq_notify(struct be_adapter *adapter, u16 qid,
		bool arm, bool clear_int, u16 num_popped)
{
	u32 val = 0;
	val |= qid & DB_EQ_RING_ID_MASK;
	/* upper ring-id bits live in a separate field of the doorbell */
	val |= ((qid & DB_EQ_RING_ID_EXT_MASK) <<
			DB_EQ_RING_ID_EXT_MASK_SHIFT);

	/* don't touch the device during EEH error recovery */
	if (adapter->eeh_error)
		return;

	if (arm)
		val |= 1 << DB_EQ_REARM_SHIFT;
	if (clear_int)
		val |= 1 << DB_EQ_CLR_SHIFT;
	/* this doorbell always targets an event queue */
	val |= 1 << DB_EQ_EVNT_SHIFT;
	val |= num_popped << DB_EQ_NUM_POPPED_SHIFT;
	iowrite32(val, adapter->db + DB_EQ_OFFSET);
}
215
/* Notify the completion queue doorbell: acknowledge @num_popped consumed
 * completion entries on CQ @qid, optionally re-arming it (@arm).
 * No-op during EEH recovery.  Non-static: also used from other files.
 */
void be_cq_notify(struct be_adapter *adapter, u16 qid, bool arm, u16 num_popped)
{
	u32 val = 0;
	val |= qid & DB_CQ_RING_ID_MASK;
	/* upper ring-id bits live in a separate field of the doorbell */
	val |= ((qid & DB_CQ_RING_ID_EXT_MASK) <<
			DB_CQ_RING_ID_EXT_MASK_SHIFT);

	/* don't touch the device during EEH error recovery */
	if (adapter->eeh_error)
		return;

	if (arm)
		val |= 1 << DB_CQ_REARM_SHIFT;
	val |= num_popped << DB_CQ_NUM_POPPED_SHIFT;
	iowrite32(val, adapter->db + DB_CQ_OFFSET);
}
231
/* ndo_set_mac_address handler.  Validates the new address, and if it
 * differs from the MAC currently programmed in FW, adds the new pmac
 * entry before deleting the old one (so the interface never loses its
 * address), then updates netdev->dev_addr.
 * Returns 0 on success or a negative errno / FW status on failure.
 */
static int be_mac_addr_set(struct net_device *netdev, void *p)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct sockaddr *addr = p;
	int status = 0;
	u8 current_mac[ETH_ALEN];
	/* remember the old pmac-id so it can be deleted after the add */
	u32 pmac_id = adapter->pmac_id[0];

	if (!is_valid_ether_addr(addr->sa_data))
		return -EADDRNOTAVAIL;

	/* ask FW for the MAC currently in use on this interface */
	status = be_cmd_mac_addr_query(adapter, current_mac,
				MAC_ADDRESS_TYPE_NETWORK, false,
				adapter->if_handle, 0);
	if (status)
		goto err;

	if (memcmp(addr->sa_data, current_mac, ETH_ALEN)) {
		/* add-before-delete keeps the i/f reachable throughout */
		status = be_cmd_pmac_add(adapter, (u8 *)addr->sa_data,
				adapter->if_handle, &adapter->pmac_id[0], 0);
		if (status)
			goto err;

		be_cmd_pmac_del(adapter, adapter->if_handle, pmac_id, 0);
	}
	memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
	return 0;
err:
	dev_err(&adapter->pdev->dev, "MAC %pM set Failed\n", addr->sa_data);
	return status;
}
263
/* Copy the v0 (BE2) HW stats layout into the driver's generic
 * be_drv_stats.  The raw buffer is first byte-swapped from LE in place;
 * per-port counters come from the slot matching adapter->port_num.
 */
static void populate_be2_stats(struct be_adapter *adapter)
{
	struct be_hw_stats_v0 *hw_stats = hw_stats_from_cmd(adapter);
	struct be_pmem_stats *pmem_sts = &hw_stats->pmem;
	struct be_rxf_stats_v0 *rxf_stats = &hw_stats->rxf;
	struct be_port_rxf_stats_v0 *port_stats =
		&rxf_stats->port[adapter->port_num];
	struct be_drv_stats *drvs = &adapter->drv_stats;

	/* convert the whole LE stats buffer to CPU endianness in place */
	be_dws_le_to_cpu(hw_stats, sizeof(*hw_stats));
	drvs->rx_pause_frames = port_stats->rx_pause_frames;
	drvs->rx_crc_errors = port_stats->rx_crc_errors;
	drvs->rx_control_frames = port_stats->rx_control_frames;
	drvs->rx_in_range_errors = port_stats->rx_in_range_errors;
	drvs->rx_frame_too_long = port_stats->rx_frame_too_long;
	drvs->rx_dropped_runt = port_stats->rx_dropped_runt;
	drvs->rx_ip_checksum_errs = port_stats->rx_ip_checksum_errs;
	drvs->rx_tcp_checksum_errs = port_stats->rx_tcp_checksum_errs;
	drvs->rx_udp_checksum_errs = port_stats->rx_udp_checksum_errs;
	drvs->rxpp_fifo_overflow_drop = port_stats->rx_fifo_overflow;
	drvs->rx_dropped_tcp_length = port_stats->rx_dropped_tcp_length;
	drvs->rx_dropped_too_small = port_stats->rx_dropped_too_small;
	drvs->rx_dropped_too_short = port_stats->rx_dropped_too_short;
	drvs->rx_out_range_errors = port_stats->rx_out_range_errors;
	drvs->rx_input_fifo_overflow_drop = port_stats->rx_input_fifo_overflow;
	drvs->rx_dropped_header_too_small =
		port_stats->rx_dropped_header_too_small;
	/* v0 reports address and vlan mismatch drops separately; fold them */
	drvs->rx_address_mismatch_drops =
		port_stats->rx_address_mismatch_drops +
		port_stats->rx_vlan_mismatch_drops;
	drvs->rx_alignment_symbol_errors =
		port_stats->rx_alignment_symbol_errors;

	drvs->tx_pauseframes = port_stats->tx_pauseframes;
	drvs->tx_controlframes = port_stats->tx_controlframes;

	/* jabber events are per-port fields in the rxf block on v0 */
	if (adapter->port_num)
		drvs->jabber_events = rxf_stats->port1_jabber_events;
	else
		drvs->jabber_events = rxf_stats->port0_jabber_events;
	drvs->rx_drops_no_pbuf = rxf_stats->rx_drops_no_pbuf;
	drvs->rx_drops_no_erx_descr = rxf_stats->rx_drops_no_erx_descr;
	drvs->forwarded_packets = rxf_stats->forwarded_packets;
	drvs->rx_drops_mtu = rxf_stats->rx_drops_mtu;
	drvs->rx_drops_no_tpre_descr = rxf_stats->rx_drops_no_tpre_descr;
	drvs->rx_drops_too_many_frags = rxf_stats->rx_drops_too_many_frags;
	adapter->drv_stats.eth_red_drops = pmem_sts->eth_red_drops;
}
312
/* Copy the v1 (BE3) HW stats layout into the driver's generic
 * be_drv_stats.  Same flow as populate_be2_stats() but the v1 layout
 * carries some counters (e.g. jabber_events, fifo drops) per port
 * instead of in the shared rxf block.
 */
static void populate_be3_stats(struct be_adapter *adapter)
{
	struct be_hw_stats_v1 *hw_stats = hw_stats_from_cmd(adapter);
	struct be_pmem_stats *pmem_sts = &hw_stats->pmem;
	struct be_rxf_stats_v1 *rxf_stats = &hw_stats->rxf;
	struct be_port_rxf_stats_v1 *port_stats =
		&rxf_stats->port[adapter->port_num];
	struct be_drv_stats *drvs = &adapter->drv_stats;

	/* convert the whole LE stats buffer to CPU endianness in place */
	be_dws_le_to_cpu(hw_stats, sizeof(*hw_stats));
	drvs->pmem_fifo_overflow_drop = port_stats->pmem_fifo_overflow_drop;
	drvs->rx_priority_pause_frames = port_stats->rx_priority_pause_frames;
	drvs->rx_pause_frames = port_stats->rx_pause_frames;
	drvs->rx_crc_errors = port_stats->rx_crc_errors;
	drvs->rx_control_frames = port_stats->rx_control_frames;
	drvs->rx_in_range_errors = port_stats->rx_in_range_errors;
	drvs->rx_frame_too_long = port_stats->rx_frame_too_long;
	drvs->rx_dropped_runt = port_stats->rx_dropped_runt;
	drvs->rx_ip_checksum_errs = port_stats->rx_ip_checksum_errs;
	drvs->rx_tcp_checksum_errs = port_stats->rx_tcp_checksum_errs;
	drvs->rx_udp_checksum_errs = port_stats->rx_udp_checksum_errs;
	drvs->rx_dropped_tcp_length = port_stats->rx_dropped_tcp_length;
	drvs->rx_dropped_too_small = port_stats->rx_dropped_too_small;
	drvs->rx_dropped_too_short = port_stats->rx_dropped_too_short;
	drvs->rx_out_range_errors = port_stats->rx_out_range_errors;
	drvs->rx_dropped_header_too_small =
		port_stats->rx_dropped_header_too_small;
	drvs->rx_input_fifo_overflow_drop =
		port_stats->rx_input_fifo_overflow_drop;
	/* v1 already reports a combined mismatch-drop counter */
	drvs->rx_address_mismatch_drops = port_stats->rx_address_mismatch_drops;
	drvs->rx_alignment_symbol_errors =
		port_stats->rx_alignment_symbol_errors;
	drvs->rxpp_fifo_overflow_drop = port_stats->rxpp_fifo_overflow_drop;
	drvs->tx_pauseframes = port_stats->tx_pauseframes;
	drvs->tx_controlframes = port_stats->tx_controlframes;
	drvs->jabber_events = port_stats->jabber_events;
	drvs->rx_drops_no_pbuf = rxf_stats->rx_drops_no_pbuf;
	drvs->rx_drops_no_erx_descr = rxf_stats->rx_drops_no_erx_descr;
	drvs->forwarded_packets = rxf_stats->forwarded_packets;
	drvs->rx_drops_mtu = rxf_stats->rx_drops_mtu;
	drvs->rx_drops_no_tpre_descr = rxf_stats->rx_drops_no_tpre_descr;
	drvs->rx_drops_too_many_frags = rxf_stats->rx_drops_too_many_frags;
	adapter->drv_stats.eth_red_drops = pmem_sts->eth_red_drops;
}
357
/* Copy the Lancer per-physical-port (pport) stats layout into the
 * driver's generic be_drv_stats.  Lancer counters are 64-bit pairs;
 * only the low 32 bits (*_lo) are consumed here.
 */
static void populate_lancer_stats(struct be_adapter *adapter)
{

	struct be_drv_stats *drvs = &adapter->drv_stats;
	struct lancer_pport_stats *pport_stats =
					pport_stats_from_cmd(adapter);

	/* convert the whole LE stats buffer to CPU endianness in place */
	be_dws_le_to_cpu(pport_stats, sizeof(*pport_stats));
	drvs->rx_pause_frames = pport_stats->rx_pause_frames_lo;
	drvs->rx_crc_errors = pport_stats->rx_crc_errors_lo;
	drvs->rx_control_frames = pport_stats->rx_control_frames_lo;
	drvs->rx_in_range_errors = pport_stats->rx_in_range_errors;
	drvs->rx_frame_too_long = pport_stats->rx_frames_too_long_lo;
	drvs->rx_dropped_runt = pport_stats->rx_dropped_runt;
	drvs->rx_ip_checksum_errs = pport_stats->rx_ip_checksum_errors;
	drvs->rx_tcp_checksum_errs = pport_stats->rx_tcp_checksum_errors;
	drvs->rx_udp_checksum_errs = pport_stats->rx_udp_checksum_errors;
	drvs->rx_dropped_tcp_length =
				pport_stats->rx_dropped_invalid_tcp_length;
	drvs->rx_dropped_too_small = pport_stats->rx_dropped_too_small;
	drvs->rx_dropped_too_short = pport_stats->rx_dropped_too_short;
	drvs->rx_out_range_errors = pport_stats->rx_out_of_range_errors;
	drvs->rx_dropped_header_too_small =
				pport_stats->rx_dropped_header_too_small;
	drvs->rx_input_fifo_overflow_drop = pport_stats->rx_fifo_overflow;
	/* fold address and vlan mismatch drops into one driver counter */
	drvs->rx_address_mismatch_drops =
					pport_stats->rx_address_mismatch_drops +
					pport_stats->rx_vlan_mismatch_drops;
	drvs->rx_alignment_symbol_errors = pport_stats->rx_symbol_errors_lo;
	drvs->rxpp_fifo_overflow_drop = pport_stats->rx_fifo_overflow;
	drvs->tx_pauseframes = pport_stats->tx_pause_frames_lo;
	drvs->tx_controlframes = pport_stats->tx_control_frames_lo;
	drvs->jabber_events = pport_stats->rx_jabbers;
	drvs->forwarded_packets = pport_stats->num_forwards_lo;
	drvs->rx_drops_mtu = pport_stats->rx_drops_mtu_lo;
	drvs->rx_drops_too_many_frags =
				pport_stats->rx_drops_too_many_frags_lo;
}
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000396
Sathya Perla09c1c682011-08-22 19:41:53 +0000397static void accumulate_16bit_val(u32 *acc, u16 val)
398{
399#define lo(x) (x & 0xFFFF)
400#define hi(x) (x & 0xFFFF0000)
401 bool wrapped = val < lo(*acc);
402 u32 newacc = hi(*acc) + val;
403
404 if (wrapped)
405 newacc += 65536;
406 ACCESS_ONCE(*acc) = newacc;
407}
408
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000409void be_parse_stats(struct be_adapter *adapter)
410{
Sathya Perlaac124ff2011-07-25 19:10:14 +0000411 struct be_erx_stats_v1 *erx = be_erx_stats_from_cmd(adapter);
412 struct be_rx_obj *rxo;
413 int i;
414
Selvin Xavier005d5692011-05-16 07:36:35 +0000415 if (adapter->generation == BE_GEN3) {
416 if (lancer_chip(adapter))
417 populate_lancer_stats(adapter);
418 else
419 populate_be3_stats(adapter);
420 } else {
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000421 populate_be2_stats(adapter);
Selvin Xavier005d5692011-05-16 07:36:35 +0000422 }
Sathya Perlaac124ff2011-07-25 19:10:14 +0000423
Padmanabh Ratnakard51ebd32012-04-25 01:46:52 +0000424 if (lancer_chip(adapter))
425 goto done;
426
Sathya Perlaac124ff2011-07-25 19:10:14 +0000427 /* as erx_v1 is longer than v0, ok to use v1 defn for v0 access */
Sathya Perla09c1c682011-08-22 19:41:53 +0000428 for_all_rx_queues(adapter, rxo, i) {
429 /* below erx HW counter can actually wrap around after
430 * 65535. Driver accumulates a 32-bit value
431 */
432 accumulate_16bit_val(&rx_stats(rxo)->rx_drops_no_frags,
433 (u16)erx->rx_drops_no_fragments[rxo->q.id]);
434 }
Padmanabh Ratnakard51ebd32012-04-25 01:46:52 +0000435done:
436 return;
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000437}
438
/* ndo_get_stats64 handler: aggregate per-RX/TX-queue packet and byte
 * counters (read consistently via the u64_stats seqcount) and derive the
 * standard rtnl error counters from the driver stats.  Returns @stats.
 */
static struct rtnl_link_stats64 *be_get_stats64(struct net_device *netdev,
					struct rtnl_link_stats64 *stats)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_drv_stats *drvs = &adapter->drv_stats;
	struct be_rx_obj *rxo;
	struct be_tx_obj *txo;
	u64 pkts, bytes;
	unsigned int start;
	int i;

	for_all_rx_queues(adapter, rxo, i) {
		const struct be_rx_stats *rx_stats = rx_stats(rxo);
		/* retry until a consistent pkts/bytes snapshot is read */
		do {
			start = u64_stats_fetch_begin_bh(&rx_stats->sync);
			pkts = rx_stats(rxo)->rx_pkts;
			bytes = rx_stats(rxo)->rx_bytes;
		} while (u64_stats_fetch_retry_bh(&rx_stats->sync, start));
		stats->rx_packets += pkts;
		stats->rx_bytes += bytes;
		stats->multicast += rx_stats(rxo)->rx_mcast_pkts;
		stats->rx_dropped += rx_stats(rxo)->rx_drops_no_skbs +
					rx_stats(rxo)->rx_drops_no_frags;
	}

	for_all_tx_queues(adapter, txo, i) {
		const struct be_tx_stats *tx_stats = tx_stats(txo);
		/* retry until a consistent pkts/bytes snapshot is read */
		do {
			start = u64_stats_fetch_begin_bh(&tx_stats->sync);
			pkts = tx_stats(txo)->tx_pkts;
			bytes = tx_stats(txo)->tx_bytes;
		} while (u64_stats_fetch_retry_bh(&tx_stats->sync, start));
		stats->tx_packets += pkts;
		stats->tx_bytes += bytes;
	}

	/* bad pkts received */
	stats->rx_errors = drvs->rx_crc_errors +
		drvs->rx_alignment_symbol_errors +
		drvs->rx_in_range_errors +
		drvs->rx_out_range_errors +
		drvs->rx_frame_too_long +
		drvs->rx_dropped_too_small +
		drvs->rx_dropped_too_short +
		drvs->rx_dropped_header_too_small +
		drvs->rx_dropped_tcp_length +
		drvs->rx_dropped_runt;

	/* detailed rx errors */
	stats->rx_length_errors = drvs->rx_in_range_errors +
		drvs->rx_out_range_errors +
		drvs->rx_frame_too_long;

	stats->rx_crc_errors = drvs->rx_crc_errors;

	/* frame alignment errors */
	stats->rx_frame_errors = drvs->rx_alignment_symbol_errors;

	/* receiver fifo overrun */
	/* drops_no_pbuf is no per i/f, it's per BE card */
	stats->rx_fifo_errors = drvs->rxpp_fifo_overflow_drop +
				drvs->rx_input_fifo_overflow_drop +
				drvs->rx_drops_no_pbuf;
	return stats;
}
504
Ajit Khapardeb236916a2011-12-30 12:15:40 +0000505void be_link_status_update(struct be_adapter *adapter, u8 link_status)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700506{
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700507 struct net_device *netdev = adapter->netdev;
508
Ajit Khapardeb236916a2011-12-30 12:15:40 +0000509 if (!(adapter->flags & BE_FLAGS_LINK_STATUS_INIT)) {
Sathya Perlaea172a02011-08-02 19:57:42 +0000510 netif_carrier_off(netdev);
Ajit Khapardeb236916a2011-12-30 12:15:40 +0000511 adapter->flags |= BE_FLAGS_LINK_STATUS_INIT;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700512 }
Ajit Khapardeb236916a2011-12-30 12:15:40 +0000513
514 if ((link_status & LINK_STATUS_MASK) == LINK_UP)
515 netif_carrier_on(netdev);
516 else
517 netif_carrier_off(netdev);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700518}
519
/* Update per-TX-queue statistics after posting a packet: one request,
 * @wrb_cnt WRBs, @copied bytes, and gso_segs packets (1 for non-GSO).
 * @stopped records whether this post caused the queue to be stopped.
 * The u64_stats begin/end pair makes the update appear atomic to
 * readers in be_get_stats64().
 */
static void be_tx_stats_update(struct be_tx_obj *txo,
			u32 wrb_cnt, u32 copied, u32 gso_segs, bool stopped)
{
	struct be_tx_stats *stats = tx_stats(txo);

	u64_stats_update_begin(&stats->sync);
	stats->tx_reqs++;
	stats->tx_wrbs += wrb_cnt;
	stats->tx_bytes += copied;
	stats->tx_pkts += (gso_segs ? gso_segs : 1);
	if (stopped)
		stats->tx_stops++;
	u64_stats_update_end(&stats->sync);
}
534
535/* Determine number of WRB entries needed to xmit data in an skb */
Sathya Perlafe6d2a32010-11-21 23:25:50 +0000536static u32 wrb_cnt_for_skb(struct be_adapter *adapter, struct sk_buff *skb,
537 bool *dummy)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700538{
David S. Millerebc8d2a2009-06-09 01:01:31 -0700539 int cnt = (skb->len > skb->data_len);
540
541 cnt += skb_shinfo(skb)->nr_frags;
542
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700543 /* to account for hdr wrb */
544 cnt++;
Sathya Perlafe6d2a32010-11-21 23:25:50 +0000545 if (lancer_chip(adapter) || !(cnt & 1)) {
546 *dummy = false;
547 } else {
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700548 /* add a dummy to make it an even num */
549 cnt++;
550 *dummy = true;
Sathya Perlafe6d2a32010-11-21 23:25:50 +0000551 }
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700552 BUG_ON(cnt > BE_MAX_TX_FRAG_COUNT);
553 return cnt;
554}
555
556static inline void wrb_fill(struct be_eth_wrb *wrb, u64 addr, int len)
557{
558 wrb->frag_pa_hi = upper_32_bits(addr);
559 wrb->frag_pa_lo = addr & 0xFFFFFFFF;
560 wrb->frag_len = len & ETH_WRB_FRAG_LEN_MASK;
Somnath Kotur89b1f492012-06-24 19:40:55 +0000561 wrb->rsvd0 = 0;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700562}
563
Ajit Khaparde1ded1322011-12-09 13:53:17 +0000564static inline u16 be_get_tx_vlan_tag(struct be_adapter *adapter,
565 struct sk_buff *skb)
566{
567 u8 vlan_prio;
568 u16 vlan_tag;
569
570 vlan_tag = vlan_tx_tag_get(skb);
571 vlan_prio = (vlan_tag & VLAN_PRIO_MASK) >> VLAN_PRIO_SHIFT;
572 /* If vlan priority provided by OS is NOT in available bmap */
573 if (!(adapter->vlan_prio_bmap & (1 << vlan_prio)))
574 vlan_tag = (vlan_tag & ~VLAN_PRIO_MASK) |
575 adapter->recommended_prio;
576
577 return vlan_tag;
578}
579
Somnath Kotur93040ae2012-06-26 22:32:10 +0000580static int be_vlan_tag_chk(struct be_adapter *adapter, struct sk_buff *skb)
581{
582 return vlan_tx_tag_present(skb) || adapter->pvid;
583}
584
/* Build the TX header WRB for @skb: always requests CRC offload, then
 * programs LSO/checksum bits according to the skb's offload state, the
 * VLAN tag if present, and finally the WRB count and total frame length.
 */
static void wrb_fill_hdr(struct be_adapter *adapter, struct be_eth_hdr_wrb *hdr,
		struct sk_buff *skb, u32 wrb_cnt, u32 len)
{
	u16 vlan_tag;

	memset(hdr, 0, sizeof(*hdr));

	AMAP_SET_BITS(struct amap_eth_hdr_wrb, crc, hdr, 1);

	if (skb_is_gso(skb)) {
		AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso, hdr, 1);
		AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso_mss,
			hdr, skb_shinfo(skb)->gso_size);
		/* Lancer does not use the separate lso6 bit for IPv6 GSO */
		if (skb_is_gso_v6(skb) && !lancer_chip(adapter))
			AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso6, hdr, 1);
		/* Lancer A0 needs explicit csum bits even for LSO frames */
		if (lancer_chip(adapter) && adapter->sli_family ==
			LANCER_A0_SLI_FAMILY) {
			AMAP_SET_BITS(struct amap_eth_hdr_wrb, ipcs, hdr, 1);
			if (is_tcp_pkt(skb))
				AMAP_SET_BITS(struct amap_eth_hdr_wrb,
								tcpcs, hdr, 1);
			else if (is_udp_pkt(skb))
				AMAP_SET_BITS(struct amap_eth_hdr_wrb,
								udpcs, hdr, 1);
		}
	} else if (skb->ip_summed == CHECKSUM_PARTIAL) {
		if (is_tcp_pkt(skb))
			AMAP_SET_BITS(struct amap_eth_hdr_wrb, tcpcs, hdr, 1);
		else if (is_udp_pkt(skb))
			AMAP_SET_BITS(struct amap_eth_hdr_wrb, udpcs, hdr, 1);
	}

	if (vlan_tx_tag_present(skb)) {
		AMAP_SET_BITS(struct amap_eth_hdr_wrb, vlan, hdr, 1);
		vlan_tag = be_get_tx_vlan_tag(adapter, skb);
		AMAP_SET_BITS(struct amap_eth_hdr_wrb, vlan_tag, hdr, vlan_tag);
	}

	AMAP_SET_BITS(struct amap_eth_hdr_wrb, event, hdr, 1);
	AMAP_SET_BITS(struct amap_eth_hdr_wrb, complete, hdr, 1);
	AMAP_SET_BITS(struct amap_eth_hdr_wrb, num_wrb, hdr, wrb_cnt);
	AMAP_SET_BITS(struct amap_eth_hdr_wrb, len, hdr, len);
}
628
/* DMA-unmap one TX fragment described by @wrb.  The WRB is converted
 * from LE to CPU endianness first so the address/length fields are
 * readable; @unmap_single selects between a dma_map_single() and a
 * dma_map_page() style unmap.  Zero-length WRBs (dummy/hdr) are skipped.
 */
static void unmap_tx_frag(struct device *dev, struct be_eth_wrb *wrb,
		bool unmap_single)
{
	dma_addr_t dma;

	be_dws_le_to_cpu(wrb, sizeof(*wrb));

	/* reassemble the 64-bit DMA address from its two 32-bit halves */
	dma = (u64)wrb->frag_pa_hi << 32 | (u64)wrb->frag_pa_lo;
	if (wrb->frag_len) {
		if (unmap_single)
			dma_unmap_single(dev, dma, wrb->frag_len,
					 DMA_TO_DEVICE);
		else
			dma_unmap_page(dev, dma, wrb->frag_len, DMA_TO_DEVICE);
	}
}
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700645
/* Post one header WRB plus one data WRB per skb fragment (and an optional
 * zero-length dummy WRB) onto @txq, DMA-mapping each fragment.
 * Returns the number of data bytes mapped, or 0 if any DMA mapping failed
 * (after unwinding every mapping made so far and restoring the queue head).
 */
static int make_tx_wrbs(struct be_adapter *adapter, struct be_queue_info *txq,
		struct sk_buff *skb, u32 wrb_cnt, bool dummy_wrb)
{
	dma_addr_t busaddr;
	int i, copied = 0;
	struct device *dev = &adapter->pdev->dev;
	struct sk_buff *first_skb = skb;
	struct be_eth_wrb *wrb;
	struct be_eth_hdr_wrb *hdr;
	bool map_single = false;
	u16 map_head;

	/* Reserve the first slot for the header WRB; it is filled last,
	 * once the total copied length is known */
	hdr = queue_head_node(txq);
	queue_head_inc(txq);
	map_head = txq->head;	/* first data-WRB slot, kept for error unwind */

	/* Linear (head) portion of the skb, if any */
	if (skb->len > skb->data_len) {
		int len = skb_headlen(skb);
		busaddr = dma_map_single(dev, skb->data, len, DMA_TO_DEVICE);
		if (dma_mapping_error(dev, busaddr))
			goto dma_err;
		map_single = true;
		wrb = queue_head_node(txq);
		wrb_fill(wrb, busaddr, len);
		be_dws_cpu_to_le(wrb, sizeof(*wrb));
		queue_head_inc(txq);
		copied += len;
	}

	/* One WRB per paged fragment */
	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		const struct skb_frag_struct *frag =
			&skb_shinfo(skb)->frags[i];
		busaddr = skb_frag_dma_map(dev, frag, 0,
				skb_frag_size(frag), DMA_TO_DEVICE);
		if (dma_mapping_error(dev, busaddr))
			goto dma_err;
		wrb = queue_head_node(txq);
		wrb_fill(wrb, busaddr, skb_frag_size(frag));
		be_dws_cpu_to_le(wrb, sizeof(*wrb));
		queue_head_inc(txq);
		copied += skb_frag_size(frag);
	}

	if (dummy_wrb) {
		/* Zero-length WRB used to pad out the WRB count */
		wrb = queue_head_node(txq);
		wrb_fill(wrb, 0, 0);
		be_dws_cpu_to_le(wrb, sizeof(*wrb));
		queue_head_inc(txq);
	}

	wrb_fill_hdr(adapter, hdr, first_skb, wrb_cnt, copied);
	be_dws_cpu_to_le(hdr, sizeof(*hdr));

	return copied;
dma_err:
	/* Unwind: rewind the head and unmap every WRB posted so far.
	 * Only the first mapping may be a single mapping; the rest
	 * are page mappings (map_single is cleared after slot one). */
	txq->head = map_head;
	while (copied) {
		wrb = queue_head_node(txq);
		unmap_tx_frag(dev, wrb, map_single);
		map_single = false;
		copied -= wrb->frag_len;
		queue_head_inc(txq);
	}
	return 0;
}
711
Somnath Kotur93040ae2012-06-26 22:32:10 +0000712static struct sk_buff *be_insert_vlan_in_pkt(struct be_adapter *adapter,
713 struct sk_buff *skb)
714{
715 u16 vlan_tag = 0;
716
717 skb = skb_share_check(skb, GFP_ATOMIC);
718 if (unlikely(!skb))
719 return skb;
720
721 if (vlan_tx_tag_present(skb)) {
722 vlan_tag = be_get_tx_vlan_tag(adapter, skb);
723 __vlan_put_tag(skb, vlan_tag);
724 skb->vlan_tci = 0;
725 }
726
727 return skb;
728}
729
/* ndo_start_xmit handler: maps the skb into tx WRBs, rings the doorbell
 * and stops the subqueue when it is close to full.  Always returns
 * NETDEV_TX_OK (drops are handled internally).
 */
static netdev_tx_t be_xmit(struct sk_buff *skb,
			struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_tx_obj *txo = &adapter->tx_obj[skb_get_queue_mapping(skb)];
	struct be_queue_info *txq = &txo->q;
	struct iphdr *ip = NULL;
	u32 wrb_cnt = 0, copied = 0;
	u32 start = txq->head, eth_hdr_len;
	bool dummy_wrb, stopped = false;

	eth_hdr_len = ntohs(skb->protocol) == ETH_P_8021Q ?
		VLAN_ETH_HLEN : ETH_HLEN;

	/* HW has a bug which considers padding bytes as legal
	 * and modifies the IPv4 hdr's 'tot_len' field
	 */
	if (skb->len <= 60 && be_vlan_tag_chk(adapter, skb) &&
			is_ipv4_pkt(skb)) {
		ip = (struct iphdr *)ip_hdr(skb);
		/* Trim off the padding so the frame length matches tot_len */
		pskb_trim(skb, eth_hdr_len + ntohs(ip->tot_len));
	}

	/* HW has a bug wherein it will calculate CSUM for VLAN
	 * pkts even though it is disabled.
	 * Manually insert VLAN in pkt.
	 */
	if (skb->ip_summed != CHECKSUM_PARTIAL &&
			be_vlan_tag_chk(adapter, skb)) {
		skb = be_insert_vlan_in_pkt(adapter, skb);
		if (unlikely(!skb))
			goto tx_drop;
	}

	wrb_cnt = wrb_cnt_for_skb(adapter, skb, &dummy_wrb);

	copied = make_tx_wrbs(adapter, txq, skb, wrb_cnt, dummy_wrb);
	if (copied) {
		int gso_segs = skb_shinfo(skb)->gso_segs;

		/* record the sent skb in the sent_skb table */
		BUG_ON(txo->sent_skb_list[start]);
		txo->sent_skb_list[start] = skb;

		/* Ensure txq has space for the next skb; Else stop the queue
		 * *BEFORE* ringing the tx doorbell, so that we serialze the
		 * tx compls of the current transmit which'll wake up the queue
		 */
		atomic_add(wrb_cnt, &txq->used);
		if ((BE_MAX_TX_FRAG_COUNT + atomic_read(&txq->used)) >=
								txq->len) {
			netif_stop_subqueue(netdev, skb_get_queue_mapping(skb));
			stopped = true;
		}

		be_txq_notify(adapter, txq->id, wrb_cnt);

		be_tx_stats_update(txo, wrb_cnt, copied, gso_segs, stopped);
	} else {
		/* DMA mapping failed: rewind the queue head and drop */
		txq->head = start;
		dev_kfree_skb_any(skb);
	}
tx_drop:
	return NETDEV_TX_OK;
}
795
796static int be_change_mtu(struct net_device *netdev, int new_mtu)
797{
798 struct be_adapter *adapter = netdev_priv(netdev);
799 if (new_mtu < BE_MIN_MTU ||
Ajit Khaparde34a89b82010-02-09 01:32:43 +0000800 new_mtu > (BE_MAX_JUMBO_FRAME_SIZE -
801 (ETH_HLEN + ETH_FCS_LEN))) {
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700802 dev_info(&adapter->pdev->dev,
803 "MTU must be between %d and %d bytes\n",
Ajit Khaparde34a89b82010-02-09 01:32:43 +0000804 BE_MIN_MTU,
805 (BE_MAX_JUMBO_FRAME_SIZE - (ETH_HLEN + ETH_FCS_LEN)));
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700806 return -EINVAL;
807 }
808 dev_info(&adapter->pdev->dev, "MTU changed from %d to %d bytes\n",
809 netdev->mtu, new_mtu);
810 netdev->mtu = new_mtu;
811 return 0;
812}
813
814/*
Ajit Khaparde82903e42010-02-09 01:34:57 +0000815 * A max of 64 (BE_NUM_VLANS_SUPPORTED) vlans can be configured in BE.
816 * If the user configures more, place BE in vlan promiscuous mode.
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700817 */
/* Program the HW VLAN filter table from the sw vlan_tag[] bitmap.
 * Falls back to VLAN promiscuous mode when more vlans are configured
 * than the HW supports, or when programming the filter fails.
 */
static int be_vid_config(struct be_adapter *adapter)
{
	u16 vids[BE_NUM_VLANS_SUPPORTED];
	u16 num = 0, i;
	int status = 0;

	/* No need to further configure vids if in promiscuous mode */
	if (adapter->promiscuous)
		return 0;

	if (adapter->vlans_added > adapter->max_vlans)
		goto set_vlan_promisc;

	/* Construct VLAN Table to give to HW */
	for (i = 0; i < VLAN_N_VID; i++)
		if (adapter->vlan_tag[i])
			vids[num++] = cpu_to_le16(i);

	status = be_cmd_vlan_config(adapter, adapter->if_handle,
				    vids, num, 1, 0);

	/* Set to VLAN promisc mode as setting VLAN filter failed */
	if (status) {
		dev_info(&adapter->pdev->dev, "Exhausted VLAN HW filters.\n");
		dev_info(&adapter->pdev->dev, "Disabling HW VLAN filtering.\n");
		goto set_vlan_promisc;
	}

	return status;

set_vlan_promisc:
	/* NULL table + promisc flag: accept all vlans in HW */
	status = be_cmd_vlan_config(adapter, adapter->if_handle,
				    NULL, 0, 1, 1);
	return status;
}
853
Jiri Pirko8e586132011-12-08 19:52:37 -0500854static int be_vlan_add_vid(struct net_device *netdev, u16 vid)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700855{
856 struct be_adapter *adapter = netdev_priv(netdev);
Ajit Khaparde80817cb2011-12-30 12:15:12 +0000857 int status = 0;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700858
Ajit Khaparde80817cb2011-12-30 12:15:12 +0000859 if (!be_physfn(adapter)) {
860 status = -EINVAL;
861 goto ret;
862 }
Sarveshwar Bandiba343c72010-03-31 02:56:12 +0000863
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700864 adapter->vlan_tag[vid] = 1;
Ajit Khaparde82903e42010-02-09 01:34:57 +0000865 if (adapter->vlans_added <= (adapter->max_vlans + 1))
Sathya Perla10329df2012-06-05 19:37:18 +0000866 status = be_vid_config(adapter);
Jiri Pirko8e586132011-12-08 19:52:37 -0500867
Ajit Khaparde80817cb2011-12-30 12:15:12 +0000868 if (!status)
869 adapter->vlans_added++;
870 else
871 adapter->vlan_tag[vid] = 0;
872ret:
873 return status;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700874}
875
Jiri Pirko8e586132011-12-08 19:52:37 -0500876static int be_vlan_rem_vid(struct net_device *netdev, u16 vid)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700877{
878 struct be_adapter *adapter = netdev_priv(netdev);
Ajit Khaparde80817cb2011-12-30 12:15:12 +0000879 int status = 0;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700880
Ajit Khaparde80817cb2011-12-30 12:15:12 +0000881 if (!be_physfn(adapter)) {
882 status = -EINVAL;
883 goto ret;
884 }
Sarveshwar Bandiba343c72010-03-31 02:56:12 +0000885
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700886 adapter->vlan_tag[vid] = 0;
Ajit Khaparde82903e42010-02-09 01:34:57 +0000887 if (adapter->vlans_added <= adapter->max_vlans)
Sathya Perla10329df2012-06-05 19:37:18 +0000888 status = be_vid_config(adapter);
Jiri Pirko8e586132011-12-08 19:52:37 -0500889
Ajit Khaparde80817cb2011-12-30 12:15:12 +0000890 if (!status)
891 adapter->vlans_added--;
892 else
893 adapter->vlan_tag[vid] = 1;
894ret:
895 return status;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700896}
897
/* ndo_set_rx_mode handler: sync promiscuous / allmulti flags, the
 * unicast MAC filter slots and the multicast list to the HW,
 * falling back to (mcast-)promiscuous mode when filters run out.
 */
static void be_set_rx_mode(struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	int status;

	if (netdev->flags & IFF_PROMISC) {
		be_cmd_rx_filter(adapter, IFF_PROMISC, ON);
		adapter->promiscuous = true;
		goto done;
	}

	/* BE was previously in promiscuous mode; disable it */
	if (adapter->promiscuous) {
		adapter->promiscuous = false;
		be_cmd_rx_filter(adapter, IFF_PROMISC, OFF);

		/* Re-program the VLAN filters dropped while promiscuous */
		if (adapter->vlans_added)
			be_vid_config(adapter);
	}

	/* Enable multicast promisc if num configured exceeds what we support */
	if (netdev->flags & IFF_ALLMULTI ||
			netdev_mc_count(netdev) > BE_MAX_MC) {
		be_cmd_rx_filter(adapter, IFF_ALLMULTI, ON);
		goto done;
	}

	/* Resync the unicast MAC filter slots with the netdev uc list */
	if (netdev_uc_count(netdev) != adapter->uc_macs) {
		struct netdev_hw_addr *ha;
		int i = 1; /* First slot is claimed by the Primary MAC */

		/* Flush all previously programmed secondary uc MACs */
		for (; adapter->uc_macs > 0; adapter->uc_macs--, i++) {
			be_cmd_pmac_del(adapter, adapter->if_handle,
					adapter->pmac_id[i], 0);
		}

		/* More uc addrs than pmac slots: go fully promiscuous */
		if (netdev_uc_count(netdev) > adapter->max_pmac_cnt) {
			be_cmd_rx_filter(adapter, IFF_PROMISC, ON);
			adapter->promiscuous = true;
			goto done;
		}

		netdev_for_each_uc_addr(ha, adapter->netdev) {
			adapter->uc_macs++; /* First slot is for Primary MAC */
			be_cmd_pmac_add(adapter, (u8 *)ha->addr,
					adapter->if_handle,
					&adapter->pmac_id[adapter->uc_macs], 0);
		}
	}

	status = be_cmd_rx_filter(adapter, IFF_MULTICAST, ON);

	/* Set to MCAST promisc mode if setting MULTICAST address fails */
	if (status) {
		dev_info(&adapter->pdev->dev, "Exhausted multicast HW filters.\n");
		dev_info(&adapter->pdev->dev, "Disabling HW multicast filtering.\n");
		be_cmd_rx_filter(adapter, IFF_ALLMULTI, ON);
	}
done:
	return;
}
959
Sarveshwar Bandiba343c72010-03-31 02:56:12 +0000960static int be_set_vf_mac(struct net_device *netdev, int vf, u8 *mac)
961{
962 struct be_adapter *adapter = netdev_priv(netdev);
Sathya Perla11ac75e2011-12-13 00:58:50 +0000963 struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf];
Sarveshwar Bandiba343c72010-03-31 02:56:12 +0000964 int status;
965
Sathya Perla11ac75e2011-12-13 00:58:50 +0000966 if (!sriov_enabled(adapter))
Sarveshwar Bandiba343c72010-03-31 02:56:12 +0000967 return -EPERM;
968
Sathya Perla11ac75e2011-12-13 00:58:50 +0000969 if (!is_valid_ether_addr(mac) || vf >= adapter->num_vfs)
Sarveshwar Bandiba343c72010-03-31 02:56:12 +0000970 return -EINVAL;
971
Padmanabh Ratnakar590c3912011-11-25 05:47:26 +0000972 if (lancer_chip(adapter)) {
973 status = be_cmd_set_mac_list(adapter, mac, 1, vf + 1);
974 } else {
Sathya Perla11ac75e2011-12-13 00:58:50 +0000975 status = be_cmd_pmac_del(adapter, vf_cfg->if_handle,
976 vf_cfg->pmac_id, vf + 1);
Sarveshwar Bandiba343c72010-03-31 02:56:12 +0000977
Sathya Perla11ac75e2011-12-13 00:58:50 +0000978 status = be_cmd_pmac_add(adapter, mac, vf_cfg->if_handle,
979 &vf_cfg->pmac_id, vf + 1);
Padmanabh Ratnakar590c3912011-11-25 05:47:26 +0000980 }
981
Ajit Khaparde64600ea2010-07-23 01:50:34 +0000982 if (status)
Sarveshwar Bandiba343c72010-03-31 02:56:12 +0000983 dev_err(&adapter->pdev->dev, "MAC %pM set on VF %d Failed\n",
984 mac, vf);
Ajit Khaparde64600ea2010-07-23 01:50:34 +0000985 else
Sathya Perla11ac75e2011-12-13 00:58:50 +0000986 memcpy(vf_cfg->mac_addr, mac, ETH_ALEN);
Ajit Khaparde64600ea2010-07-23 01:50:34 +0000987
Sarveshwar Bandiba343c72010-03-31 02:56:12 +0000988 return status;
989}
990
Ajit Khaparde64600ea2010-07-23 01:50:34 +0000991static int be_get_vf_config(struct net_device *netdev, int vf,
992 struct ifla_vf_info *vi)
993{
994 struct be_adapter *adapter = netdev_priv(netdev);
Sathya Perla11ac75e2011-12-13 00:58:50 +0000995 struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf];
Ajit Khaparde64600ea2010-07-23 01:50:34 +0000996
Sathya Perla11ac75e2011-12-13 00:58:50 +0000997 if (!sriov_enabled(adapter))
Ajit Khaparde64600ea2010-07-23 01:50:34 +0000998 return -EPERM;
999
Sathya Perla11ac75e2011-12-13 00:58:50 +00001000 if (vf >= adapter->num_vfs)
Ajit Khaparde64600ea2010-07-23 01:50:34 +00001001 return -EINVAL;
1002
1003 vi->vf = vf;
Sathya Perla11ac75e2011-12-13 00:58:50 +00001004 vi->tx_rate = vf_cfg->tx_rate;
1005 vi->vlan = vf_cfg->vlan_tag;
Ajit Khaparde64600ea2010-07-23 01:50:34 +00001006 vi->qos = 0;
Sathya Perla11ac75e2011-12-13 00:58:50 +00001007 memcpy(&vi->mac, vf_cfg->mac_addr, ETH_ALEN);
Ajit Khaparde64600ea2010-07-23 01:50:34 +00001008
1009 return 0;
1010}
1011
Ajit Khaparde1da87b72010-07-23 01:51:22 +00001012static int be_set_vf_vlan(struct net_device *netdev,
1013 int vf, u16 vlan, u8 qos)
1014{
1015 struct be_adapter *adapter = netdev_priv(netdev);
1016 int status = 0;
1017
Sathya Perla11ac75e2011-12-13 00:58:50 +00001018 if (!sriov_enabled(adapter))
Ajit Khaparde1da87b72010-07-23 01:51:22 +00001019 return -EPERM;
1020
Sathya Perla11ac75e2011-12-13 00:58:50 +00001021 if (vf >= adapter->num_vfs || vlan > 4095)
Ajit Khaparde1da87b72010-07-23 01:51:22 +00001022 return -EINVAL;
1023
1024 if (vlan) {
Ajit Khapardef1f3ee12012-03-18 06:23:41 +00001025 if (adapter->vf_cfg[vf].vlan_tag != vlan) {
1026 /* If this is new value, program it. Else skip. */
1027 adapter->vf_cfg[vf].vlan_tag = vlan;
1028
1029 status = be_cmd_set_hsw_config(adapter, vlan,
1030 vf + 1, adapter->vf_cfg[vf].if_handle);
1031 }
Ajit Khaparde1da87b72010-07-23 01:51:22 +00001032 } else {
Ajit Khapardef1f3ee12012-03-18 06:23:41 +00001033 /* Reset Transparent Vlan Tagging. */
Sathya Perla11ac75e2011-12-13 00:58:50 +00001034 adapter->vf_cfg[vf].vlan_tag = 0;
Ajit Khapardef1f3ee12012-03-18 06:23:41 +00001035 vlan = adapter->vf_cfg[vf].def_vid;
1036 status = be_cmd_set_hsw_config(adapter, vlan, vf + 1,
1037 adapter->vf_cfg[vf].if_handle);
Ajit Khaparde1da87b72010-07-23 01:51:22 +00001038 }
1039
Ajit Khaparde1da87b72010-07-23 01:51:22 +00001040
1041 if (status)
1042 dev_info(&adapter->pdev->dev,
1043 "VLAN %d config on VF %d failed\n", vlan, vf);
1044 return status;
1045}
1046
Ajit Khapardee1d18732010-07-23 01:52:13 +00001047static int be_set_vf_tx_rate(struct net_device *netdev,
1048 int vf, int rate)
1049{
1050 struct be_adapter *adapter = netdev_priv(netdev);
1051 int status = 0;
1052
Sathya Perla11ac75e2011-12-13 00:58:50 +00001053 if (!sriov_enabled(adapter))
Ajit Khapardee1d18732010-07-23 01:52:13 +00001054 return -EPERM;
1055
Ajit Khaparde94f434c2011-12-30 12:15:30 +00001056 if (vf >= adapter->num_vfs)
Ajit Khapardee1d18732010-07-23 01:52:13 +00001057 return -EINVAL;
1058
Ajit Khaparde94f434c2011-12-30 12:15:30 +00001059 if (rate < 100 || rate > 10000) {
1060 dev_err(&adapter->pdev->dev,
1061 "tx rate must be between 100 and 10000 Mbps\n");
1062 return -EINVAL;
1063 }
Ajit Khapardee1d18732010-07-23 01:52:13 +00001064
Ajit Khaparde856c4012011-02-11 13:32:32 +00001065 status = be_cmd_set_qos(adapter, rate / 10, vf + 1);
Ajit Khapardee1d18732010-07-23 01:52:13 +00001066
1067 if (status)
Ajit Khaparde94f434c2011-12-30 12:15:30 +00001068 dev_err(&adapter->pdev->dev,
Ajit Khapardee1d18732010-07-23 01:52:13 +00001069 "tx rate %d on VF %d failed\n", rate, vf);
Ajit Khaparde94f434c2011-12-30 12:15:30 +00001070 else
1071 adapter->vf_cfg[vf].tx_rate = rate;
Ajit Khapardee1d18732010-07-23 01:52:13 +00001072 return status;
1073}
1074
/* Walk PCI config space to count this adapter's VFs.
 * Returns the number of VFs currently assigned to guests when
 * @vf_state == ASSIGNED, otherwise the total number of enabled VFs.
 */
static int be_find_vfs(struct be_adapter *adapter, int vf_state)
{
	struct pci_dev *dev, *pdev = adapter->pdev;
	int vfs = 0, assigned_vfs = 0, pos, vf_fn;
	u16 offset, stride;

	pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_SRIOV);
	if (!pos)
		return 0;	/* no SR-IOV capability: no VFs */
	pci_read_config_word(pdev, pos + PCI_SRIOV_VF_OFFSET, &offset);
	pci_read_config_word(pdev, pos + PCI_SRIOV_VF_STRIDE, &stride);

	/* Scan all devices with our vendor id; a VF of this PF is
	 * recognized by its devfn (PF devfn + offset + n*stride) on
	 * the same bus */
	dev = pci_get_device(pdev->vendor, PCI_ANY_ID, NULL);
	while (dev) {
		vf_fn = (pdev->devfn + offset + stride * vfs) & 0xFFFF;
		if (dev->is_virtfn && dev->devfn == vf_fn &&
			dev->bus->number == pdev->bus->number) {
			vfs++;
			if (dev->dev_flags & PCI_DEV_FLAGS_ASSIGNED)
				assigned_vfs++;
		}
		dev = pci_get_device(pdev->vendor, PCI_ANY_ID, dev);
	}
	return (vf_state == ASSIGNED) ? assigned_vfs : vfs;
}
1100
/* Adaptive interrupt coalescing: recompute the EQ delay for @eqo from
 * the rx packet rate (once a second) and push it to FW if it changed.
 * When AIC is disabled the statically configured delay is used.
 */
static void be_eqd_update(struct be_adapter *adapter, struct be_eq_obj *eqo)
{
	struct be_rx_stats *stats = rx_stats(&adapter->rx_obj[eqo->idx]);
	ulong now = jiffies;
	ulong delta = now - stats->rx_jiffies;
	u64 pkts;
	unsigned int start, eqd;

	if (!eqo->enable_aic) {
		/* AIC off: just apply the fixed, configured delay */
		eqd = eqo->eqd;
		goto modify_eqd;
	}

	/* EQs beyond the rx queues have no rx stats to adapt from */
	if (eqo->idx >= adapter->num_rx_qs)
		return;

	stats = rx_stats(&adapter->rx_obj[eqo->idx]);

	/* Wrapped around */
	if (time_before(now, stats->rx_jiffies)) {
		stats->rx_jiffies = now;
		return;
	}

	/* Update once a second */
	if (delta < HZ)
		return;

	/* Read a consistent rx_pkts snapshot under the u64_stats seqlock */
	do {
		start = u64_stats_fetch_begin_bh(&stats->sync);
		pkts = stats->rx_pkts;
	} while (u64_stats_fetch_retry_bh(&stats->sync, start));

	stats->rx_pps = (unsigned long)(pkts - stats->rx_pkts_prev) / (delta / HZ);
	stats->rx_pkts_prev = pkts;
	stats->rx_jiffies = now;
	/* Scale pkts-per-sec into an EQ delay, clamped to [min_eqd, max_eqd] */
	eqd = (stats->rx_pps / 110000) << 3;
	eqd = min(eqd, eqo->max_eqd);
	eqd = max(eqd, eqo->min_eqd);
	if (eqd < 10)
		eqd = 0;

modify_eqd:
	/* Only issue the FW command when the delay actually changes */
	if (eqd != eqo->cur_eqd) {
		be_cmd_modify_eqd(adapter, eqo->q.id, eqd);
		eqo->cur_eqd = eqd;
	}
}
1149
Sathya Perla3abcded2010-10-03 22:12:27 -07001150static void be_rx_stats_update(struct be_rx_obj *rxo,
Sathya Perla2e588f82011-03-11 02:49:26 +00001151 struct be_rx_compl_info *rxcp)
Sathya Perla4097f662009-03-24 16:40:13 -07001152{
Sathya Perlaac124ff2011-07-25 19:10:14 +00001153 struct be_rx_stats *stats = rx_stats(rxo);
Sathya Perla4097f662009-03-24 16:40:13 -07001154
Sathya Perlaab1594e2011-07-25 19:10:15 +00001155 u64_stats_update_begin(&stats->sync);
Sathya Perla3abcded2010-10-03 22:12:27 -07001156 stats->rx_compl++;
Sathya Perla2e588f82011-03-11 02:49:26 +00001157 stats->rx_bytes += rxcp->pkt_size;
Sathya Perla3abcded2010-10-03 22:12:27 -07001158 stats->rx_pkts++;
Sathya Perla2e588f82011-03-11 02:49:26 +00001159 if (rxcp->pkt_type == BE_MULTICAST_PACKET)
Sathya Perla3abcded2010-10-03 22:12:27 -07001160 stats->rx_mcast_pkts++;
Sathya Perla2e588f82011-03-11 02:49:26 +00001161 if (rxcp->err)
Sathya Perlaac124ff2011-07-25 19:10:14 +00001162 stats->rx_compl_err++;
Sathya Perlaab1594e2011-07-25 19:10:15 +00001163 u64_stats_update_end(&stats->sync);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001164}
1165
Sathya Perla2e588f82011-03-11 02:49:26 +00001166static inline bool csum_passed(struct be_rx_compl_info *rxcp)
Ajit Khaparde728a9972009-04-13 15:41:22 -07001167{
Padmanabh Ratnakar19fad862011-03-07 03:08:16 +00001168 /* L4 checksum is not reliable for non TCP/UDP packets.
1169 * Also ignore ipcksm for ipv6 pkts */
Sathya Perla2e588f82011-03-11 02:49:26 +00001170 return (rxcp->tcpf || rxcp->udpf) && rxcp->l4_csum &&
1171 (rxcp->ip_csum || rxcp->ipv6);
Ajit Khaparde728a9972009-04-13 15:41:22 -07001172}
1173
/* Claim the rx page-info entry at @frag_idx for the driver.
 * DMA-unmaps the backing page if this entry was the page's last user,
 * and decrements the rx queue's used count.
 */
static struct be_rx_page_info *get_rx_page_info(struct be_rx_obj *rxo,
		u16 frag_idx)
{
	struct be_adapter *adapter = rxo->adapter;
	struct be_rx_page_info *rx_page_info;
	struct be_queue_info *rxq = &rxo->q;

	rx_page_info = &rxo->page_info_tbl[frag_idx];
	BUG_ON(!rx_page_info->page);

	/* A big page is shared by several entries; only the last user
	 * unmaps it from the device */
	if (rx_page_info->last_page_user) {
		dma_unmap_page(&adapter->pdev->dev,
			dma_unmap_addr(rx_page_info, bus),
			adapter->big_page_size, DMA_FROM_DEVICE);
		rx_page_info->last_page_user = false;
	}

	atomic_dec(&rxq->used);
	return rx_page_info;
}
1194
1195/* Throwaway the data in the Rx completion */
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001196static void be_rx_compl_discard(struct be_rx_obj *rxo,
1197 struct be_rx_compl_info *rxcp)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001198{
Sathya Perla3abcded2010-10-03 22:12:27 -07001199 struct be_queue_info *rxq = &rxo->q;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001200 struct be_rx_page_info *page_info;
Sathya Perla2e588f82011-03-11 02:49:26 +00001201 u16 i, num_rcvd = rxcp->num_rcvd;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001202
Padmanabh Ratnakare80d9da2011-03-07 03:07:58 +00001203 for (i = 0; i < num_rcvd; i++) {
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001204 page_info = get_rx_page_info(rxo, rxcp->rxq_idx);
Padmanabh Ratnakare80d9da2011-03-07 03:07:58 +00001205 put_page(page_info->page);
1206 memset(page_info, 0, sizeof(*page_info));
Sathya Perla2e588f82011-03-11 02:49:26 +00001207 index_inc(&rxcp->rxq_idx, rxq->len);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001208 }
1209}
1210
/*
 * skb_fill_rx_data forms a complete skb for an ether frame
 * indicated by rxcp: the first BE_HDR_LEN bytes are copied into the
 * skb linear area, the rest is attached as page fragments, with
 * fragments from the same physical page coalesced into one slot.
 */
static void skb_fill_rx_data(struct be_rx_obj *rxo, struct sk_buff *skb,
			struct be_rx_compl_info *rxcp)
{
	struct be_queue_info *rxq = &rxo->q;
	struct be_rx_page_info *page_info;
	u16 i, j;
	u16 hdr_len, curr_frag_len, remaining;
	u8 *start;

	page_info = get_rx_page_info(rxo, rxcp->rxq_idx);
	start = page_address(page_info->page) + page_info->page_offset;
	prefetch(start);

	/* Copy data in the first descriptor of this completion */
	curr_frag_len = min(rxcp->pkt_size, rx_frag_size);

	/* Copy the header portion into skb_data */
	hdr_len = min(BE_HDR_LEN, curr_frag_len);
	memcpy(skb->data, start, hdr_len);
	skb->len = curr_frag_len;
	if (curr_frag_len <= BE_HDR_LEN) { /* tiny packet */
		/* Complete packet has now been moved to data */
		put_page(page_info->page);
		skb->data_len = 0;
		skb->tail += curr_frag_len;
	} else {
		/* Remainder of the first fragment becomes frag[0] */
		skb_shinfo(skb)->nr_frags = 1;
		skb_frag_set_page(skb, 0, page_info->page);
		skb_shinfo(skb)->frags[0].page_offset =
					page_info->page_offset + hdr_len;
		skb_frag_size_set(&skb_shinfo(skb)->frags[0], curr_frag_len - hdr_len);
		skb->data_len = curr_frag_len - hdr_len;
		skb->truesize += rx_frag_size;
		skb->tail += hdr_len;
	}
	page_info->page = NULL;	/* ownership moved to the skb (or dropped) */

	if (rxcp->pkt_size <= rx_frag_size) {
		BUG_ON(rxcp->num_rcvd != 1);
		return;
	}

	/* More frags present for this completion */
	index_inc(&rxcp->rxq_idx, rxq->len);
	remaining = rxcp->pkt_size - curr_frag_len;
	for (i = 1, j = 0; i < rxcp->num_rcvd; i++) {
		page_info = get_rx_page_info(rxo, rxcp->rxq_idx);
		curr_frag_len = min(remaining, rx_frag_size);

		/* Coalesce all frags from the same physical page in one slot */
		if (page_info->page_offset == 0) {
			/* Fresh page */
			j++;
			skb_frag_set_page(skb, j, page_info->page);
			skb_shinfo(skb)->frags[j].page_offset =
							page_info->page_offset;
			skb_frag_size_set(&skb_shinfo(skb)->frags[j], 0);
			skb_shinfo(skb)->nr_frags++;
		} else {
			/* Same page as frag[j]: drop the extra reference */
			put_page(page_info->page);
		}

		skb_frag_size_add(&skb_shinfo(skb)->frags[j], curr_frag_len);
		skb->len += curr_frag_len;
		skb->data_len += curr_frag_len;
		skb->truesize += rx_frag_size;
		remaining -= curr_frag_len;
		index_inc(&rxcp->rxq_idx, rxq->len);
		page_info->page = NULL;
	}
	BUG_ON(j > MAX_SKB_FRAGS);
}
1287
/* Process the RX completion indicated by rxcp when GRO is disabled.
 * Builds an skb from the posted rx frags and hands it to the stack
 * via netif_receive_skb().
 */
static void be_rx_compl_process(struct be_rx_obj *rxo,
				struct be_rx_compl_info *rxcp)
{
	struct be_adapter *adapter = rxo->adapter;
	struct net_device *netdev = adapter->netdev;
	struct sk_buff *skb;

	skb = netdev_alloc_skb_ip_align(netdev, BE_RX_SKB_ALLOC_SIZE);
	if (unlikely(!skb)) {
		/* No skb memory: count the drop and recycle the rx frags */
		rx_stats(rxo)->rx_drops_no_skbs++;
		be_rx_compl_discard(rxo, rxcp);
		return;
	}

	skb_fill_rx_data(rxo, skb, rxcp);

	/* Trust the h/w checksum result only when RXCSUM is enabled */
	if (likely((netdev->features & NETIF_F_RXCSUM) && csum_passed(rxcp)))
		skb->ip_summed = CHECKSUM_UNNECESSARY;
	else
		skb_checksum_none_assert(skb);

	skb->protocol = eth_type_trans(skb, netdev);
	skb_record_rx_queue(skb, rxo - &adapter->rx_obj[0]);
	if (netdev->features & NETIF_F_RXHASH)
		skb->rxhash = rxcp->rss_hash;

	if (rxcp->vlanf)
		__vlan_hwaccel_put_tag(skb, rxcp->vlan_tag);

	netif_receive_skb(skb);
}
1321
/* Process the RX completion indicated by rxcp when GRO is enabled.
 * Attaches the posted rx frags directly to the napi-provided skb and
 * submits it via napi_gro_frags().
 *
 * NOTE(review): unlike its non-GRO sibling this function is not 'static';
 * presumably an oversight — confirm no other file references it before
 * changing the linkage.
 */
void be_rx_compl_process_gro(struct be_rx_obj *rxo, struct napi_struct *napi,
			     struct be_rx_compl_info *rxcp)
{
	struct be_adapter *adapter = rxo->adapter;
	struct be_rx_page_info *page_info;
	struct sk_buff *skb = NULL;
	struct be_queue_info *rxq = &rxo->q;
	u16 remaining, curr_frag_len;
	u16 i, j;

	skb = napi_get_frags(napi);
	if (!skb) {
		be_rx_compl_discard(rxo, rxcp);
		return;
	}

	remaining = rxcp->pkt_size;
	/* j starts at -1 so the first frag always opens frags[0] */
	for (i = 0, j = -1; i < rxcp->num_rcvd; i++) {
		page_info = get_rx_page_info(rxo, rxcp->rxq_idx);

		curr_frag_len = min(remaining, rx_frag_size);

		/* Coalesce all frags from the same physical page in one slot */
		if (i == 0 || page_info->page_offset == 0) {
			/* First frag or Fresh page */
			j++;
			skb_frag_set_page(skb, j, page_info->page);
			skb_shinfo(skb)->frags[j].page_offset =
							page_info->page_offset;
			skb_frag_size_set(&skb_shinfo(skb)->frags[j], 0);
		} else {
			/* Same page as the previous frag: drop its extra ref */
			put_page(page_info->page);
		}
		skb_frag_size_add(&skb_shinfo(skb)->frags[j], curr_frag_len);
		skb->truesize += rx_frag_size;
		remaining -= curr_frag_len;
		index_inc(&rxcp->rxq_idx, rxq->len);
		memset(page_info, 0, sizeof(*page_info));
	}
	BUG_ON(j > MAX_SKB_FRAGS);

	skb_shinfo(skb)->nr_frags = j + 1;
	skb->len = rxcp->pkt_size;
	skb->data_len = rxcp->pkt_size;
	/* GRO path is taken only for checksum-verified pkts */
	skb->ip_summed = CHECKSUM_UNNECESSARY;
	skb_record_rx_queue(skb, rxo - &adapter->rx_obj[0]);
	if (adapter->netdev->features & NETIF_F_RXHASH)
		skb->rxhash = rxcp->rss_hash;

	if (rxcp->vlanf)
		__vlan_hwaccel_put_tag(skb, rxcp->vlan_tag);

	napi_gro_frags(napi);
}
1377
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001378static void be_parse_rx_compl_v1(struct be_eth_rx_compl *compl,
1379 struct be_rx_compl_info *rxcp)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001380{
Sathya Perla2e588f82011-03-11 02:49:26 +00001381 rxcp->pkt_size =
1382 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, pktsize, compl);
1383 rxcp->vlanf = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, vtp, compl);
1384 rxcp->err = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, err, compl);
1385 rxcp->tcpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, tcpf, compl);
Padmanabh Ratnakar9ecb42f2011-03-15 14:57:09 -07001386 rxcp->udpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, udpf, compl);
Sathya Perla2e588f82011-03-11 02:49:26 +00001387 rxcp->ip_csum =
1388 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, ipcksm, compl);
1389 rxcp->l4_csum =
1390 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, l4_cksm, compl);
1391 rxcp->ipv6 =
1392 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, ip_version, compl);
1393 rxcp->rxq_idx =
1394 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, fragndx, compl);
1395 rxcp->num_rcvd =
1396 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, numfrags, compl);
1397 rxcp->pkt_type =
1398 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, cast_enc, compl);
Ajit Khaparde4b972912011-04-06 18:07:43 +00001399 rxcp->rss_hash =
1400 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, rsshash, rxcp);
Sathya Perla15d72182011-03-21 20:49:26 +00001401 if (rxcp->vlanf) {
1402 rxcp->vtm = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, vtm,
David S. Miller3c709f82011-05-11 14:26:15 -04001403 compl);
1404 rxcp->vlan_tag = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, vlan_tag,
1405 compl);
Sathya Perla15d72182011-03-21 20:49:26 +00001406 }
Sathya Perla12004ae2011-08-02 19:57:46 +00001407 rxcp->port = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, port, compl);
Sathya Perla2e588f82011-03-11 02:49:26 +00001408}
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001409
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001410static void be_parse_rx_compl_v0(struct be_eth_rx_compl *compl,
1411 struct be_rx_compl_info *rxcp)
Sathya Perla2e588f82011-03-11 02:49:26 +00001412{
1413 rxcp->pkt_size =
1414 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, pktsize, compl);
1415 rxcp->vlanf = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, vtp, compl);
1416 rxcp->err = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, err, compl);
1417 rxcp->tcpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, tcpf, compl);
Padmanabh Ratnakar9ecb42f2011-03-15 14:57:09 -07001418 rxcp->udpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, udpf, compl);
Sathya Perla2e588f82011-03-11 02:49:26 +00001419 rxcp->ip_csum =
1420 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, ipcksm, compl);
1421 rxcp->l4_csum =
1422 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, l4_cksm, compl);
1423 rxcp->ipv6 =
1424 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, ip_version, compl);
1425 rxcp->rxq_idx =
1426 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, fragndx, compl);
1427 rxcp->num_rcvd =
1428 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, numfrags, compl);
1429 rxcp->pkt_type =
1430 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, cast_enc, compl);
Ajit Khaparde4b972912011-04-06 18:07:43 +00001431 rxcp->rss_hash =
1432 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, rsshash, rxcp);
Sathya Perla15d72182011-03-21 20:49:26 +00001433 if (rxcp->vlanf) {
1434 rxcp->vtm = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, vtm,
David S. Miller3c709f82011-05-11 14:26:15 -04001435 compl);
1436 rxcp->vlan_tag = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, vlan_tag,
1437 compl);
Sathya Perla15d72182011-03-21 20:49:26 +00001438 }
Sathya Perla12004ae2011-08-02 19:57:46 +00001439 rxcp->port = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, port, compl);
Sathya Perla2e588f82011-03-11 02:49:26 +00001440}
1441
/* Consume the next RX completion from rxo's CQ, or return NULL if none
 * is pending. The h/w entry is parsed into the per-rxo s/w rxcp struct
 * and then invalidated so it is not picked up again.
 */
static struct be_rx_compl_info *be_rx_compl_get(struct be_rx_obj *rxo)
{
	struct be_eth_rx_compl *compl = queue_tail_node(&rxo->cq);
	struct be_rx_compl_info *rxcp = &rxo->rxcp;
	struct be_adapter *adapter = rxo->adapter;

	/* For checking the valid bit it is Ok to use either definition as the
	 * valid bit is at the same position in both v0 and v1 Rx compl */
	if (compl->dw[offsetof(struct amap_eth_rx_compl_v1, valid) / 32] == 0)
		return NULL;

	/* Read the rest of the entry only after the valid bit is seen */
	rmb();
	be_dws_le_to_cpu(compl, sizeof(*compl));

	if (adapter->be3_native)
		be_parse_rx_compl_v1(compl, rxcp);
	else
		be_parse_rx_compl_v0(compl, rxcp);

	if (rxcp->vlanf) {
		/* vlanf could be wrongly set in some cards.
		 * ignore if vtm is not set */
		if ((adapter->function_mode & FLEX10_MODE) && !rxcp->vtm)
			rxcp->vlanf = 0;

		/* On non-lancer chips the vlan tag needs byte-swapping */
		if (!lancer_chip(adapter))
			rxcp->vlan_tag = swab16(rxcp->vlan_tag);

		/* Hide the tag when it is the port-vlan-id and the user has
		 * not configured that vlan */
		if (adapter->pvid == (rxcp->vlan_tag & VLAN_VID_MASK) &&
		    !adapter->vlan_tag[rxcp->vlan_tag])
			rxcp->vlanf = 0;
	}

	/* As the compl has been parsed, reset it; we wont touch it again */
	compl->dw[offsetof(struct amap_eth_rx_compl_v1, valid) / 32] = 0;

	queue_tail_inc(&rxo->cq);
	return rxcp;
}
1481
Eric Dumazet1829b082011-03-01 05:48:12 +00001482static inline struct page *be_alloc_pages(u32 size, gfp_t gfp)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001483{
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001484 u32 order = get_order(size);
Eric Dumazet1829b082011-03-01 05:48:12 +00001485
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001486 if (order > 0)
Eric Dumazet1829b082011-03-01 05:48:12 +00001487 gfp |= __GFP_COMP;
1488 return alloc_pages(gfp, order);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001489}
1490
1491/*
1492 * Allocate a page, split it to fragments of size rx_frag_size and post as
1493 * receive buffers to BE
1494 */
Eric Dumazet1829b082011-03-01 05:48:12 +00001495static void be_post_rx_frags(struct be_rx_obj *rxo, gfp_t gfp)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001496{
Sathya Perla3abcded2010-10-03 22:12:27 -07001497 struct be_adapter *adapter = rxo->adapter;
Sathya Perla26d92f92010-01-21 22:52:08 -08001498 struct be_rx_page_info *page_info = NULL, *prev_page_info = NULL;
Sathya Perla3abcded2010-10-03 22:12:27 -07001499 struct be_queue_info *rxq = &rxo->q;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001500 struct page *pagep = NULL;
1501 struct be_eth_rx_d *rxd;
1502 u64 page_dmaaddr = 0, frag_dmaaddr;
1503 u32 posted, page_offset = 0;
1504
Sathya Perla3abcded2010-10-03 22:12:27 -07001505 page_info = &rxo->page_info_tbl[rxq->head];
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001506 for (posted = 0; posted < MAX_RX_POST && !page_info->page; posted++) {
1507 if (!pagep) {
Eric Dumazet1829b082011-03-01 05:48:12 +00001508 pagep = be_alloc_pages(adapter->big_page_size, gfp);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001509 if (unlikely(!pagep)) {
Sathya Perlaac124ff2011-07-25 19:10:14 +00001510 rx_stats(rxo)->rx_post_fail++;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001511 break;
1512 }
Ivan Vecera2b7bceb2011-02-02 08:05:12 +00001513 page_dmaaddr = dma_map_page(&adapter->pdev->dev, pagep,
1514 0, adapter->big_page_size,
1515 DMA_FROM_DEVICE);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001516 page_info->page_offset = 0;
1517 } else {
1518 get_page(pagep);
1519 page_info->page_offset = page_offset + rx_frag_size;
1520 }
1521 page_offset = page_info->page_offset;
1522 page_info->page = pagep;
FUJITA Tomonorifac6da52010-04-01 16:53:22 +00001523 dma_unmap_addr_set(page_info, bus, page_dmaaddr);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001524 frag_dmaaddr = page_dmaaddr + page_info->page_offset;
1525
1526 rxd = queue_head_node(rxq);
1527 rxd->fragpa_lo = cpu_to_le32(frag_dmaaddr & 0xFFFFFFFF);
1528 rxd->fragpa_hi = cpu_to_le32(upper_32_bits(frag_dmaaddr));
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001529
1530 /* Any space left in the current big page for another frag? */
1531 if ((page_offset + rx_frag_size + rx_frag_size) >
1532 adapter->big_page_size) {
1533 pagep = NULL;
1534 page_info->last_page_user = true;
1535 }
Sathya Perla26d92f92010-01-21 22:52:08 -08001536
1537 prev_page_info = page_info;
1538 queue_head_inc(rxq);
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001539 page_info = &rxo->page_info_tbl[rxq->head];
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001540 }
1541 if (pagep)
Sathya Perla26d92f92010-01-21 22:52:08 -08001542 prev_page_info->last_page_user = true;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001543
1544 if (posted) {
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001545 atomic_add(posted, &rxq->used);
Sathya Perla8788fdc2009-07-27 22:52:03 +00001546 be_rxq_notify(adapter, rxq->id, posted);
Sathya Perlaea1dae12009-03-19 23:56:20 -07001547 } else if (atomic_read(&rxq->used) == 0) {
1548 /* Let be_worker replenish when memory is available */
Sathya Perla3abcded2010-10-03 22:12:27 -07001549 rxo->rx_post_starved = true;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001550 }
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001551}
1552
Sathya Perla5fb379e2009-06-18 00:02:59 +00001553static struct be_eth_tx_compl *be_tx_compl_get(struct be_queue_info *tx_cq)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001554{
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001555 struct be_eth_tx_compl *txcp = queue_tail_node(tx_cq);
1556
1557 if (txcp->dw[offsetof(struct amap_eth_tx_compl, valid) / 32] == 0)
1558 return NULL;
1559
Sathya Perlaf3eb62d2010-06-29 00:11:17 +00001560 rmb();
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001561 be_dws_le_to_cpu(txcp, sizeof(*txcp));
1562
1563 txcp->dw[offsetof(struct amap_eth_tx_compl, valid) / 32] = 0;
1564
1565 queue_tail_inc(tx_cq);
1566 return txcp;
1567}
1568
/* Unmap and free the skb whose wrbs end at last_index on txo's queue.
 * Returns the number of wrbs (including the header wrb) consumed, so the
 * caller can decrement txq->used accordingly.
 */
static u16 be_tx_compl_process(struct be_adapter *adapter,
			       struct be_tx_obj *txo, u16 last_index)
{
	struct be_queue_info *txq = &txo->q;
	struct be_eth_wrb *wrb;
	struct sk_buff **sent_skbs = txo->sent_skb_list;
	struct sk_buff *sent_skb;
	u16 cur_index, num_wrbs = 1; /* account for hdr wrb */
	bool unmap_skb_hdr = true;

	/* The skb was recorded at the position of its header wrb */
	sent_skb = sent_skbs[txq->tail];
	BUG_ON(!sent_skb);
	sent_skbs[txq->tail] = NULL;

	/* skip header wrb */
	queue_tail_inc(txq);

	do {
		cur_index = txq->tail;
		wrb = queue_tail_node(txq);
		/* The first data wrb maps the linear skb header (if any);
		 * subsequent wrbs map only frags */
		unmap_tx_frag(&adapter->pdev->dev, wrb,
			      (unmap_skb_hdr && skb_headlen(sent_skb)));
		unmap_skb_hdr = false;

		num_wrbs++;
		queue_tail_inc(txq);
	} while (cur_index != last_index);

	kfree_skb(sent_skb);
	return num_wrbs;
}
1600
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001601/* Return the number of events in the event queue */
1602static inline int events_get(struct be_eq_obj *eqo)
Sathya Perla859b1e42009-08-10 03:43:51 +00001603{
1604 struct be_eq_entry *eqe;
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001605 int num = 0;
Sathya Perla859b1e42009-08-10 03:43:51 +00001606
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001607 do {
1608 eqe = queue_tail_node(&eqo->q);
1609 if (eqe->evt == 0)
1610 break;
1611
1612 rmb();
Sathya Perla859b1e42009-08-10 03:43:51 +00001613 eqe->evt = 0;
1614 num++;
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001615 queue_tail_inc(&eqo->q);
1616 } while (true);
Sathya Perla859b1e42009-08-10 03:43:51 +00001617
1618 return num;
1619}
1620
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001621static int event_handle(struct be_eq_obj *eqo)
Sathya Perla859b1e42009-08-10 03:43:51 +00001622{
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001623 bool rearm = false;
1624 int num = events_get(eqo);
Sathya Perla859b1e42009-08-10 03:43:51 +00001625
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001626 /* Deal with any spurious interrupts that come without events */
1627 if (!num)
1628 rearm = true;
Sathya Perla859b1e42009-08-10 03:43:51 +00001629
Padmanabh Ratnakaraf311fe2012-04-25 01:46:39 +00001630 if (num || msix_enabled(eqo->adapter))
1631 be_eq_notify(eqo->adapter, eqo->q.id, rearm, true, num);
1632
Sathya Perla859b1e42009-08-10 03:43:51 +00001633 if (num)
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001634 napi_schedule(&eqo->napi);
1635
1636 return num;
Sathya Perla859b1e42009-08-10 03:43:51 +00001637}
1638
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001639/* Leaves the EQ is disarmed state */
1640static void be_eq_clean(struct be_eq_obj *eqo)
1641{
1642 int num = events_get(eqo);
1643
1644 be_eq_notify(eqo->adapter, eqo->q.id, false, true, num);
1645}
1646
/* Discard all pending RX completions and release every posted-but-unused
 * RX buffer; leaves the RX queue empty with head == tail == 0.
 */
static void be_rx_cq_clean(struct be_rx_obj *rxo)
{
	struct be_rx_page_info *page_info;
	struct be_queue_info *rxq = &rxo->q;
	struct be_queue_info *rx_cq = &rxo->cq;
	struct be_rx_compl_info *rxcp;
	u16 tail;

	/* First cleanup pending rx completions */
	while ((rxcp = be_rx_compl_get(rxo)) != NULL) {
		be_rx_compl_discard(rxo, rxcp);
		be_cq_notify(rxo->adapter, rx_cq->id, false, 1);
	}

	/* Then free posted rx buffer that were not used */
	tail = (rxq->head + rxq->len - atomic_read(&rxq->used)) % rxq->len;
	/* NOTE(review): loop termination relies on get_rx_page_info()
	 * decrementing rxq->used — confirm against its definition */
	for (; atomic_read(&rxq->used) > 0; index_inc(&tail, rxq->len)) {
		page_info = get_rx_page_info(rxo, tail);
		put_page(page_info->page);
		memset(page_info, 0, sizeof(*page_info));
	}
	BUG_ON(atomic_read(&rxq->used));
	rxq->tail = rxq->head = 0;
}
1671
/* Drain TX completions on all tx-queues, waiting up to ~200ms for them
 * to arrive, and then forcibly free any posted TX skbs for which no
 * completion will ever come.
 */
static void be_tx_compl_clean(struct be_adapter *adapter)
{
	struct be_tx_obj *txo;
	struct be_queue_info *txq;
	struct be_eth_tx_compl *txcp;
	u16 end_idx, cmpl = 0, timeo = 0, num_wrbs = 0;
	struct sk_buff *sent_skb;
	bool dummy_wrb;
	int i, pending_txqs;

	/* Wait for a max of 200ms for all the tx-completions to arrive. */
	do {
		pending_txqs = adapter->num_tx_qs;

		for_all_tx_queues(adapter, txo, i) {
			txq = &txo->q;
			while ((txcp = be_tx_compl_get(&txo->cq))) {
				end_idx =
					AMAP_GET_BITS(struct amap_eth_tx_compl,
						      wrb_index, txcp);
				num_wrbs += be_tx_compl_process(adapter, txo,
								end_idx);
				cmpl++;
			}
			/* Ack the batch and account the reclaimed wrbs;
			 * reset the accumulators for the next txq */
			if (cmpl) {
				be_cq_notify(adapter, txo->cq.id, false, cmpl);
				atomic_sub(num_wrbs, &txq->used);
				cmpl = 0;
				num_wrbs = 0;
			}
			if (atomic_read(&txq->used) == 0)
				pending_txqs--;
		}

		if (pending_txqs == 0 || ++timeo > 200)
			break;

		mdelay(1);
	} while (true);

	for_all_tx_queues(adapter, txo, i) {
		txq = &txo->q;
		if (atomic_read(&txq->used))
			dev_err(&adapter->pdev->dev, "%d pending tx-compls\n",
				atomic_read(&txq->used));

		/* free posted tx for which compls will never arrive */
		while (atomic_read(&txq->used)) {
			sent_skb = txo->sent_skb_list[txq->tail];
			end_idx = txq->tail;
			/* Compute the last wrb index of this skb and reclaim
			 * it as if a completion had arrived */
			num_wrbs = wrb_cnt_for_skb(adapter, sent_skb,
						   &dummy_wrb);
			index_adv(&end_idx, num_wrbs - 1, txq->len);
			num_wrbs = be_tx_compl_process(adapter, txo, end_idx);
			atomic_sub(num_wrbs, &txq->used);
		}
	}
}
1730
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001731static void be_evt_queues_destroy(struct be_adapter *adapter)
1732{
1733 struct be_eq_obj *eqo;
1734 int i;
1735
1736 for_all_evt_queues(adapter, eqo, i) {
1737 be_eq_clean(eqo);
1738 if (eqo->q.created)
1739 be_cmd_q_destroy(adapter, &eqo->q, QTYPE_EQ);
1740 be_queue_free(adapter, &eqo->q);
1741 }
1742}
1743
/* Create one event queue per irq vector. On failure returns the error
 * from the first queue that could not be set up; partial creations are
 * expected to be undone by the caller via be_evt_queues_destroy().
 */
static int be_evt_queues_create(struct be_adapter *adapter)
{
	struct be_queue_info *eq;
	struct be_eq_obj *eqo;
	int i, rc;

	adapter->num_evt_qs = num_irqs(adapter);

	for_all_evt_queues(adapter, eqo, i) {
		eqo->adapter = adapter;
		eqo->tx_budget = BE_TX_BUDGET;
		eqo->idx = i;
		eqo->max_eqd = BE_MAX_EQD;
		eqo->enable_aic = true;

		eq = &eqo->q;
		rc = be_queue_alloc(adapter, eq, EVNT_Q_LEN,
				    sizeof(struct be_eq_entry));
		if (rc)
			return rc;

		/* NOTE(review): eqo->cur_eqd is not set here — presumably
		 * zero-initialized with the adapter struct; verify */
		rc = be_cmd_eq_create(adapter, eq, eqo->cur_eqd);
		if (rc)
			return rc;
	}
	return 0;
}
1771
Sathya Perla5fb379e2009-06-18 00:02:59 +00001772static void be_mcc_queues_destroy(struct be_adapter *adapter)
1773{
1774 struct be_queue_info *q;
Sathya Perla5fb379e2009-06-18 00:02:59 +00001775
Sathya Perla8788fdc2009-07-27 22:52:03 +00001776 q = &adapter->mcc_obj.q;
Sathya Perla5fb379e2009-06-18 00:02:59 +00001777 if (q->created)
Sathya Perla8788fdc2009-07-27 22:52:03 +00001778 be_cmd_q_destroy(adapter, q, QTYPE_MCCQ);
Sathya Perla5fb379e2009-06-18 00:02:59 +00001779 be_queue_free(adapter, q);
1780
Sathya Perla8788fdc2009-07-27 22:52:03 +00001781 q = &adapter->mcc_obj.cq;
Sathya Perla5fb379e2009-06-18 00:02:59 +00001782 if (q->created)
Sathya Perla8788fdc2009-07-27 22:52:03 +00001783 be_cmd_q_destroy(adapter, q, QTYPE_CQ);
Sathya Perla5fb379e2009-06-18 00:02:59 +00001784 be_queue_free(adapter, q);
1785}
1786
/* Must be called only after TX qs are created as MCC shares TX EQ */
static int be_mcc_queues_create(struct be_adapter *adapter)
{
	struct be_queue_info *q, *cq;

	cq = &adapter->mcc_obj.cq;
	if (be_queue_alloc(adapter, cq, MCC_CQ_LEN,
			sizeof(struct be_mcc_compl)))
		goto err;

	/* Use the default EQ for MCC completions */
	if (be_cmd_cq_create(adapter, cq, &mcc_eqo(adapter)->q, true, 0))
		goto mcc_cq_free;

	q = &adapter->mcc_obj.q;
	if (be_queue_alloc(adapter, q, MCC_Q_LEN, sizeof(struct be_mcc_wrb)))
		goto mcc_cq_destroy;

	if (be_cmd_mccq_create(adapter, q, cq))
		goto mcc_q_free;

	return 0;

	/* Error unwind in reverse order of acquisition */
mcc_q_free:
	be_queue_free(adapter, q);
mcc_cq_destroy:
	be_cmd_q_destroy(adapter, cq, QTYPE_CQ);
mcc_cq_free:
	be_queue_free(adapter, cq);
err:
	return -1;
}
1819
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001820static void be_tx_queues_destroy(struct be_adapter *adapter)
1821{
1822 struct be_queue_info *q;
Sathya Perla3c8def92011-06-12 20:01:58 +00001823 struct be_tx_obj *txo;
1824 u8 i;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001825
Sathya Perla3c8def92011-06-12 20:01:58 +00001826 for_all_tx_queues(adapter, txo, i) {
1827 q = &txo->q;
1828 if (q->created)
1829 be_cmd_q_destroy(adapter, q, QTYPE_TXQ);
1830 be_queue_free(adapter, q);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001831
Sathya Perla3c8def92011-06-12 20:01:58 +00001832 q = &txo->cq;
1833 if (q->created)
1834 be_cmd_q_destroy(adapter, q, QTYPE_CQ);
1835 be_queue_free(adapter, q);
1836 }
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001837}
1838
Sathya Perladafc0fe2011-10-24 02:45:02 +00001839static int be_num_txqs_want(struct be_adapter *adapter)
1840{
Sathya Perla39f1d942012-05-08 19:41:24 +00001841 if (sriov_want(adapter) || be_is_mc(adapter) ||
1842 lancer_chip(adapter) || !be_physfn(adapter) ||
1843 adapter->generation == BE_GEN2)
Sathya Perladafc0fe2011-10-24 02:45:02 +00001844 return 1;
1845 else
1846 return MAX_TX_QS;
1847}
1848
/* Create one completion queue per TX queue; CQs are distributed
 * round-robin across the available event queues.
 */
static int be_tx_cqs_create(struct be_adapter *adapter)
{
	struct be_queue_info *cq, *eq;
	int status;
	struct be_tx_obj *txo;
	u8 i;

	adapter->num_tx_qs = be_num_txqs_want(adapter);
	if (adapter->num_tx_qs != MAX_TX_QS) {
		/* Updating the real tx-queue count is done under rtnl */
		rtnl_lock();
		netif_set_real_num_tx_queues(adapter->netdev,
			adapter->num_tx_qs);
		rtnl_unlock();
	}

	for_all_tx_queues(adapter, txo, i) {
		cq = &txo->cq;
		status = be_queue_alloc(adapter, cq, TX_CQ_LEN,
					sizeof(struct be_eth_tx_compl));
		if (status)
			return status;

		/* If num_evt_qs is less than num_tx_qs, then more than
		 * one txq share an eq
		 */
		eq = &adapter->eq_obj[i % adapter->num_evt_qs].q;
		status = be_cmd_cq_create(adapter, cq, eq, false, 3);
		if (status)
			return status;
	}
	return 0;
}
1881
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001882static int be_tx_qs_create(struct be_adapter *adapter)
1883{
1884 struct be_tx_obj *txo;
1885 int i, status;
1886
1887 for_all_tx_queues(adapter, txo, i) {
1888 status = be_queue_alloc(adapter, &txo->q, TX_Q_LEN,
1889 sizeof(struct be_eth_wrb));
1890 if (status)
1891 return status;
1892
1893 status = be_cmd_txq_create(adapter, &txo->q, &txo->cq);
1894 if (status)
1895 return status;
1896 }
1897
1898 return 0;
1899}
1900
1901static void be_rx_cqs_destroy(struct be_adapter *adapter)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001902{
1903 struct be_queue_info *q;
Sathya Perla3abcded2010-10-03 22:12:27 -07001904 struct be_rx_obj *rxo;
1905 int i;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001906
Sathya Perla3abcded2010-10-03 22:12:27 -07001907 for_all_rx_queues(adapter, rxo, i) {
Sathya Perla3abcded2010-10-03 22:12:27 -07001908 q = &rxo->cq;
1909 if (q->created)
1910 be_cmd_q_destroy(adapter, q, QTYPE_CQ);
1911 be_queue_free(adapter, q);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001912 }
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001913}
1914
/* Create the RX completion queues; one CQ per RX ring, spread
 * round-robin across the event queues.
 */
static int be_rx_cqs_create(struct be_adapter *adapter)
{
	struct be_queue_info *eq, *cq;
	struct be_rx_obj *rxo;
	int rc, i;

	/* We'll create as many RSS rings as there are irqs.
	 * But when there's only one irq there's no use creating RSS rings
	 */
	adapter->num_rx_qs = (num_irqs(adapter) > 1) ?
				num_irqs(adapter) + 1 : 1;
	if (adapter->num_rx_qs != MAX_RX_QS) {
		/* Updating the real rx-queue count is done under rtnl */
		rtnl_lock();
		netif_set_real_num_rx_queues(adapter->netdev,
					     adapter->num_rx_qs);
		rtnl_unlock();
	}

	/* big_page_size: smallest page-multiple that holds rx_frag_size */
	adapter->big_page_size = (1 << get_order(rx_frag_size)) * PAGE_SIZE;
	for_all_rx_queues(adapter, rxo, i) {
		rxo->adapter = adapter;
		cq = &rxo->cq;
		rc = be_queue_alloc(adapter, cq, RX_CQ_LEN,
				sizeof(struct be_eth_rx_compl));
		if (rc)
			return rc;

		eq = &adapter->eq_obj[i % adapter->num_evt_qs].q;
		rc = be_cmd_cq_create(adapter, cq, eq, false, 3);
		if (rc)
			return rc;
	}

	if (adapter->num_rx_qs != MAX_RX_QS)
		dev_info(&adapter->pdev->dev,
			"Created only %d receive queues", adapter->num_rx_qs);

	return 0;
}
1954
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001955static irqreturn_t be_intx(int irq, void *dev)
1956{
1957 struct be_adapter *adapter = dev;
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001958 int num_evts;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001959
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001960 /* With INTx only one EQ is used */
1961 num_evts = event_handle(&adapter->eq_obj[0]);
1962 if (num_evts)
1963 return IRQ_HANDLED;
1964 else
1965 return IRQ_NONE;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001966}
1967
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001968static irqreturn_t be_msix(int irq, void *dev)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001969{
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001970 struct be_eq_obj *eqo = dev;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001971
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001972 event_handle(eqo);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001973 return IRQ_HANDLED;
1974}
1975
Sathya Perla2e588f82011-03-11 02:49:26 +00001976static inline bool do_gro(struct be_rx_compl_info *rxcp)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001977{
Sathya Perla2e588f82011-03-11 02:49:26 +00001978 return (rxcp->tcpf && !rxcp->err) ? true : false;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001979}
1980
/* NAPI RX work for one ring: consumes up to @budget completions from
 * rxo->cq, delivers packets to the stack via GRO or the regular path,
 * then notifies the CQ of the consumed entries and replenishes the RX
 * queue when it has drained below the watermark.
 * Returns the number of completions processed (<= budget).
 */
static int be_process_rx(struct be_rx_obj *rxo, struct napi_struct *napi,
			int budget)
{
	struct be_adapter *adapter = rxo->adapter;
	struct be_queue_info *rx_cq = &rxo->cq;
	struct be_rx_compl_info *rxcp;
	u32 work_done;

	for (work_done = 0; work_done < budget; work_done++) {
		rxcp = be_rx_compl_get(rxo);
		if (!rxcp)
			break;

		/* Is it a flush compl that has no data */
		if (unlikely(rxcp->num_rcvd == 0))
			goto loop_continue;

		/* Discard compl with partial DMA Lancer B0 */
		if (unlikely(!rxcp->pkt_size)) {
			be_rx_compl_discard(rxo, rxcp);
			goto loop_continue;
		}

		/* On BE drop pkts that arrive due to imperfect filtering in
		 * promiscuous mode on some skews
		 */
		if (unlikely(rxcp->port != adapter->port_num &&
				!lancer_chip(adapter))) {
			be_rx_compl_discard(rxo, rxcp);
			goto loop_continue;
		}

		if (do_gro(rxcp))
			be_rx_compl_process_gro(rxo, napi, rxcp);
		else
			be_rx_compl_process(rxo, rxcp);
loop_continue:
		/* Stats are updated for every compl, including discards */
		be_rx_stats_update(rxo, rxcp);
	}

	if (work_done) {
		be_cq_notify(adapter, rx_cq->id, true, work_done);

		/* GFP_ATOMIC: we are in softirq (NAPI) context here */
		if (atomic_read(&rxo->q.used) < RX_FRAGS_REFILL_WM)
			be_post_rx_frags(rxo, GFP_ATOMIC);
	}

	return work_done;
}
2030
/* Reaps up to @budget TX completions from txo's CQ, returns the freed
 * WRBs to the queue and wakes netdev subqueue @idx if it was stopped for
 * lack of WRBs and the queue is now at most half full.
 * Returns true when fewer than @budget completions were found, i.e. the
 * CQ was fully drained within the budget.
 */
static bool be_process_tx(struct be_adapter *adapter, struct be_tx_obj *txo,
			int budget, int idx)
{
	struct be_eth_tx_compl *txcp;
	int num_wrbs = 0, work_done;

	for (work_done = 0; work_done < budget; work_done++) {
		txcp = be_tx_compl_get(&txo->cq);
		if (!txcp)
			break;
		num_wrbs += be_tx_compl_process(adapter, txo,
				AMAP_GET_BITS(struct amap_eth_tx_compl,
					wrb_index, txcp));
	}

	if (work_done) {
		be_cq_notify(adapter, txo->cq.id, true, work_done);
		atomic_sub(num_wrbs, &txo->q.used);

		/* As Tx wrbs have been freed up, wake up netdev queue
		 * if it was stopped due to lack of tx wrbs. */
		if (__netif_subqueue_stopped(adapter->netdev, idx) &&
			atomic_read(&txo->q.used) < txo->q.len / 2) {
			netif_wake_subqueue(adapter->netdev, idx);
		}

		/* u64 stats are guarded by a seqcount on 32-bit arches */
		u64_stats_update_begin(&tx_stats(txo)->sync_compl);
		tx_stats(txo)->tx_compl += work_done;
		u64_stats_update_end(&tx_stats(txo)->sync_compl);
	}
	return (work_done < budget); /* Done */
}
Sathya Perla3c8def92011-06-12 20:01:58 +00002063
/* NAPI poll handler for one event queue. TX and RX rings are distributed
 * round-robin across EQs (ring i is serviced by EQ i % num_evt_qs), so
 * this EQ services every ring whose index maps to it, plus MCC
 * completions when it is the MCC EQ.
 * NAPI is completed and the EQ re-armed only when all work fit in the
 * budget; otherwise polling continues and the accumulated events are
 * counted and cleared without re-arming.
 */
int be_poll(struct napi_struct *napi, int budget)
{
	struct be_eq_obj *eqo = container_of(napi, struct be_eq_obj, napi);
	struct be_adapter *adapter = eqo->adapter;
	int max_work = 0, work, i;
	bool tx_done;

	/* Process all TXQs serviced by this EQ */
	for (i = eqo->idx; i < adapter->num_tx_qs; i += adapter->num_evt_qs) {
		tx_done = be_process_tx(adapter, &adapter->tx_obj[i],
					eqo->tx_budget, i);
		if (!tx_done)
			max_work = budget;	/* TX not drained; stay in poll mode */
	}

	/* This loop will iterate twice for EQ0 in which
	 * completions of the last RXQ (default one) are also processed
	 * For other EQs the loop iterates only once
	 */
	for (i = eqo->idx; i < adapter->num_rx_qs; i += adapter->num_evt_qs) {
		work = be_process_rx(&adapter->rx_obj[i], napi, budget);
		max_work = max(work, max_work);
	}

	if (is_mcc_eqo(eqo))
		be_process_mcc(adapter);

	if (max_work < budget) {
		napi_complete(napi);
		be_eq_notify(adapter, eqo->q.id, true, false, 0);
	} else {
		/* As we'll continue in polling mode, count and clear events */
		be_eq_notify(adapter, eqo->q.id, false, false, events_get(eqo));
	}
	return max_work;
}
2100
/* Polls the adapter for unrecoverable hardware errors.
 * Lancer chips report errors through the SLIPORT status/error registers
 * (memory-mapped via adapter->db); BE2/BE3 report them through the
 * PCI-config UE (unrecoverable error) status registers, which are masked
 * with their corresponding mask registers before evaluation.
 * On the first detected error adapter->hw_error is latched and details
 * are logged; nothing is done if an error was already latched
 * (be_crit_error()). Note that on BE, unmasked UE bits are logged even
 * when hw_error is not set by this call.
 */
void be_detect_error(struct be_adapter *adapter)
{
	u32 ue_lo = 0, ue_hi = 0, ue_lo_mask = 0, ue_hi_mask = 0;
	u32 sliport_status = 0, sliport_err1 = 0, sliport_err2 = 0;
	u32 i;

	if (be_crit_error(adapter))
		return;

	if (lancer_chip(adapter)) {
		sliport_status = ioread32(adapter->db + SLIPORT_STATUS_OFFSET);
		if (sliport_status & SLIPORT_STATUS_ERR_MASK) {
			sliport_err1 = ioread32(adapter->db +
					SLIPORT_ERROR1_OFFSET);
			sliport_err2 = ioread32(adapter->db +
					SLIPORT_ERROR2_OFFSET);
		}
	} else {
		pci_read_config_dword(adapter->pdev,
				PCICFG_UE_STATUS_LOW, &ue_lo);
		pci_read_config_dword(adapter->pdev,
				PCICFG_UE_STATUS_HIGH, &ue_hi);
		pci_read_config_dword(adapter->pdev,
				PCICFG_UE_STATUS_LOW_MASK, &ue_lo_mask);
		pci_read_config_dword(adapter->pdev,
				PCICFG_UE_STATUS_HI_MASK, &ue_hi_mask);

		/* Only unmasked UE bits indicate a real error */
		ue_lo = (ue_lo & ~ue_lo_mask);
		ue_hi = (ue_hi & ~ue_hi_mask);
	}

	if (ue_lo || ue_hi ||
		sliport_status & SLIPORT_STATUS_ERR_MASK) {
		adapter->hw_error = true;
		dev_err(&adapter->pdev->dev,
			"Error detected in the card\n");
	}

	if (sliport_status & SLIPORT_STATUS_ERR_MASK) {
		dev_err(&adapter->pdev->dev,
			"ERR: sliport status 0x%x\n", sliport_status);
		dev_err(&adapter->pdev->dev,
			"ERR: sliport error1 0x%x\n", sliport_err1);
		dev_err(&adapter->pdev->dev,
			"ERR: sliport error2 0x%x\n", sliport_err2);
	}

	/* Log each set UE bit by its descriptive name */
	if (ue_lo) {
		for (i = 0; ue_lo; ue_lo >>= 1, i++) {
			if (ue_lo & 1)
				dev_err(&adapter->pdev->dev,
				"UE: %s bit set\n", ue_status_low_desc[i]);
		}
	}

	if (ue_hi) {
		for (i = 0; ue_hi; ue_hi >>= 1, i++) {
			if (ue_hi & 1)
				dev_err(&adapter->pdev->dev,
				"UE: %s bit set\n", ue_status_hi_desc[i]);
		}
	}

}
2165
Sathya Perla8d56ff12009-11-22 22:02:26 +00002166static void be_msix_disable(struct be_adapter *adapter)
2167{
Sathya Perlaac6a0c42011-03-21 20:49:25 +00002168 if (msix_enabled(adapter)) {
Sathya Perla8d56ff12009-11-22 22:02:26 +00002169 pci_disable_msix(adapter->pdev);
Sathya Perlaac6a0c42011-03-21 20:49:25 +00002170 adapter->num_msix_vec = 0;
Sathya Perla3abcded2010-10-03 22:12:27 -07002171 }
2172}
2173
/* Returns the number of RSS rings desired: the chip maximum (BE3-native
 * vs BE2) capped by netif_get_num_default_rss_queues(), or 0 when RSS
 * cannot be used (no FW RSS capability, SR-IOV wanted, not a PF, or a
 * multi-channel configuration).
 */
static uint be_num_rss_want(struct be_adapter *adapter)
{
	u32 num = 0;
	if ((adapter->function_caps & BE_FUNCTION_CAPS_RSS) &&
	     !sriov_want(adapter) && be_physfn(adapter) &&
	     !be_is_mc(adapter)) {
		num = (adapter->be3_native) ? BE3_MAX_RSS_QS : BE2_MAX_RSS_QS;
		num = min_t(u32, num, (u32)netif_get_num_default_rss_queues());
	}
	return num;
}
2185
/* Enables MSI-x with enough vectors for the wanted RSS rings (capped by
 * online CPUs) plus, when supported, RoCE vectors. pci_enable_msix()
 * returning a positive value means only that many vectors are available,
 * so a second attempt is made with the reduced count.
 * On success the granted vectors are split between NIC and RoCE; on
 * failure num_msix_vec stays 0 and the caller falls back to INTx.
 */
static void be_msix_enable(struct be_adapter *adapter)
{
#define BE_MIN_MSIX_VECTORS 1
	int i, status, num_vec, num_roce_vec = 0;

	/* If RSS queues are not used, need a vec for default RX Q */
	num_vec = min(be_num_rss_want(adapter), num_online_cpus());
	if (be_roce_supported(adapter)) {
		num_roce_vec = min_t(u32, MAX_ROCE_MSIX_VECTORS,
					(num_online_cpus() + 1));
		num_roce_vec = min(num_roce_vec, MAX_ROCE_EQS);
		num_vec += num_roce_vec;
		num_vec = min(num_vec, MAX_MSIX_VECTORS);
	}
	num_vec = max(num_vec, BE_MIN_MSIX_VECTORS);

	for (i = 0; i < num_vec; i++)
		adapter->msix_entries[i].entry = i;

	status = pci_enable_msix(adapter->pdev, adapter->msix_entries, num_vec);
	if (status == 0) {
		goto done;
	} else if (status >= BE_MIN_MSIX_VECTORS) {
		/* Retry with the number of vectors the platform can grant */
		num_vec = status;
		if (pci_enable_msix(adapter->pdev, adapter->msix_entries,
				num_vec) == 0)
			goto done;
	}
	return;
done:
	/* RoCE gets its share only after the NIC's need is met */
	if (be_roce_supported(adapter)) {
		if (num_vec > num_roce_vec) {
			adapter->num_msix_vec = num_vec - num_roce_vec;
			adapter->num_msix_roce_vec =
				num_vec - adapter->num_msix_vec;
		} else {
			adapter->num_msix_vec = num_vec;
			adapter->num_msix_roce_vec = 0;
		}
	} else
		adapter->num_msix_vec = num_vec;
	return;
}
2229
/* Returns the MSI-x vector number assigned to the given event queue */
static inline int be_msix_vec_get(struct be_adapter *adapter,
				struct be_eq_obj *eqo)
{
	return adapter->msix_entries[eqo->idx].vector;
}
2235
/* request_irq()s one MSI-x vector per event queue, naming each IRQ
 * "<netdev>-q<i>" with the EQ object as the cookie.
 * On failure, frees the vectors registered so far (in reverse) and
 * disables MSI-x so the caller can fall back to INTx.
 */
static int be_msix_register(struct be_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	struct be_eq_obj *eqo;
	int status, i, vec;

	for_all_evt_queues(adapter, eqo, i) {
		sprintf(eqo->desc, "%s-q%d", netdev->name, i);
		vec = be_msix_vec_get(adapter, eqo);
		status = request_irq(vec, be_msix, 0, eqo->desc, eqo);
		if (status)
			goto err_msix;
	}

	return 0;
err_msix:
	/* Unwind: free only the IRQs registered before the failure */
	for (i--, eqo = &adapter->eq_obj[i]; i >= 0; i--, eqo--)
		free_irq(be_msix_vec_get(adapter, eqo), eqo);
	dev_warn(&adapter->pdev->dev, "MSIX Request IRQ failed - err %d\n",
		status);
	be_msix_disable(adapter);
	return status;
}
2259
/* Registers the adapter's interrupt(s): MSI-x when enabled, otherwise
 * INTx. VFs cannot use INTx, so a VF returns the MSI-x failure status
 * instead of falling back. Sets isr_registered on success.
 */
static int be_irq_register(struct be_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	int status;

	if (msix_enabled(adapter)) {
		status = be_msix_register(adapter);
		if (status == 0)
			goto done;
		/* INTx is not supported for VF */
		if (!be_physfn(adapter))
			return status;
	}

	/* INTx */
	netdev->irq = adapter->pdev->irq;
	status = request_irq(netdev->irq, be_intx, IRQF_SHARED, netdev->name,
			adapter);
	if (status) {
		dev_err(&adapter->pdev->dev,
			"INTx request IRQ failed - err %d\n", status);
		return status;
	}
done:
	adapter->isr_registered = true;
	return 0;
}
2287
2288static void be_irq_unregister(struct be_adapter *adapter)
2289{
2290 struct net_device *netdev = adapter->netdev;
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002291 struct be_eq_obj *eqo;
Sathya Perla3abcded2010-10-03 22:12:27 -07002292 int i;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002293
2294 if (!adapter->isr_registered)
2295 return;
2296
2297 /* INTx */
Sathya Perlaac6a0c42011-03-21 20:49:25 +00002298 if (!msix_enabled(adapter)) {
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002299 free_irq(netdev->irq, adapter);
2300 goto done;
2301 }
2302
2303 /* MSIx */
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002304 for_all_evt_queues(adapter, eqo, i)
2305 free_irq(be_msix_vec_get(adapter, eqo), eqo);
Sathya Perla3abcded2010-10-03 22:12:27 -07002306
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002307done:
2308 adapter->isr_registered = false;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002309}
2310
/* Destroys every created RX queue in the FW, waits briefly for in-flight
 * DMA and the flush completion, drains the RX CQ, then frees the queue
 * memory.
 */
static void be_rx_qs_destroy(struct be_adapter *adapter)
{
	struct be_queue_info *q;
	struct be_rx_obj *rxo;
	int i;

	for_all_rx_queues(adapter, rxo, i) {
		q = &rxo->q;
		if (q->created) {
			be_cmd_rxq_destroy(adapter, q);
			/* After the rxq is invalidated, wait for a grace time
			 * of 1ms for all dma to end and the flush compl to
			 * arrive
			 */
			mdelay(1);
			be_rx_cq_clean(rxo);
		}
		be_queue_free(adapter, q);
	}
}
2331
/* ndo_stop handler: quiesces the interface.
 * Teardown order matters: RoCE close and async-MCC/interrupt disable come
 * first; each EQ's NAPI is disabled and its IRQ synchronized before the
 * EQ is drained and the IRQs are unregistered; pending TX completions are
 * reaped before the RX queues are destroyed. Always returns 0.
 */
static int be_close(struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_eq_obj *eqo;
	int i;

	be_roce_dev_close(adapter);

	be_async_mcc_disable(adapter);

	/* Lancer does not use the host interrupt-enable bit */
	if (!lancer_chip(adapter))
		be_intr_set(adapter, false);

	for_all_evt_queues(adapter, eqo, i) {
		napi_disable(&eqo->napi);
		if (msix_enabled(adapter))
			synchronize_irq(be_msix_vec_get(adapter, eqo));
		else
			synchronize_irq(netdev->irq);
		be_eq_clean(eqo);
	}

	be_irq_unregister(adapter);

	/* Wait for all pending tx completions to arrive so that
	 * all tx skbs are freed.
	 */
	be_tx_compl_clean(adapter);

	be_rx_qs_destroy(adapter);
	return 0;
}
2364
/* Allocates all RX rings and creates them in the FW. The default
 * (non-RSS) RXQ is created first, as the FW requires; the RSS rings
 * follow. With multiple rings, a 128-entry RSS indirection table is
 * programmed with the RSS ring ids in round-robin order. Finally each
 * ring is posted with buffers for the first time (GFP_KERNEL — called
 * from be_open(), process context).
 * Returns 0 on success or the first failing status.
 */
static int be_rx_qs_create(struct be_adapter *adapter)
{
	struct be_rx_obj *rxo;
	int rc, i, j;
	u8 rsstable[128];

	for_all_rx_queues(adapter, rxo, i) {
		rc = be_queue_alloc(adapter, &rxo->q, RX_Q_LEN,
				    sizeof(struct be_eth_rx_d));
		if (rc)
			return rc;
	}

	/* The FW would like the default RXQ to be created first */
	rxo = default_rxo(adapter);
	rc = be_cmd_rxq_create(adapter, &rxo->q, rxo->cq.id, rx_frag_size,
			       adapter->if_handle, false, &rxo->rss_id);
	if (rc)
		return rc;

	for_all_rss_queues(adapter, rxo, i) {
		rc = be_cmd_rxq_create(adapter, &rxo->q, rxo->cq.id,
				       rx_frag_size, adapter->if_handle,
				       true, &rxo->rss_id);
		if (rc)
			return rc;
	}

	if (be_multi_rxq(adapter)) {
		/* Fill the indirection table by cycling through the RSS
		 * ring ids (num_rx_qs - 1 of them; the default ring is
		 * excluded)
		 */
		for (j = 0; j < 128; j += adapter->num_rx_qs - 1) {
			for_all_rss_queues(adapter, rxo, i) {
				if ((j + i) >= 128)
					break;
				rsstable[j + i] = rxo->rss_id;
			}
		}
		rc = be_cmd_rss_config(adapter, rsstable, 128);
		if (rc)
			return rc;
	}

	/* First time posting */
	for_all_rx_queues(adapter, rxo, i)
		be_post_rx_frags(rxo, GFP_KERNEL);
	return 0;
}
2411
/* ndo_open handler: brings the interface up.
 * Creates the RX queues, registers IRQs, enables interrupts, arms the
 * RX/TX CQs and then the EQs (with NAPI enabled), enables async MCC,
 * reports the current link state and opens the RoCE side.
 * On any failure, be_close() unwinds and -EIO is returned.
 */
static int be_open(struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_eq_obj *eqo;
	struct be_rx_obj *rxo;
	struct be_tx_obj *txo;
	u8 link_status;
	int status, i;

	status = be_rx_qs_create(adapter);
	if (status)
		goto err;

	be_irq_register(adapter);

	/* Lancer does not use the host interrupt-enable bit */
	if (!lancer_chip(adapter))
		be_intr_set(adapter, true);

	/* Arm the CQs before the EQs so no completion is missed */
	for_all_rx_queues(adapter, rxo, i)
		be_cq_notify(adapter, rxo->cq.id, true, 0);

	for_all_tx_queues(adapter, txo, i)
		be_cq_notify(adapter, txo->cq.id, true, 0);

	be_async_mcc_enable(adapter);

	for_all_evt_queues(adapter, eqo, i) {
		napi_enable(&eqo->napi);
		be_eq_notify(adapter, eqo->q.id, true, false, 0);
	}

	status = be_cmd_link_status_query(adapter, NULL, NULL,
					  &link_status, 0);
	if (!status)
		be_link_status_update(adapter, link_status);

	be_roce_dev_open(adapter);
	return 0;
err:
	be_close(adapter->netdev);
	return -EIO;
}
Sathya Perla5fb379e2009-06-18 00:02:59 +00002453}
2454
Ajit Khaparde71d8d1b2009-12-03 06:16:59 +00002455static int be_setup_wol(struct be_adapter *adapter, bool enable)
2456{
2457 struct be_dma_mem cmd;
2458 int status = 0;
2459 u8 mac[ETH_ALEN];
2460
2461 memset(mac, 0, ETH_ALEN);
2462
2463 cmd.size = sizeof(struct be_cmd_req_acpi_wol_magic_config);
Ivan Vecera2b7bceb2011-02-02 08:05:12 +00002464 cmd.va = dma_alloc_coherent(&adapter->pdev->dev, cmd.size, &cmd.dma,
2465 GFP_KERNEL);
Ajit Khaparde71d8d1b2009-12-03 06:16:59 +00002466 if (cmd.va == NULL)
2467 return -1;
2468 memset(cmd.va, 0, cmd.size);
2469
2470 if (enable) {
2471 status = pci_write_config_dword(adapter->pdev,
2472 PCICFG_PM_CONTROL_OFFSET, PCICFG_PM_CONTROL_MASK);
2473 if (status) {
2474 dev_err(&adapter->pdev->dev,
Frans Pop2381a552010-03-24 07:57:36 +00002475 "Could not enable Wake-on-lan\n");
Ivan Vecera2b7bceb2011-02-02 08:05:12 +00002476 dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va,
2477 cmd.dma);
Ajit Khaparde71d8d1b2009-12-03 06:16:59 +00002478 return status;
2479 }
2480 status = be_cmd_enable_magic_wol(adapter,
2481 adapter->netdev->dev_addr, &cmd);
2482 pci_enable_wake(adapter->pdev, PCI_D3hot, 1);
2483 pci_enable_wake(adapter->pdev, PCI_D3cold, 1);
2484 } else {
2485 status = be_cmd_enable_magic_wol(adapter, mac, &cmd);
2486 pci_enable_wake(adapter->pdev, PCI_D3hot, 0);
2487 pci_enable_wake(adapter->pdev, PCI_D3cold, 0);
2488 }
2489
Ivan Vecera2b7bceb2011-02-02 08:05:12 +00002490 dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va, cmd.dma);
Ajit Khaparde71d8d1b2009-12-03 06:16:59 +00002491 return status;
2492}
2493
/*
 * Generate a seed MAC address from the PF MAC Address using jhash.
 * MAC Address for VFs are assigned incrementally starting from the seed.
 * These addresses are programmed in the ASIC by the PF and the VF driver
 * queries for the MAC address during its probe.
 */
static inline int be_vf_eth_addr_config(struct be_adapter *adapter)
{
	u32 vf;
	int status = 0;
	u8 mac[ETH_ALEN];
	struct be_vf_cfg *vf_cfg;

	be_vf_eth_addr_generate(adapter, mac);

	for_all_vfs(adapter, vf_cfg, vf) {
		/* Lancer programs VF MACs via the mac-list cmd; BE uses
		 * a per-interface pmac entry
		 */
		if (lancer_chip(adapter)) {
			status = be_cmd_set_mac_list(adapter, mac, 1, vf + 1);
		} else {
			status = be_cmd_pmac_add(adapter, mac,
						 vf_cfg->if_handle,
						 &vf_cfg->pmac_id, vf + 1);
		}

		if (status)
			dev_err(&adapter->pdev->dev,
			"Mac address assignment failed for VF %d\n", vf);
		else
			memcpy(vf_cfg->mac_addr, mac, ETH_ALEN);

		/* Next VF gets the next consecutive address.
		 * NOTE(review): only the last octet is bumped; wrap-around
		 * past 0xff is not handled here.
		 */
		mac[5] += 1;
	}
	return status;
}
2528
/* Tears down SR-IOV state: removes each VF's MAC (mac-list on Lancer,
 * pmac on BE), destroys its interface and disables SR-IOV. If any VF is
 * still assigned to a VM, the FW teardown is skipped and only the local
 * config array is released.
 */
static void be_vf_clear(struct be_adapter *adapter)
{
	struct be_vf_cfg *vf_cfg;
	u32 vf;

	if (be_find_vfs(adapter, ASSIGNED)) {
		dev_warn(&adapter->pdev->dev, "VFs are assigned to VMs\n");
		goto done;
	}

	for_all_vfs(adapter, vf_cfg, vf) {
		if (lancer_chip(adapter))
			be_cmd_set_mac_list(adapter, NULL, 0, vf + 1);
		else
			be_cmd_pmac_del(adapter, vf_cfg->if_handle,
					vf_cfg->pmac_id, vf + 1);

		be_cmd_if_destroy(adapter, vf_cfg->if_handle, vf + 1);
	}
	pci_disable_sriov(adapter->pdev);
done:
	kfree(adapter->vf_cfg);
	adapter->num_vfs = 0;
}
2553
/* Undoes be_setup(): cancels the worker, clears SR-IOV, deletes the
 * programmed unicast MACs, destroys the interface and then the MCC, RX
 * CQ, TX and event queues, and finally disables MSI-x.
 * Always returns 0.
 */
static int be_clear(struct be_adapter *adapter)
{
	/* uc_macs start at pmac_id[1]; pmac_id[0] presumably holds the
	 * primary MAC, freed with the interface — TODO confirm
	 */
	int i = 1;

	if (adapter->flags & BE_FLAGS_WORKER_SCHEDULED) {
		cancel_delayed_work_sync(&adapter->work);
		adapter->flags &= ~BE_FLAGS_WORKER_SCHEDULED;
	}

	if (sriov_enabled(adapter))
		be_vf_clear(adapter);

	for (; adapter->uc_macs > 0; adapter->uc_macs--, i++)
		be_cmd_pmac_del(adapter, adapter->if_handle,
			adapter->pmac_id[i], 0);

	be_cmd_if_destroy(adapter, adapter->if_handle, 0);

	be_mcc_queues_destroy(adapter);
	be_rx_cqs_destroy(adapter);
	be_tx_queues_destroy(adapter);
	be_evt_queues_destroy(adapter);

	be_msix_disable(adapter);
	return 0;
}
2580
/* Allocates the per-VF config array and marks every entry's if_handle
 * and pmac_id as unassigned (-1). Returns 0 or -ENOMEM.
 */
static int be_vf_setup_init(struct be_adapter *adapter)
{
	struct be_vf_cfg *vf_cfg;
	int vf;

	adapter->vf_cfg = kcalloc(adapter->num_vfs, sizeof(*vf_cfg),
				  GFP_KERNEL);
	if (!adapter->vf_cfg)
		return -ENOMEM;

	for_all_vfs(adapter, vf_cfg, vf) {
		vf_cfg->if_handle = -1;
		vf_cfg->pmac_id = -1;
	}
	return 0;
}
2597
/* Enables SR-IOV and configures each VF: creates its interface, assigns
 * MAC addresses (skipped when VFs were already enabled, e.g. by a prior
 * driver load), records its link speed as the TX rate, and reads its
 * default vlan from the HW switch config.
 * The module-param num_vfs is capped at the device limit. A platform
 * SR-IOV enable failure is not fatal — the NIC continues without VFs
 * (returns 0). Other failures return the error status; the caller is
 * expected to clean up via be_vf_clear()/be_clear().
 */
static int be_vf_setup(struct be_adapter *adapter)
{
	struct be_vf_cfg *vf_cfg;
	struct device *dev = &adapter->pdev->dev;
	u32 cap_flags, en_flags, vf;
	u16 def_vlan, lnk_speed;
	int status, enabled_vfs;

	enabled_vfs = be_find_vfs(adapter, ENABLED);
	if (enabled_vfs) {
		dev_warn(dev, "%d VFs are already enabled\n", enabled_vfs);
		dev_warn(dev, "Ignoring num_vfs=%d setting\n", num_vfs);
		return 0;
	}

	if (num_vfs > adapter->dev_num_vfs) {
		dev_warn(dev, "Device supports %d VFs and not %d\n",
			 adapter->dev_num_vfs, num_vfs);
		num_vfs = adapter->dev_num_vfs;
	}

	status = pci_enable_sriov(adapter->pdev, num_vfs);
	if (!status) {
		adapter->num_vfs = num_vfs;
	} else {
		/* Platform doesn't support SRIOV though device supports it */
		dev_warn(dev, "SRIOV enable failed\n");
		return 0;
	}

	status = be_vf_setup_init(adapter);
	if (status)
		goto err;

	cap_flags = en_flags = BE_IF_FLAGS_UNTAGGED | BE_IF_FLAGS_BROADCAST |
				BE_IF_FLAGS_MULTICAST;
	for_all_vfs(adapter, vf_cfg, vf) {
		status = be_cmd_if_create(adapter, cap_flags, en_flags,
					  &vf_cfg->if_handle, vf + 1);
		if (status)
			goto err;
	}

	/* MACs need assigning only on a fresh SR-IOV enable */
	if (!enabled_vfs) {
		status = be_vf_eth_addr_config(adapter);
		if (status)
			goto err;
	}

	for_all_vfs(adapter, vf_cfg, vf) {
		status = be_cmd_link_status_query(adapter, NULL, &lnk_speed,
						  NULL, vf + 1);
		if (status)
			goto err;
		/* lnk_speed appears to be in 10 Mbps units — TODO confirm */
		vf_cfg->tx_rate = lnk_speed * 10;

		status = be_cmd_get_hsw_config(adapter, &def_vlan,
				vf + 1, vf_cfg->if_handle);
		if (status)
			goto err;
		vf_cfg->def_vid = def_vlan;
	}
	return 0;
err:
	return status;
}
2664
Sathya Perla30128032011-11-10 19:17:57 +00002665static void be_setup_init(struct be_adapter *adapter)
2666{
2667 adapter->vlan_prio_bmap = 0xff;
Ajit Khaparde42f11cf2012-04-21 18:53:22 +00002668 adapter->phy.link_speed = -1;
Sathya Perla30128032011-11-10 19:17:57 +00002669 adapter->if_handle = -1;
2670 adapter->be3_native = false;
2671 adapter->promiscuous = false;
2672 adapter->eq_next_idx = 0;
Ajit Khaparde42f11cf2012-04-21 18:53:22 +00002673 adapter->phy.forced_port_speed = -1;
Sathya Perla30128032011-11-10 19:17:57 +00002674}
2675
/* Determine the MAC address to program for interface @if_handle.
 *
 * If the netdev already has a non-zero permanent MAC, the current
 * dev_addr is reused. Otherwise the address comes from the Lancer
 * provisioned-MAC list, the BE3 PF permanent MAC, or the PF-assigned
 * soft MAC for a BE3 VF. On return, *active_mac tells the caller
 * whether the address is already active on the interface (i.e. whether
 * a be_cmd_pmac_add() can be skipped); *pmac_id is written only on the
 * Lancer get-mac-from-list path.
 */
static int be_get_mac_addr(struct be_adapter *adapter, u8 *mac, u32 if_handle,
			   bool *active_mac, u32 *pmac_id)
{
	int status = 0;

	/* A MAC was assigned on a previous setup — reuse dev_addr.
	 * Only a non-Lancer VF's address is already active (the PF
	 * programmed it); all other cases still need a pmac_add.
	 */
	if (!is_zero_ether_addr(adapter->netdev->perm_addr)) {
		memcpy(mac, adapter->netdev->dev_addr, ETH_ALEN);
		if (!lancer_chip(adapter) && !be_physfn(adapter))
			*active_mac = true;
		else
			*active_mac = false;

		return status;
	}

	if (lancer_chip(adapter)) {
		/* Consult the provisioned-MAC list; when an active entry
		 * exists, fetch the address bound to that pmac_id.
		 */
		status = be_cmd_get_mac_from_list(adapter, mac,
						  active_mac, pmac_id, 0);
		if (*active_mac) {
			status = be_cmd_mac_addr_query(adapter, mac,
						       MAC_ADDRESS_TYPE_NETWORK,
						       false, if_handle,
						       *pmac_id);
		}
	} else if (be_physfn(adapter)) {
		/* For BE3, for PF get permanent MAC */
		status = be_cmd_mac_addr_query(adapter, mac,
					       MAC_ADDRESS_TYPE_NETWORK, true,
					       0, 0);
		*active_mac = false;
	} else {
		/* For BE3, for VF get soft MAC assigned by PF*/
		status = be_cmd_mac_addr_query(adapter, mac,
					       MAC_ADDRESS_TYPE_NETWORK, false,
					       if_handle, 0);
		*active_mac = true;
	}
	return status;
}
2715
Sathya Perla39f1d942012-05-08 19:41:24 +00002716/* Routine to query per function resource limits */
2717static int be_get_config(struct be_adapter *adapter)
2718{
2719 int pos;
2720 u16 dev_num_vfs;
2721
2722 pos = pci_find_ext_capability(adapter->pdev, PCI_EXT_CAP_ID_SRIOV);
2723 if (pos) {
2724 pci_read_config_word(adapter->pdev, pos + PCI_SRIOV_TOTAL_VF,
2725 &dev_num_vfs);
2726 adapter->dev_num_vfs = dev_num_vfs;
2727 }
2728 return 0;
2729}
2730
/* Bring the adapter to an operational state: enable MSI-x, create the
 * event/completion/MCC queues, create the vNIC interface, resolve and
 * program the MAC address, create TX queues, restore VLAN and
 * flow-control settings, optionally enable SR-IOV (PF only), and kick
 * off the periodic worker. Called from probe and again after reset
 * recovery; any failure tears everything down via be_clear().
 */
static int be_setup(struct be_adapter *adapter)
{
	struct device *dev = &adapter->pdev->dev;
	u32 cap_flags, en_flags;
	u32 tx_fc, rx_fc;
	int status;
	u8 mac[ETH_ALEN];
	bool active_mac;

	be_setup_init(adapter);

	be_get_config(adapter);

	be_cmd_req_native_mode(adapter);

	be_msix_enable(adapter);

	status = be_evt_queues_create(adapter);
	if (status)
		goto err;

	status = be_tx_cqs_create(adapter);
	if (status)
		goto err;

	status = be_rx_cqs_create(adapter);
	if (status)
		goto err;

	status = be_mcc_queues_create(adapter);
	if (status)
		goto err;

	/* Enable untagged/bcast/mcast RX and L3L4-error pass-through by
	 * default; the promiscuous modes are advertised as capabilities
	 * only, to be enabled later on demand.
	 */
	en_flags = BE_IF_FLAGS_UNTAGGED | BE_IF_FLAGS_BROADCAST |
			BE_IF_FLAGS_MULTICAST | BE_IF_FLAGS_PASS_L3L4_ERRORS;
	cap_flags = en_flags | BE_IF_FLAGS_MCAST_PROMISCUOUS |
			BE_IF_FLAGS_VLAN_PROMISCUOUS | BE_IF_FLAGS_PROMISCUOUS;

	if (adapter->function_caps & BE_FUNCTION_CAPS_RSS) {
		cap_flags |= BE_IF_FLAGS_RSS;
		en_flags |= BE_IF_FLAGS_RSS;
	}

	status = be_cmd_if_create(adapter, cap_flags, en_flags,
				  &adapter->if_handle, 0);
	if (status != 0)
		goto err;

	memset(mac, 0, ETH_ALEN);
	active_mac = false;
	status = be_get_mac_addr(adapter, mac, adapter->if_handle,
				 &active_mac, &adapter->pmac_id[0]);
	if (status != 0)
		goto err;

	/* Program the MAC onto the interface unless it is already active */
	if (!active_mac) {
		status = be_cmd_pmac_add(adapter, mac, adapter->if_handle,
					 &adapter->pmac_id[0], 0);
		if (status != 0)
			goto err;
	}

	/* First-time setup: publish the resolved MAC on the netdev */
	if (is_zero_ether_addr(adapter->netdev->dev_addr)) {
		memcpy(adapter->netdev->dev_addr, mac, ETH_ALEN);
		memcpy(adapter->netdev->perm_addr, mac, ETH_ALEN);
	}

	status = be_tx_qs_create(adapter);
	if (status)
		goto err;

	be_cmd_get_fw_ver(adapter, adapter->fw_ver, NULL);

	/* Re-program VLAN filters that were configured before a reset */
	if (adapter->vlans_added)
		be_vid_config(adapter);

	be_set_rx_mode(adapter->netdev);

	be_cmd_get_flow_control(adapter, &tx_fc, &rx_fc);

	/* Re-apply the driver's flow-control settings if FW disagrees */
	if (rx_fc != adapter->rx_fc || tx_fc != adapter->tx_fc)
		be_cmd_set_flow_control(adapter, adapter->tx_fc,
					adapter->rx_fc);

	/* VF setup failures are non-fatal for the PF (see be_vf_setup) */
	if (be_physfn(adapter) && num_vfs) {
		if (adapter->dev_num_vfs)
			be_vf_setup(adapter);
		else
			dev_warn(dev, "device doesn't support SRIOV\n");
	}

	be_cmd_get_phy_info(adapter);
	if (be_pause_supported(adapter))
		adapter->phy.fc_autoneg = 1;

	schedule_delayed_work(&adapter->work, msecs_to_jiffies(1000));
	adapter->flags |= BE_FLAGS_WORKER_SCHEDULED;
	return 0;
err:
	be_clear(adapter);
	return status;
}
2833
#ifdef CONFIG_NET_POLL_CONTROLLER
/* Poll-controller hook (netconsole etc.): service every event queue
 * without relying on interrupt delivery.
 */
static void be_netpoll(struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_eq_obj *eqo;
	int i;

	for_all_evt_queues(adapter, eqo, i)
		event_handle(eqo);
	/* Fix: dropped the redundant bare "return;" that ended this
	 * void function (checkpatch-style cleanup, no behavior change).
	 */
}
#endif
2847
/* Signature string found in the UFI firmware file header */
#define FW_FILE_HDR_SIGN 	"ServerEngines Corp. "
/* 32-byte marker that identifies the flash section directory inside a
 * UFI image; matched byte-for-byte by get_fsec_info(). Stored as two
 * 16-byte halves so each initializer's NUL fits in its array slot.
 */
char flash_cookie[2][16] = {"*** SE FLAS", "H DIRECTORY *** "};
2850
Sarveshwar Bandifa9a6fe2009-11-20 14:23:47 -08002851static bool be_flash_redboot(struct be_adapter *adapter,
Ajit Khaparde3f0d4562010-02-09 01:30:35 +00002852 const u8 *p, u32 img_start, int image_size,
2853 int hdr_size)
Sarveshwar Bandifa9a6fe2009-11-20 14:23:47 -08002854{
2855 u32 crc_offset;
2856 u8 flashed_crc[4];
2857 int status;
Ajit Khaparde3f0d4562010-02-09 01:30:35 +00002858
2859 crc_offset = hdr_size + img_start + image_size - 4;
2860
Sarveshwar Bandifa9a6fe2009-11-20 14:23:47 -08002861 p += crc_offset;
Ajit Khaparde3f0d4562010-02-09 01:30:35 +00002862
2863 status = be_cmd_get_flash_crc(adapter, flashed_crc,
Ajit Khapardef510fc62010-03-31 01:47:45 +00002864 (image_size - 4));
Sarveshwar Bandifa9a6fe2009-11-20 14:23:47 -08002865 if (status) {
2866 dev_err(&adapter->pdev->dev,
2867 "could not get crc from flash, not flashing redboot\n");
2868 return false;
2869 }
2870
2871 /*update redboot only if crc does not match*/
2872 if (!memcmp(flashed_crc, p, 4))
2873 return false;
2874 else
2875 return true;
Sarveshwar Bandifa9a6fe2009-11-20 14:23:47 -08002876}
2877
Sathya Perla306f1342011-08-02 19:57:45 +00002878static bool phy_flashing_required(struct be_adapter *adapter)
2879{
Ajit Khaparde42f11cf2012-04-21 18:53:22 +00002880 return (adapter->phy.phy_type == TN_8022 &&
2881 adapter->phy.interface_type == PHY_TYPE_BASET_10GB);
Sathya Perla306f1342011-08-02 19:57:45 +00002882}
2883
Padmanabh Ratnakarc165541e2012-04-25 01:47:15 +00002884static bool is_comp_in_ufi(struct be_adapter *adapter,
2885 struct flash_section_info *fsec, int type)
2886{
2887 int i = 0, img_type = 0;
2888 struct flash_section_info_g2 *fsec_g2 = NULL;
2889
2890 if (adapter->generation != BE_GEN3)
2891 fsec_g2 = (struct flash_section_info_g2 *)fsec;
2892
2893 for (i = 0; i < MAX_FLASH_COMP; i++) {
2894 if (fsec_g2)
2895 img_type = le32_to_cpu(fsec_g2->fsec_entry[i].type);
2896 else
2897 img_type = le32_to_cpu(fsec->fsec_entry[i].type);
2898
2899 if (img_type == type)
2900 return true;
2901 }
2902 return false;
2903
2904}
2905
2906struct flash_section_info *get_fsec_info(struct be_adapter *adapter,
2907 int header_size,
2908 const struct firmware *fw)
2909{
2910 struct flash_section_info *fsec = NULL;
2911 const u8 *p = fw->data;
2912
2913 p += header_size;
2914 while (p < (fw->data + fw->size)) {
2915 fsec = (struct flash_section_info *)p;
2916 if (!memcmp(flash_cookie, fsec->cookie, sizeof(flash_cookie)))
2917 return fsec;
2918 p += 32;
2919 }
2920 return NULL;
2921}
2922
/* Flash every component of a BE2/BE3 UFI image that applies to this
 * controller, using the generation-specific component table below.
 * A component is flashed only if it is present in the image's section
 * directory; in addition, NCSI FW requires controller FW >= 3.102.148.0,
 * PHY FW requires an external TN-8022 PHY, and redboot is flashed only
 * when its CRC differs from what is already in flash. Component data is
 * pushed to the FW through @flash_cmd in chunks of at most 32KB.
 * Returns 0 on success, -1 on any failure.
 */
static int be_flash_data(struct be_adapter *adapter,
			 const struct firmware *fw,
			 struct be_dma_mem *flash_cmd,
			 int num_of_images)

{
	int status = 0, i, filehdr_size = 0;
	int img_hdrs_size = (num_of_images * sizeof(struct image_hdr));
	u32 total_bytes = 0, flash_op;
	int num_bytes;
	const u8 *p = fw->data;
	struct be_cmd_write_flashrom *req = flash_cmd->va;
	const struct flash_comp *pflashcomp;
	int num_comp, hdr_size;
	struct flash_section_info *fsec = NULL;

	/* { image offset in file, FW op-type, max size, directory type } */
	struct flash_comp gen3_flash_types[] = {
		{ FLASH_iSCSI_PRIMARY_IMAGE_START_g3, OPTYPE_ISCSI_ACTIVE,
			FLASH_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_iSCSI},
		{ FLASH_REDBOOT_START_g3, OPTYPE_REDBOOT,
			FLASH_REDBOOT_IMAGE_MAX_SIZE_g3, IMAGE_BOOT_CODE},
		{ FLASH_iSCSI_BIOS_START_g3, OPTYPE_BIOS,
			FLASH_BIOS_IMAGE_MAX_SIZE_g3, IMAGE_OPTION_ROM_ISCSI},
		{ FLASH_PXE_BIOS_START_g3, OPTYPE_PXE_BIOS,
			FLASH_BIOS_IMAGE_MAX_SIZE_g3, IMAGE_OPTION_ROM_PXE},
		{ FLASH_FCoE_BIOS_START_g3, OPTYPE_FCOE_BIOS,
			FLASH_BIOS_IMAGE_MAX_SIZE_g3, IMAGE_OPTION_ROM_FCoE},
		{ FLASH_iSCSI_BACKUP_IMAGE_START_g3, OPTYPE_ISCSI_BACKUP,
			FLASH_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_BACKUP_iSCSI},
		{ FLASH_FCoE_PRIMARY_IMAGE_START_g3, OPTYPE_FCOE_FW_ACTIVE,
			FLASH_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_FCoE},
		{ FLASH_FCoE_BACKUP_IMAGE_START_g3, OPTYPE_FCOE_FW_BACKUP,
			FLASH_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_BACKUP_FCoE},
		{ FLASH_NCSI_START_g3, OPTYPE_NCSI_FW,
			FLASH_NCSI_IMAGE_MAX_SIZE_g3, IMAGE_NCSI},
		{ FLASH_PHY_FW_START_g3, OPTYPE_PHY_FW,
			FLASH_PHY_FW_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_PHY}
	};

	struct flash_comp gen2_flash_types[] = {
		{ FLASH_iSCSI_PRIMARY_IMAGE_START_g2, OPTYPE_ISCSI_ACTIVE,
			FLASH_IMAGE_MAX_SIZE_g2, IMAGE_FIRMWARE_iSCSI},
		{ FLASH_REDBOOT_START_g2, OPTYPE_REDBOOT,
			FLASH_REDBOOT_IMAGE_MAX_SIZE_g2, IMAGE_BOOT_CODE},
		{ FLASH_iSCSI_BIOS_START_g2, OPTYPE_BIOS,
			FLASH_BIOS_IMAGE_MAX_SIZE_g2, IMAGE_OPTION_ROM_ISCSI},
		{ FLASH_PXE_BIOS_START_g2, OPTYPE_PXE_BIOS,
			FLASH_BIOS_IMAGE_MAX_SIZE_g2, IMAGE_OPTION_ROM_PXE},
		{ FLASH_FCoE_BIOS_START_g2, OPTYPE_FCOE_BIOS,
			FLASH_BIOS_IMAGE_MAX_SIZE_g2, IMAGE_OPTION_ROM_FCoE},
		{ FLASH_iSCSI_BACKUP_IMAGE_START_g2, OPTYPE_ISCSI_BACKUP,
			FLASH_IMAGE_MAX_SIZE_g2, IMAGE_FIRMWARE_BACKUP_iSCSI},
		{ FLASH_FCoE_PRIMARY_IMAGE_START_g2, OPTYPE_FCOE_FW_ACTIVE,
			FLASH_IMAGE_MAX_SIZE_g2, IMAGE_FIRMWARE_FCoE},
		{ FLASH_FCoE_BACKUP_IMAGE_START_g2, OPTYPE_FCOE_FW_BACKUP,
			FLASH_IMAGE_MAX_SIZE_g2, IMAGE_FIRMWARE_BACKUP_FCoE}
	};

	if (adapter->generation == BE_GEN3) {
		pflashcomp = gen3_flash_types;
		filehdr_size = sizeof(struct flash_file_hdr_g3);
		num_comp = ARRAY_SIZE(gen3_flash_types);
	} else {
		pflashcomp = gen2_flash_types;
		filehdr_size = sizeof(struct flash_file_hdr_g2);
		num_comp = ARRAY_SIZE(gen2_flash_types);
	}
	/* Get flash section info*/
	fsec = get_fsec_info(adapter, filehdr_size + img_hdrs_size, fw);
	if (!fsec) {
		dev_err(&adapter->pdev->dev,
			"Invalid Cookie. UFI corrupted ?\n");
		return -1;
	}
	for (i = 0; i < num_comp; i++) {
		if (!is_comp_in_ufi(adapter, fsec, pflashcomp[i].img_type))
			continue;

		/* NCSI FW in the image needs controller FW >= 3.102.148.0 */
		if ((pflashcomp[i].optype == OPTYPE_NCSI_FW) &&
		    memcmp(adapter->fw_ver, "3.102.148.0", 11) < 0)
			continue;

		if (pflashcomp[i].optype == OPTYPE_PHY_FW) {
			if (!phy_flashing_required(adapter))
				continue;
		}

		hdr_size = filehdr_size +
			   (num_of_images * sizeof(struct image_hdr));

		/* Skip redboot when the in-flash CRC already matches */
		if ((pflashcomp[i].optype == OPTYPE_REDBOOT) &&
		    (!be_flash_redboot(adapter, fw->data, pflashcomp[i].offset,
				       pflashcomp[i].size, hdr_size)))
			continue;

		/* Flash the component */
		p = fw->data;
		p += filehdr_size + pflashcomp[i].offset + img_hdrs_size;
		/* Reject images whose component spills past the file end */
		if (p + pflashcomp[i].size > fw->data + fw->size)
			return -1;
		total_bytes = pflashcomp[i].size;
		while (total_bytes) {
			if (total_bytes > 32*1024)
				num_bytes = 32*1024;
			else
				num_bytes = total_bytes;
			total_bytes -= num_bytes;
			/* Intermediate chunks use a SAVE op; only the
			 * final chunk triggers the actual FLASH op.
			 */
			if (!total_bytes) {
				if (pflashcomp[i].optype == OPTYPE_PHY_FW)
					flash_op = FLASHROM_OPER_PHY_FLASH;
				else
					flash_op = FLASHROM_OPER_FLASH;
			} else {
				if (pflashcomp[i].optype == OPTYPE_PHY_FW)
					flash_op = FLASHROM_OPER_PHY_SAVE;
				else
					flash_op = FLASHROM_OPER_SAVE;
			}
			memcpy(req->params.data_buf, p, num_bytes);
			p += num_bytes;
			status = be_cmd_write_flashrom(adapter, flash_cmd,
				pflashcomp[i].optype, flash_op, num_bytes);
			if (status) {
				/* PHY FW flashing is best-effort: an
				 * "illegal request" from older FW just
				 * skips the component.
				 */
				if ((status == ILLEGAL_IOCTL_REQ) &&
				    (pflashcomp[i].optype ==
				     OPTYPE_PHY_FW))
					break;
				dev_err(&adapter->pdev->dev,
					"cmd to write to flash rom failed.\n");
				return -1;
			}
		}
	}
	return 0;
}
3058
Ajit Khaparde3f0d4562010-02-09 01:30:35 +00003059static int get_ufigen_type(struct flash_file_hdr_g2 *fhdr)
3060{
3061 if (fhdr == NULL)
3062 return 0;
3063 if (fhdr->build[0] == '3')
3064 return BE_GEN3;
3065 else if (fhdr->build[0] == '2')
3066 return BE_GEN2;
3067 else
3068 return 0;
3069}
3070
Padmanabh Ratnakarf67ef7b2012-07-12 03:57:09 +00003071static int lancer_wait_idle(struct be_adapter *adapter)
3072{
3073#define SLIPORT_IDLE_TIMEOUT 30
3074 u32 reg_val;
3075 int status = 0, i;
3076
3077 for (i = 0; i < SLIPORT_IDLE_TIMEOUT; i++) {
3078 reg_val = ioread32(adapter->db + PHYSDEV_CONTROL_OFFSET);
3079 if ((reg_val & PHYSDEV_CONTROL_INP_MASK) == 0)
3080 break;
3081
3082 ssleep(1);
3083 }
3084
3085 if (i == SLIPORT_IDLE_TIMEOUT)
3086 status = -1;
3087
3088 return status;
3089}
3090
3091static int lancer_fw_reset(struct be_adapter *adapter)
3092{
3093 int status = 0;
3094
3095 status = lancer_wait_idle(adapter);
3096 if (status)
3097 return status;
3098
3099 iowrite32(PHYSDEV_CONTROL_FW_RESET_MASK, adapter->db +
3100 PHYSDEV_CONTROL_OFFSET);
3101
3102 return status;
3103}
3104
/* Download a firmware image to a Lancer chip using write_object
 * commands: the image is streamed in chunks of up to 32KB through a
 * DMA buffer, then committed with a zero-length write. Depending on
 * the change_status the FW reports, either a FW reset is issued or a
 * system reboot is requested for the new FW to become active.
 * Returns 0 on success or a negative error code.
 */
static int lancer_fw_download(struct be_adapter *adapter,
			      const struct firmware *fw)
{
#define LANCER_FW_DOWNLOAD_CHUNK (32 * 1024)
#define LANCER_FW_DOWNLOAD_LOCATION "/prg"
	struct be_dma_mem flash_cmd;
	const u8 *data_ptr = NULL;
	u8 *dest_image_ptr = NULL;
	size_t image_size = 0;
	u32 chunk_size = 0;
	u32 data_written = 0;
	u32 offset = 0;
	int status = 0;
	u8 add_status = 0;
	u8 change_status;

	/* FW requires the image length to be a multiple of 4 bytes */
	if (!IS_ALIGNED(fw->size, sizeof(u32))) {
		dev_err(&adapter->pdev->dev,
			"FW Image not properly aligned. "
			"Length must be 4 byte aligned.\n");
		status = -EINVAL;
		goto lancer_fw_exit;
	}

	/* One write_object request header followed by one data chunk */
	flash_cmd.size = sizeof(struct lancer_cmd_req_write_object)
				+ LANCER_FW_DOWNLOAD_CHUNK;
	flash_cmd.va = dma_alloc_coherent(&adapter->pdev->dev, flash_cmd.size,
					  &flash_cmd.dma, GFP_KERNEL);
	if (!flash_cmd.va) {
		status = -ENOMEM;
		dev_err(&adapter->pdev->dev,
			"Memory allocation failure while flashing\n");
		goto lancer_fw_exit;
	}

	/* Chunk data lives just past the request header in the buffer */
	dest_image_ptr = flash_cmd.va +
				sizeof(struct lancer_cmd_req_write_object);
	image_size = fw->size;
	data_ptr = fw->data;

	while (image_size) {
		chunk_size = min_t(u32, image_size, LANCER_FW_DOWNLOAD_CHUNK);

		/* Copy the image chunk content. */
		memcpy(dest_image_ptr, data_ptr, chunk_size);

		status = lancer_cmd_write_object(adapter, &flash_cmd,
						 chunk_size, offset,
						 LANCER_FW_DOWNLOAD_LOCATION,
						 &data_written, &change_status,
						 &add_status);
		if (status)
			break;

		/* FW may consume fewer bytes than sent; advance only by
		 * the amount it actually accepted.
		 */
		offset += data_written;
		data_ptr += data_written;
		image_size -= data_written;
	}

	if (!status) {
		/* Commit the FW written */
		status = lancer_cmd_write_object(adapter, &flash_cmd,
						 0, offset,
						 LANCER_FW_DOWNLOAD_LOCATION,
						 &data_written, &change_status,
						 &add_status);
	}

	dma_free_coherent(&adapter->pdev->dev, flash_cmd.size, flash_cmd.va,
			  flash_cmd.dma);
	if (status) {
		dev_err(&adapter->pdev->dev,
			"Firmware load error. "
			"Status code: 0x%x Additional Status: 0x%x\n",
			status, add_status);
		goto lancer_fw_exit;
	}

	if (change_status == LANCER_FW_RESET_NEEDED) {
		status = lancer_fw_reset(adapter);
		if (status) {
			dev_err(&adapter->pdev->dev,
				"Adapter busy for FW reset.\n"
				"New FW will not be active.\n");
			goto lancer_fw_exit;
		}
	} else if (change_status != LANCER_NO_RESET_NEEDED) {
		dev_err(&adapter->pdev->dev,
			"System reboot required for new FW"
			" to be active\n");
	}

	dev_info(&adapter->pdev->dev, "Firmware flashed successfully\n");
lancer_fw_exit:
	return status;
}
3201
/* Flash a BE2/BE3 UFI image. The generation encoded in the file header
 * must match the controller generation; GEN3 files may carry several
 * image headers, and only images with imageid == 1 are flashed.
 * Returns 0 on success, -ENOMEM on allocation failure, or -1 on a
 * flashing/compatibility error.
 */
static int be_fw_download(struct be_adapter *adapter, const struct firmware* fw)
{
	struct flash_file_hdr_g2 *fhdr;
	struct flash_file_hdr_g3 *fhdr3;
	struct image_hdr *img_hdr_ptr = NULL;
	struct be_dma_mem flash_cmd;
	const u8 *p;
	int status = 0, i = 0, num_imgs = 0;

	p = fw->data;
	fhdr = (struct flash_file_hdr_g2 *) p;

	/* DMA buffer: one write_flashrom request + a 32KB data chunk */
	flash_cmd.size = sizeof(struct be_cmd_write_flashrom) + 32*1024;
	flash_cmd.va = dma_alloc_coherent(&adapter->pdev->dev, flash_cmd.size,
					  &flash_cmd.dma, GFP_KERNEL);
	if (!flash_cmd.va) {
		status = -ENOMEM;
		dev_err(&adapter->pdev->dev,
			"Memory allocation failure while flashing\n");
		goto be_fw_exit;
	}

	if ((adapter->generation == BE_GEN3) &&
	    (get_ufigen_type(fhdr) == BE_GEN3)) {
		fhdr3 = (struct flash_file_hdr_g3 *) fw->data;
		num_imgs = le32_to_cpu(fhdr3->num_imgs);
		for (i = 0; i < num_imgs; i++) {
			/* Image headers sit back-to-back after the g3
			 * file header; flash only imageid 1 entries.
			 */
			img_hdr_ptr = (struct image_hdr *) (fw->data +
					(sizeof(struct flash_file_hdr_g3) +
					 i * sizeof(struct image_hdr)));
			if (le32_to_cpu(img_hdr_ptr->imageid) == 1)
				status = be_flash_data(adapter, fw, &flash_cmd,
						       num_imgs);
		}
	} else if ((adapter->generation == BE_GEN2) &&
		   (get_ufigen_type(fhdr) == BE_GEN2)) {
		status = be_flash_data(adapter, fw, &flash_cmd, 0);
	} else {
		dev_err(&adapter->pdev->dev,
			"UFI and Interface are not compatible for flashing\n");
		status = -1;
	}

	dma_free_coherent(&adapter->pdev->dev, flash_cmd.size, flash_cmd.va,
			  flash_cmd.dma);
	if (status) {
		dev_err(&adapter->pdev->dev, "Firmware load error\n");
		goto be_fw_exit;
	}

	dev_info(&adapter->pdev->dev, "Firmware flashed successfully\n");

be_fw_exit:
	return status;
}
3257
3258int be_load_fw(struct be_adapter *adapter, u8 *fw_file)
3259{
3260 const struct firmware *fw;
3261 int status;
3262
3263 if (!netif_running(adapter->netdev)) {
3264 dev_err(&adapter->pdev->dev,
3265 "Firmware load not allowed (interface is down)\n");
3266 return -1;
3267 }
3268
3269 status = request_firmware(&fw, fw_file, &adapter->pdev->dev);
3270 if (status)
3271 goto fw_exit;
3272
3273 dev_info(&adapter->pdev->dev, "Flashing firmware file %s\n", fw_file);
3274
3275 if (lancer_chip(adapter))
3276 status = lancer_fw_download(adapter, fw);
3277 else
3278 status = be_fw_download(adapter, fw);
3279
Ajit Khaparde84517482009-09-04 03:12:16 +00003280fw_exit:
3281 release_firmware(fw);
3282 return status;
3283}
3284
/* net_device_ops for this driver. The ndo_set_vf_* entries are served
 * by the PF on behalf of its SR-IOV VFs; be_netpoll is compiled in only
 * when CONFIG_NET_POLL_CONTROLLER is set.
 */
static const struct net_device_ops be_netdev_ops = {
	.ndo_open		= be_open,
	.ndo_stop		= be_close,
	.ndo_start_xmit		= be_xmit,
	.ndo_set_rx_mode	= be_set_rx_mode,
	.ndo_set_mac_address	= be_mac_addr_set,
	.ndo_change_mtu		= be_change_mtu,
	.ndo_get_stats64	= be_get_stats64,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_vlan_rx_add_vid	= be_vlan_add_vid,
	.ndo_vlan_rx_kill_vid	= be_vlan_rem_vid,
	.ndo_set_vf_mac		= be_set_vf_mac,
	.ndo_set_vf_vlan	= be_set_vf_vlan,
	.ndo_set_vf_tx_rate	= be_set_vf_tx_rate,
	.ndo_get_vf_config	= be_get_vf_config,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	= be_netpoll,
#endif
};
3304
/* One-time netdev initialization: advertise offload capabilities,
 * enable the default feature set, hook up netdev/ethtool ops and
 * register one NAPI context per event queue.
 */
static void be_netdev_init(struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_eq_obj *eqo;
	int i;

	/* User-toggleable offloads: SG, TSO, checksums, VLAN insertion */
	netdev->hw_features |= NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6 |
		NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_RXCSUM |
		NETIF_F_HW_VLAN_TX;
	if (be_multi_rxq(adapter))
		netdev->hw_features |= NETIF_F_RXHASH;

	/* Enable all of the above by default, plus VLAN stripping and
	 * filtering (which are not added to hw_features and so cannot
	 * be toggled off by the user here).
	 */
	netdev->features |= netdev->hw_features |
		NETIF_F_HW_VLAN_RX | NETIF_F_HW_VLAN_FILTER;

	netdev->vlan_features |= NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6 |
		NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;

	/* HW can filter additional unicast addresses */
	netdev->priv_flags |= IFF_UNICAST_FLT;

	netdev->flags |= IFF_MULTICAST;

	/* Keep header + GSO payload within the 64K - 1 limit */
	netif_set_gso_max_size(netdev, 65535 - ETH_HLEN);

	netdev->netdev_ops = &be_netdev_ops;

	SET_ETHTOOL_OPS(netdev, &be_ethtool_ops);

	for_all_evt_queues(adapter, eqo, i)
		netif_napi_add(netdev, &eqo->napi, be_poll, BE_NAPI_WEIGHT);
}
3336
3337static void be_unmap_pci_bars(struct be_adapter *adapter)
3338{
Sathya Perla8788fdc2009-07-27 22:52:03 +00003339 if (adapter->csr)
3340 iounmap(adapter->csr);
3341 if (adapter->db)
3342 iounmap(adapter->db);
Parav Pandit045508a2012-03-26 14:27:13 +00003343 if (adapter->roce_db.base)
3344 pci_iounmap(adapter->pdev, adapter->roce_db.base);
3345}
3346
3347static int lancer_roce_map_pci_bars(struct be_adapter *adapter)
3348{
3349 struct pci_dev *pdev = adapter->pdev;
3350 u8 __iomem *addr;
3351
3352 addr = pci_iomap(pdev, 2, 0);
3353 if (addr == NULL)
3354 return -ENOMEM;
3355
3356 adapter->roce_db.base = addr;
3357 adapter->roce_db.io_addr = pci_resource_start(pdev, 2);
3358 adapter->roce_db.size = 8192;
3359 adapter->roce_db.total_size = pci_resource_len(pdev, 2);
3360 return 0;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003361}
3362
3363static int be_map_pci_bars(struct be_adapter *adapter)
3364{
3365 u8 __iomem *addr;
Sathya Perladb3ea782011-08-22 19:41:52 +00003366 int db_reg;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003367
Sathya Perlafe6d2a32010-11-21 23:25:50 +00003368 if (lancer_chip(adapter)) {
Parav Pandit045508a2012-03-26 14:27:13 +00003369 if (be_type_2_3(adapter)) {
3370 addr = ioremap_nocache(
3371 pci_resource_start(adapter->pdev, 0),
3372 pci_resource_len(adapter->pdev, 0));
3373 if (addr == NULL)
3374 return -ENOMEM;
3375 adapter->db = addr;
3376 }
3377 if (adapter->if_type == SLI_INTF_TYPE_3) {
3378 if (lancer_roce_map_pci_bars(adapter))
3379 goto pci_map_err;
3380 }
Sathya Perlafe6d2a32010-11-21 23:25:50 +00003381 return 0;
3382 }
3383
Sarveshwar Bandiba343c72010-03-31 02:56:12 +00003384 if (be_physfn(adapter)) {
3385 addr = ioremap_nocache(pci_resource_start(adapter->pdev, 2),
3386 pci_resource_len(adapter->pdev, 2));
3387 if (addr == NULL)
3388 return -ENOMEM;
3389 adapter->csr = addr;
3390 }
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003391
Sarveshwar Bandiba343c72010-03-31 02:56:12 +00003392 if (adapter->generation == BE_GEN2) {
Sarveshwar Bandiba343c72010-03-31 02:56:12 +00003393 db_reg = 4;
3394 } else {
Sarveshwar Bandiba343c72010-03-31 02:56:12 +00003395 if (be_physfn(adapter))
3396 db_reg = 4;
3397 else
3398 db_reg = 0;
3399 }
3400 addr = ioremap_nocache(pci_resource_start(adapter->pdev, db_reg),
3401 pci_resource_len(adapter->pdev, db_reg));
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003402 if (addr == NULL)
3403 goto pci_map_err;
Sathya Perla8788fdc2009-07-27 22:52:03 +00003404 adapter->db = addr;
Parav Pandit045508a2012-03-26 14:27:13 +00003405 if (adapter->sli_family == SKYHAWK_SLI_FAMILY) {
3406 adapter->roce_db.size = 4096;
3407 adapter->roce_db.io_addr =
3408 pci_resource_start(adapter->pdev, db_reg);
3409 adapter->roce_db.total_size =
3410 pci_resource_len(adapter->pdev, db_reg);
3411 }
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003412 return 0;
3413pci_map_err:
3414 be_unmap_pci_bars(adapter);
3415 return -ENOMEM;
3416}
3417
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003418static void be_ctrl_cleanup(struct be_adapter *adapter)
3419{
Sathya Perla8788fdc2009-07-27 22:52:03 +00003420 struct be_dma_mem *mem = &adapter->mbox_mem_alloced;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003421
3422 be_unmap_pci_bars(adapter);
3423
3424 if (mem->va)
Ivan Vecera2b7bceb2011-02-02 08:05:12 +00003425 dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
3426 mem->dma);
Sathya Perlae7b909a2009-11-22 22:01:10 +00003427
Sathya Perla5b8821b2011-08-02 19:57:44 +00003428 mem = &adapter->rx_filter;
Sathya Perlae7b909a2009-11-22 22:01:10 +00003429 if (mem->va)
Ivan Vecera2b7bceb2011-02-02 08:05:12 +00003430 dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
3431 mem->dma);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003432}
3433
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003434static int be_ctrl_init(struct be_adapter *adapter)
3435{
Sathya Perla8788fdc2009-07-27 22:52:03 +00003436 struct be_dma_mem *mbox_mem_alloc = &adapter->mbox_mem_alloced;
3437 struct be_dma_mem *mbox_mem_align = &adapter->mbox_mem;
Sathya Perla5b8821b2011-08-02 19:57:44 +00003438 struct be_dma_mem *rx_filter = &adapter->rx_filter;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003439 int status;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003440
3441 status = be_map_pci_bars(adapter);
3442 if (status)
Sathya Perlae7b909a2009-11-22 22:01:10 +00003443 goto done;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003444
3445 mbox_mem_alloc->size = sizeof(struct be_mcc_mailbox) + 16;
Ivan Vecera2b7bceb2011-02-02 08:05:12 +00003446 mbox_mem_alloc->va = dma_alloc_coherent(&adapter->pdev->dev,
3447 mbox_mem_alloc->size,
3448 &mbox_mem_alloc->dma,
3449 GFP_KERNEL);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003450 if (!mbox_mem_alloc->va) {
Sathya Perlae7b909a2009-11-22 22:01:10 +00003451 status = -ENOMEM;
3452 goto unmap_pci_bars;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003453 }
3454 mbox_mem_align->size = sizeof(struct be_mcc_mailbox);
3455 mbox_mem_align->va = PTR_ALIGN(mbox_mem_alloc->va, 16);
3456 mbox_mem_align->dma = PTR_ALIGN(mbox_mem_alloc->dma, 16);
3457 memset(mbox_mem_align->va, 0, sizeof(struct be_mcc_mailbox));
Sathya Perlae7b909a2009-11-22 22:01:10 +00003458
Sathya Perla5b8821b2011-08-02 19:57:44 +00003459 rx_filter->size = sizeof(struct be_cmd_req_rx_filter);
3460 rx_filter->va = dma_alloc_coherent(&adapter->pdev->dev, rx_filter->size,
3461 &rx_filter->dma, GFP_KERNEL);
3462 if (rx_filter->va == NULL) {
Sathya Perlae7b909a2009-11-22 22:01:10 +00003463 status = -ENOMEM;
3464 goto free_mbox;
3465 }
Sathya Perla5b8821b2011-08-02 19:57:44 +00003466 memset(rx_filter->va, 0, rx_filter->size);
Sathya Perlae7b909a2009-11-22 22:01:10 +00003467
Ivan Vecera29849612010-12-14 05:43:19 +00003468 mutex_init(&adapter->mbox_lock);
Sathya Perla8788fdc2009-07-27 22:52:03 +00003469 spin_lock_init(&adapter->mcc_lock);
3470 spin_lock_init(&adapter->mcc_cq_lock);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003471
Sarveshwar Bandidd131e72010-05-25 16:16:32 -07003472 init_completion(&adapter->flash_compl);
Sathya Perlacf588472010-02-14 21:22:01 +00003473 pci_save_state(adapter->pdev);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003474 return 0;
Sathya Perlae7b909a2009-11-22 22:01:10 +00003475
3476free_mbox:
Ivan Vecera2b7bceb2011-02-02 08:05:12 +00003477 dma_free_coherent(&adapter->pdev->dev, mbox_mem_alloc->size,
3478 mbox_mem_alloc->va, mbox_mem_alloc->dma);
Sathya Perlae7b909a2009-11-22 22:01:10 +00003479
3480unmap_pci_bars:
3481 be_unmap_pci_bars(adapter);
3482
3483done:
3484 return status;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003485}
3486
3487static void be_stats_cleanup(struct be_adapter *adapter)
3488{
Sathya Perla3abcded2010-10-03 22:12:27 -07003489 struct be_dma_mem *cmd = &adapter->stats_cmd;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003490
3491 if (cmd->va)
Ivan Vecera2b7bceb2011-02-02 08:05:12 +00003492 dma_free_coherent(&adapter->pdev->dev, cmd->size,
3493 cmd->va, cmd->dma);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003494}
3495
3496static int be_stats_init(struct be_adapter *adapter)
3497{
Sathya Perla3abcded2010-10-03 22:12:27 -07003498 struct be_dma_mem *cmd = &adapter->stats_cmd;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003499
Selvin Xavier005d5692011-05-16 07:36:35 +00003500 if (adapter->generation == BE_GEN2) {
Ajit Khaparde89a88ab2011-05-16 07:36:18 +00003501 cmd->size = sizeof(struct be_cmd_req_get_stats_v0);
Selvin Xavier005d5692011-05-16 07:36:35 +00003502 } else {
3503 if (lancer_chip(adapter))
3504 cmd->size = sizeof(struct lancer_cmd_req_pport_stats);
3505 else
3506 cmd->size = sizeof(struct be_cmd_req_get_stats_v1);
3507 }
Ivan Vecera2b7bceb2011-02-02 08:05:12 +00003508 cmd->va = dma_alloc_coherent(&adapter->pdev->dev, cmd->size, &cmd->dma,
3509 GFP_KERNEL);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003510 if (cmd->va == NULL)
3511 return -1;
David S. Millerd291b9a2010-01-28 21:36:21 -08003512 memset(cmd->va, 0, cmd->size);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003513 return 0;
3514}
3515
/* PCI remove callback: tear the device down in the reverse order of
 * be_probe(). The RoCE driver is detached and the recovery worker is
 * cancelled before the netdev is unregistered so neither can run
 * against a half-torn-down adapter.
 */
static void __devexit be_remove(struct pci_dev *pdev)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);

	/* nothing to do if probe never attached drvdata */
	if (!adapter)
		return;

	be_roce_dev_remove(adapter);

	cancel_delayed_work_sync(&adapter->func_recovery_work);

	unregister_netdev(adapter->netdev);

	be_clear(adapter);

	/* tell fw we're done with firing cmds */
	be_cmd_fw_clean(adapter);

	be_stats_cleanup(adapter);

	be_ctrl_cleanup(adapter);

	pci_set_drvdata(pdev, NULL);
	pci_release_regions(pdev);
	pci_disable_device(pdev);

	/* frees the adapter too (it is the netdev's priv area) */
	free_netdev(adapter->netdev);
}
3544
Ajit Khaparde4762f6c2012-03-18 06:23:11 +00003545bool be_is_wol_supported(struct be_adapter *adapter)
3546{
3547 return ((adapter->wol_cap & BE_WOL_CAP) &&
3548 !be_is_wol_excluded(adapter)) ? true : false;
3549}
3550
/* Query the FW's extended-FAT capabilities and return the UART-mode
 * debug/trace level configured in module 0. Returns 0 if the DMA
 * buffer cannot be allocated or the FW query fails (i.e. "no logging"
 * is the fallback).
 */
u32 be_get_fw_log_level(struct be_adapter *adapter)
{
	struct be_dma_mem extfat_cmd;
	struct be_fat_conf_params *cfgs;
	int status;
	u32 level = 0;
	int j;

	memset(&extfat_cmd, 0, sizeof(struct be_dma_mem));
	extfat_cmd.size = sizeof(struct be_cmd_resp_get_ext_fat_caps);
	extfat_cmd.va = pci_alloc_consistent(adapter->pdev, extfat_cmd.size,
					     &extfat_cmd.dma);

	if (!extfat_cmd.va) {
		dev_err(&adapter->pdev->dev, "%s: Memory allocation failure\n",
			__func__);
		goto err;
	}

	status = be_cmd_get_ext_fat_capabilites(adapter, &extfat_cmd);
	if (!status) {
		/* the config params follow the common response header */
		cfgs = (struct be_fat_conf_params *)(extfat_cmd.va +
						sizeof(struct be_cmd_resp_hdr));
		/* pick the dbg level of the (last) UART-mode entry */
		for (j = 0; j < cfgs->module[0].num_modes; j++) {
			if (cfgs->module[0].trace_lvl[j].mode == MODE_UART)
				level = cfgs->module[0].trace_lvl[j].dbg_lvl;
		}
	}
	pci_free_consistent(adapter->pdev, extfat_cmd.size, extfat_cmd.va,
			    extfat_cmd.dma);
err:
	return level;
}
/* Query the FW for the adapter's initial configuration: port number,
 * function mode/caps, VLAN and unicast-MAC limits, controller
 * attributes, WOL capability and FW log level.
 * Returns 0 on success or a negative errno from a failed FW command.
 * NOTE(review): on failure after kcalloc(), pmac_id is not freed here —
 * presumably released on the caller's teardown path; confirm.
 */
static int be_get_initial_config(struct be_adapter *adapter)
{
	int status;
	u32 level;

	status = be_cmd_query_fw_cfg(adapter, &adapter->port_num,
			&adapter->function_mode, &adapter->function_caps);
	if (status)
		return status;

	/* in FLEX10 mode the VLAN table is shared 8 ways */
	if (adapter->function_mode & FLEX10_MODE)
		adapter->max_vlans = BE_NUM_VLANS_SUPPORTED/8;
	else
		adapter->max_vlans = BE_NUM_VLANS_SUPPORTED;

	if (be_physfn(adapter))
		adapter->max_pmac_cnt = BE_UC_PMAC_COUNT;
	else
		adapter->max_pmac_cnt = BE_VF_UC_PMAC_COUNT;

	/* primary mac needs 1 pmac entry */
	adapter->pmac_id = kcalloc(adapter->max_pmac_cnt + 1,
				  sizeof(u32), GFP_KERNEL);
	if (!adapter->pmac_id)
		return -ENOMEM;

	status = be_cmd_get_cntl_attributes(adapter);
	if (status)
		return status;

	status = be_cmd_get_acpi_wol_cap(adapter);
	if (status) {
		/* in case of a failure to get wol capabilities
		 * check the exclusion list to determine WOL capability */
		if (!be_is_wol_excluded(adapter))
			adapter->wol_cap |= BE_WOL_CAP;
	}

	if (be_is_wol_supported(adapter))
		adapter->wol = true;

	/* Must be a power of 2 or else MODULO will BUG_ON */
	adapter->be_get_temp_freq = 64;

	/* enable HW-level messages only if FW logging is effectively off */
	level = be_get_fw_log_level(adapter);
	adapter->msg_enable = level <= FW_LOG_LEVEL_DEFAULT ? NETIF_MSG_HW : 0;

	return 0;
}
3633
Sathya Perla39f1d942012-05-08 19:41:24 +00003634static int be_dev_type_check(struct be_adapter *adapter)
Sathya Perlafe6d2a32010-11-21 23:25:50 +00003635{
3636 struct pci_dev *pdev = adapter->pdev;
3637 u32 sli_intf = 0, if_type;
3638
3639 switch (pdev->device) {
3640 case BE_DEVICE_ID1:
3641 case OC_DEVICE_ID1:
3642 adapter->generation = BE_GEN2;
3643 break;
3644 case BE_DEVICE_ID2:
3645 case OC_DEVICE_ID2:
3646 adapter->generation = BE_GEN3;
3647 break;
3648 case OC_DEVICE_ID3:
Mammatha Edhala12f4d0a2011-05-18 03:26:22 +00003649 case OC_DEVICE_ID4:
Sathya Perlafe6d2a32010-11-21 23:25:50 +00003650 pci_read_config_dword(pdev, SLI_INTF_REG_OFFSET, &sli_intf);
Parav Pandit045508a2012-03-26 14:27:13 +00003651 adapter->if_type = (sli_intf & SLI_INTF_IF_TYPE_MASK) >>
3652 SLI_INTF_IF_TYPE_SHIFT;
Sathya Perlafe6d2a32010-11-21 23:25:50 +00003653 if_type = (sli_intf & SLI_INTF_IF_TYPE_MASK) >>
3654 SLI_INTF_IF_TYPE_SHIFT;
Sathya Perlafe6d2a32010-11-21 23:25:50 +00003655 if (((sli_intf & SLI_INTF_VALID_MASK) != SLI_INTF_VALID) ||
Parav Pandit045508a2012-03-26 14:27:13 +00003656 !be_type_2_3(adapter)) {
3657 dev_err(&pdev->dev, "SLI_INTF reg val is not valid\n");
3658 return -EINVAL;
3659 }
3660 adapter->sli_family = ((sli_intf & SLI_INTF_FAMILY_MASK) >>
3661 SLI_INTF_FAMILY_SHIFT);
3662 adapter->generation = BE_GEN3;
3663 break;
3664 case OC_DEVICE_ID5:
3665 pci_read_config_dword(pdev, SLI_INTF_REG_OFFSET, &sli_intf);
3666 if ((sli_intf & SLI_INTF_VALID_MASK) != SLI_INTF_VALID) {
Sathya Perlafe6d2a32010-11-21 23:25:50 +00003667 dev_err(&pdev->dev, "SLI_INTF reg val is not valid\n");
3668 return -EINVAL;
3669 }
Sathya Perlafe6d2a32010-11-21 23:25:50 +00003670 adapter->sli_family = ((sli_intf & SLI_INTF_FAMILY_MASK) >>
3671 SLI_INTF_FAMILY_SHIFT);
3672 adapter->generation = BE_GEN3;
3673 break;
3674 default:
3675 adapter->generation = 0;
3676 }
Sathya Perla39f1d942012-05-08 19:41:24 +00003677
3678 pci_read_config_dword(adapter->pdev, SLI_INTF_REG_OFFSET, &sli_intf);
3679 adapter->virtfn = (sli_intf & SLI_INTF_FT_MASK) ? 1 : 0;
Sathya Perlafe6d2a32010-11-21 23:25:50 +00003680 return 0;
3681}
3682
/* Attempt SLIPORT recovery on a Lancer chip: wait for the FW to become
 * ready again, tear down the current configuration, clear the error
 * flags, and rebuild (re-opening the interface if it was running).
 * Returns 0 on success or the first failing step's errno.
 */
static int lancer_recover_func(struct be_adapter *adapter)
{
	int status;

	status = lancer_test_and_set_rdy_state(adapter);
	if (status)
		goto err;

	if (netif_running(adapter->netdev))
		be_close(adapter->netdev);

	be_clear(adapter);

	/* clear the sticky error state before re-initializing */
	adapter->hw_error = false;
	adapter->fw_timeout = false;

	status = be_setup(adapter);
	if (status)
		goto err;

	if (netif_running(adapter->netdev)) {
		status = be_open(adapter->netdev);
		if (status)
			goto err;
	}

	dev_err(&adapter->pdev->dev,
		"Adapter SLIPORT recovery succeeded\n");
	return 0;
err:
	dev_err(&adapter->pdev->dev,
		"Adapter SLIPORT recovery failed\n");

	return status;
}
3718
3719static void be_func_recovery_task(struct work_struct *work)
3720{
3721 struct be_adapter *adapter =
3722 container_of(work, struct be_adapter, func_recovery_work.work);
3723 int status;
3724
3725 be_detect_error(adapter);
3726
3727 if (adapter->hw_error && lancer_chip(adapter)) {
3728
3729 if (adapter->eeh_error)
3730 goto out;
3731
3732 rtnl_lock();
3733 netif_device_detach(adapter->netdev);
3734 rtnl_unlock();
3735
3736 status = lancer_recover_func(adapter);
3737
3738 if (!status)
3739 netif_device_attach(adapter->netdev);
3740 }
3741
3742out:
3743 schedule_delayed_work(&adapter->func_recovery_work,
3744 msecs_to_jiffies(1000));
Padmanabh Ratnakard8110f62011-11-25 05:48:23 +00003745}
3746
/* Periodic (1s) housekeeping worker: fire the async stats command,
 * poll die temperature, replenish starved RX rings and adapt EQ delay.
 */
static void be_worker(struct work_struct *work)
{
	struct be_adapter *adapter =
		container_of(work, struct be_adapter, work.work);
	struct be_rx_obj *rxo;
	struct be_eq_obj *eqo;
	int i;

	/* when interrupts are not yet enabled, just reap any pending
	 * mcc completions */
	if (!netif_running(adapter->netdev)) {
		be_process_mcc(adapter);
		goto reschedule;
	}

	/* don't re-issue a stats request while one is still in flight */
	if (!adapter->stats_cmd_sent) {
		if (lancer_chip(adapter))
			lancer_cmd_get_pport_stats(adapter,
						&adapter->stats_cmd);
		else
			be_cmd_get_stats(adapter, &adapter->stats_cmd);
	}

	/* poll die temperature every be_get_temp_freq ticks */
	if (MODULO(adapter->work_counter, adapter->be_get_temp_freq) == 0)
		be_cmd_get_die_temperature(adapter);

	/* refill any RX ring that ran out of buffers under memory pressure */
	for_all_rx_queues(adapter, rxo, i) {
		if (rxo->rx_post_starved) {
			rxo->rx_post_starved = false;
			be_post_rx_frags(rxo, GFP_KERNEL);
		}
	}

	for_all_evt_queues(adapter, eqo, i)
		be_eqd_update(adapter, eqo);

reschedule:
	adapter->work_counter++;
	schedule_delayed_work(&adapter->work, msecs_to_jiffies(1000));
}
3787
Sathya Perla39f1d942012-05-08 19:41:24 +00003788static bool be_reset_required(struct be_adapter *adapter)
3789{
Sathya Perlad79c0a22012-06-05 19:37:22 +00003790 return be_find_vfs(adapter, ENABLED) > 0 ? false : true;
Sathya Perla39f1d942012-05-08 19:41:24 +00003791}
3792
/* PCI probe callback: enable the device, allocate the netdev/adapter,
 * bring up the control path, sync with FW, configure the adapter and
 * register the netdev. Error paths unwind in strict reverse order via
 * the labels at the bottom.
 * Returns 0 on success or a negative errno.
 */
static int __devinit be_probe(struct pci_dev *pdev,
			const struct pci_device_id *pdev_id)
{
	int status = 0;
	struct be_adapter *adapter;
	struct net_device *netdev;

	status = pci_enable_device(pdev);
	if (status)
		goto do_none;

	status = pci_request_regions(pdev, DRV_NAME);
	if (status)
		goto disable_dev;
	pci_set_master(pdev);

	/* the adapter lives in the netdev's private area */
	netdev = alloc_etherdev_mqs(sizeof(*adapter), MAX_TX_QS, MAX_RX_QS);
	if (netdev == NULL) {
		status = -ENOMEM;
		goto rel_reg;
	}
	adapter = netdev_priv(netdev);
	adapter->pdev = pdev;
	pci_set_drvdata(pdev, adapter);

	status = be_dev_type_check(adapter);
	if (status)
		goto free_netdev;

	adapter->netdev = netdev;
	SET_NETDEV_DEV(netdev, &pdev->dev);

	/* prefer 64-bit DMA; fall back to 32-bit */
	status = dma_set_mask(&pdev->dev, DMA_BIT_MASK(64));
	if (!status) {
		netdev->features |= NETIF_F_HIGHDMA;
	} else {
		status = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
		if (status) {
			dev_err(&pdev->dev, "Could not set PCI DMA Mask\n");
			goto free_netdev;
		}
	}

	status = be_ctrl_init(adapter);
	if (status)
		goto free_netdev;

	/* sync up with fw's ready state */
	if (be_physfn(adapter)) {
		status = be_fw_wait_ready(adapter);
		if (status)
			goto ctrl_clean;
	}

	/* tell fw we're ready to fire cmds */
	status = be_cmd_fw_init(adapter);
	if (status)
		goto ctrl_clean;

	/* skip the reset if VFs are already enabled (see be_reset_required) */
	if (be_reset_required(adapter)) {
		status = be_cmd_reset_function(adapter);
		if (status)
			goto ctrl_clean;
	}

	/* The INTR bit may be set in the card when probed by a kdump kernel
	 * after a crash.
	 */
	if (!lancer_chip(adapter))
		be_intr_set(adapter, false);

	status = be_stats_init(adapter);
	if (status)
		goto ctrl_clean;

	status = be_get_initial_config(adapter);
	if (status)
		goto stats_clean;

	INIT_DELAYED_WORK(&adapter->work, be_worker);
	INIT_DELAYED_WORK(&adapter->func_recovery_work, be_func_recovery_task);
	adapter->rx_fc = adapter->tx_fc = true;

	status = be_setup(adapter);
	if (status)
		goto msix_disable;

	be_netdev_init(netdev);
	status = register_netdev(netdev);
	if (status != 0)
		goto unsetup;

	be_roce_dev_add(adapter);

	schedule_delayed_work(&adapter->func_recovery_work,
			      msecs_to_jiffies(1000));
	dev_info(&pdev->dev, "%s: %s port %d\n", netdev->name, nic_name(pdev),
		adapter->port_num);

	return 0;

unsetup:
	be_clear(adapter);
msix_disable:
	be_msix_disable(adapter);
stats_clean:
	be_stats_cleanup(adapter);
ctrl_clean:
	be_ctrl_cleanup(adapter);
free_netdev:
	free_netdev(netdev);
	pci_set_drvdata(pdev, NULL);
rel_reg:
	pci_release_regions(pdev);
disable_dev:
	pci_disable_device(pdev);
do_none:
	dev_err(&pdev->dev, "%s initialization failed\n", nic_name(pdev));
	return status;
}
3913
/* Legacy PM suspend: arm WOL if requested, stop the recovery worker,
 * detach and close the interface, tear down the adapter config and put
 * the PCI device into the requested low-power state.
 */
static int be_suspend(struct pci_dev *pdev, pm_message_t state)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);
	struct net_device *netdev = adapter->netdev;

	if (adapter->wol)
		be_setup_wol(adapter, true);

	cancel_delayed_work_sync(&adapter->func_recovery_work);

	netif_device_detach(netdev);
	if (netif_running(netdev)) {
		rtnl_lock();
		be_close(netdev);
		rtnl_unlock();
	}
	be_clear(adapter);

	pci_save_state(pdev);
	pci_disable_device(pdev);
	pci_set_power_state(pdev, pci_choose_state(pdev, state));
	return 0;
}
3937
/* Legacy PM resume: re-enable the PCI device, re-init the FW channel,
 * rebuild the adapter config, restart the recovery worker and disarm
 * WOL. Mirrors be_suspend() in reverse.
 * NOTE(review): be_setup()'s return value is ignored here — presumably
 * intentional best-effort resume; confirm.
 */
static int be_resume(struct pci_dev *pdev)
{
	int status = 0;
	struct be_adapter *adapter = pci_get_drvdata(pdev);
	struct net_device *netdev = adapter->netdev;

	netif_device_detach(netdev);

	status = pci_enable_device(pdev);
	if (status)
		return status;

	pci_set_power_state(pdev, 0);
	pci_restore_state(pdev);

	/* tell fw we're ready to fire cmds */
	status = be_cmd_fw_init(adapter);
	if (status)
		return status;

	be_setup(adapter);
	if (netif_running(netdev)) {
		rtnl_lock();
		be_open(netdev);
		rtnl_unlock();
	}

	schedule_delayed_work(&adapter->func_recovery_work,
			      msecs_to_jiffies(1000));
	netif_device_attach(netdev);

	if (adapter->wol)
		be_setup_wol(adapter, false);

	return 0;
}
3974
/*
 * Shutdown callback. An FLR (function-level reset, issued via
 * be_cmd_reset_function) will stop BE from DMAing any data.
 */
static void be_shutdown(struct pci_dev *pdev)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);

	if (!adapter)
		return;

	/* stop both periodic workers before touching the device */
	cancel_delayed_work_sync(&adapter->work);
	cancel_delayed_work_sync(&adapter->func_recovery_work);

	netif_device_detach(adapter->netdev);

	if (adapter->wol)
		be_setup_wol(adapter, true);

	be_cmd_reset_function(adapter);

	pci_disable_device(pdev);
}
3997
/* EEH: a PCI channel error was detected. Flag the error (which also
 * suppresses the SLIPORT recovery worker), quiesce the interface and
 * tear down the config, then tell the EEH core whether a slot reset
 * can be attempted.
 */
static pci_ers_result_t be_eeh_err_detected(struct pci_dev *pdev,
				pci_channel_state_t state)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);
	struct net_device *netdev = adapter->netdev;

	dev_err(&adapter->pdev->dev, "EEH error detected\n");

	adapter->eeh_error = true;

	cancel_delayed_work_sync(&adapter->func_recovery_work);

	rtnl_lock();
	netif_device_detach(netdev);
	rtnl_unlock();

	if (netif_running(netdev)) {
		rtnl_lock();
		be_close(netdev);
		rtnl_unlock();
	}
	be_clear(adapter);

	/* permanent failure: no reset will help, disconnect the driver */
	if (state == pci_channel_io_perm_failure)
		return PCI_ERS_RESULT_DISCONNECT;

	pci_disable_device(pdev);

	/* The error could cause the FW to trigger a flash debug dump.
	 * Resetting the card while flash dump is in progress
	 * can cause it not to recover; wait for it to finish
	 */
	ssleep(30);
	return PCI_ERS_RESULT_NEED_RESET;
}
4033
/* EEH: slot reset completed. Clear the driver's error state, re-enable
 * the device and wait for FW readiness; report RECOVERED so the EEH
 * core proceeds to be_eeh_resume(), or DISCONNECT on failure.
 */
static pci_ers_result_t be_eeh_reset(struct pci_dev *pdev)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);
	int status;

	dev_info(&adapter->pdev->dev, "EEH reset\n");
	be_clear_all_error(adapter);

	status = pci_enable_device(pdev);
	if (status)
		return PCI_ERS_RESULT_DISCONNECT;

	pci_set_master(pdev);
	pci_set_power_state(pdev, 0);
	pci_restore_state(pdev);

	/* Check if card is ok and fw is ready */
	status = be_fw_wait_ready(adapter);
	if (status)
		return PCI_ERS_RESULT_DISCONNECT;

	return PCI_ERS_RESULT_RECOVERED;
}
4057
/* EEH: final resume step after a successful slot reset. Re-initialize
 * the FW channel, reset and rebuild the function, re-open the interface
 * if it was running and restart the recovery worker. Failures are only
 * logged — the EEH resume callback returns void.
 */
static void be_eeh_resume(struct pci_dev *pdev)
{
	int status = 0;
	struct be_adapter *adapter = pci_get_drvdata(pdev);
	struct net_device *netdev = adapter->netdev;

	dev_info(&adapter->pdev->dev, "EEH resume\n");

	pci_save_state(pdev);

	/* tell fw we're ready to fire cmds */
	status = be_cmd_fw_init(adapter);
	if (status)
		goto err;

	status = be_cmd_reset_function(adapter);
	if (status)
		goto err;

	status = be_setup(adapter);
	if (status)
		goto err;

	if (netif_running(netdev)) {
		status = be_open(netdev);
		if (status)
			goto err;
	}

	schedule_delayed_work(&adapter->func_recovery_work,
			      msecs_to_jiffies(1000));
	netif_device_attach(netdev);
	return;
err:
	dev_err(&adapter->pdev->dev, "EEH resume failed\n");
}
4094
/* PCI error-recovery (EEH) callbacks registered via be_driver */
static struct pci_error_handlers be_eeh_handlers = {
	.error_detected = be_eeh_err_detected,
	.slot_reset = be_eeh_reset,
	.resume = be_eeh_resume,
};
4100
/* PCI driver descriptor tying together probe/remove, legacy PM and
 * EEH error handling for the device IDs in be_dev_ids.
 */
static struct pci_driver be_driver = {
	.name = DRV_NAME,
	.id_table = be_dev_ids,
	.probe = be_probe,
	.remove = be_remove,
	.suspend = be_suspend,
	.resume = be_resume,
	.shutdown = be_shutdown,
	.err_handler = &be_eeh_handlers
};
4111
4112static int __init be_init_module(void)
4113{
Joe Perches8e95a202009-12-03 07:58:21 +00004114 if (rx_frag_size != 8192 && rx_frag_size != 4096 &&
4115 rx_frag_size != 2048) {
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004116 printk(KERN_WARNING DRV_NAME
4117 " : Module param rx_frag_size must be 2048/4096/8192."
4118 " Using 2048\n");
4119 rx_frag_size = 2048;
4120 }
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004121
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004122 return pci_register_driver(&be_driver);
4123}
4124module_init(be_init_module);
4125
/* Module unload: unregister the driver (be_remove runs per device) */
static void __exit be_exit_module(void)
{
	pci_unregister_driver(&be_driver);
}
4130module_exit(be_exit_module);