blob: 36ad9a7ef4ba7e1751363721104de12223c6d0e2 [file] [log] [blame]
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001/*
Ajit Khaparded2145cd2011-03-16 08:20:46 +00002 * Copyright (C) 2005 - 2011 Emulex
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003 * All rights reserved.
4 *
5 * This program is free software; you can redistribute it and/or
6 * modify it under the terms of the GNU General Public License version 2
7 * as published by the Free Software Foundation. The full GNU General
8 * Public License is included in this distribution in the file called COPYING.
9 *
10 * Contact Information:
Ajit Khaparded2145cd2011-03-16 08:20:46 +000011 * linux-drivers@emulex.com
Sathya Perla6b7c5b92009-03-11 23:32:03 -070012 *
Ajit Khaparded2145cd2011-03-16 08:20:46 +000013 * Emulex
14 * 3333 Susan Street
15 * Costa Mesa, CA 92626
Sathya Perla6b7c5b92009-03-11 23:32:03 -070016 */
17
Paul Gortmaker70c71602011-05-22 16:47:17 -040018#include <linux/prefetch.h>
Paul Gortmaker9d9779e2011-07-03 15:21:01 -040019#include <linux/module.h>
Sathya Perla6b7c5b92009-03-11 23:32:03 -070020#include "be.h"
Sathya Perla8788fdc2009-07-27 22:52:03 +000021#include "be_cmds.h"
Stephen Hemminger65f71b82009-03-27 00:25:24 -070022#include <asm/div64.h>
Sathya Perla6b7c5b92009-03-11 23:32:03 -070023
24MODULE_VERSION(DRV_VER);
25MODULE_DEVICE_TABLE(pci, be_dev_ids);
26MODULE_DESCRIPTION(DRV_DESC " " DRV_VER);
27MODULE_AUTHOR("ServerEngines Corporation");
28MODULE_LICENSE("GPL");
29
Sarveshwar Bandiba343c72010-03-31 02:56:12 +000030static unsigned int num_vfs;
Sarveshwar Bandiba343c72010-03-31 02:56:12 +000031module_param(num_vfs, uint, S_IRUGO);
Sarveshwar Bandiba343c72010-03-31 02:56:12 +000032MODULE_PARM_DESC(num_vfs, "Number of PCI VFs to initialize");
Sathya Perla6b7c5b92009-03-11 23:32:03 -070033
Sathya Perla11ac75e2011-12-13 00:58:50 +000034static ushort rx_frag_size = 2048;
35module_param(rx_frag_size, ushort, S_IRUGO);
36MODULE_PARM_DESC(rx_frag_size, "Size of a fragment that holds rcvd data.");
37
/* PCI IDs this driver binds to: BladeEngine (ServerEngines vendor ID) and
 * OneConnect (Emulex vendor ID) variants.  The table is zero-terminated as
 * required by the PCI core.
 */
static DEFINE_PCI_DEVICE_TABLE(be_dev_ids) = {
	{ PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID1) },
	{ PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID2) },
	{ PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID1) },
	{ PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID2) },
	{ PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID3)},
	{ PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID4)},
	{ PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID5)},
	{ 0 }
};
MODULE_DEVICE_TABLE(pci, be_dev_ids);
/* UE Status Low CSR */
/* Human-readable names for each bit of the Unrecoverable Error status-low
 * register, indexed by bit position.  Some entries carry trailing spaces —
 * they are emitted verbatim in log messages, so do not "clean them up".
 */
static const char * const ue_status_low_desc[] = {
	"CEV",
	"CTX",
	"DBUF",
	"ERX",
	"Host",
	"MPU",
	"NDMA",
	"PTC ",
	"RDMA ",
	"RXF ",
	"RXIPS ",
	"RXULP0 ",
	"RXULP1 ",
	"RXULP2 ",
	"TIM ",
	"TPOST ",
	"TPRE ",
	"TXIPS ",
	"TXULP0 ",
	"TXULP1 ",
	"UC ",
	"WDMA ",
	"TXULP2 ",
	"HOST1 ",
	"P0_OB_LINK ",
	"P1_OB_LINK ",
	"HOST_GPIO ",
	"MBOX ",
	"AXGMAC0",
	"AXGMAC1",
	"JTAG",
	"MPU_INTPEND"
};
/* UE Status High CSR */
/* Bit-position-indexed names for the Unrecoverable Error status-high
 * register; "Unknown" entries pad out reserved/undocumented bits.
 */
static const char * const ue_status_hi_desc[] = {
	"LPCMEMHOST",
	"MGMT_MAC",
	"PCS0ONLINE",
	"MPU_IRAM",
	"PCS1ONLINE",
	"PCTL0",
	"PCTL1",
	"PMEM",
	"RR",
	"TXPB",
	"RXPP",
	"XAUI",
	"TXP",
	"ARM",
	"IPC",
	"HOST2",
	"HOST3",
	"HOST4",
	"HOST5",
	"HOST6",
	"HOST7",
	"HOST8",
	"HOST9",
	"NETC",
	"Unknown",
	"Unknown",
	"Unknown",
	"Unknown",
	"Unknown",
	"Unknown",
	"Unknown",
	"Unknown"
};
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700119
Sathya Perla752961a2011-10-24 02:45:03 +0000120/* Is BE in a multi-channel mode */
121static inline bool be_is_mc(struct be_adapter *adapter) {
122 return (adapter->function_mode & FLEX10_MODE ||
123 adapter->function_mode & VNIC_MODE ||
124 adapter->function_mode & UMC_ENABLED);
125}
126
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700127static void be_queue_free(struct be_adapter *adapter, struct be_queue_info *q)
128{
129 struct be_dma_mem *mem = &q->dma_mem;
130 if (mem->va)
Ivan Vecera2b7bceb2011-02-02 08:05:12 +0000131 dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
132 mem->dma);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700133}
134
135static int be_queue_alloc(struct be_adapter *adapter, struct be_queue_info *q,
136 u16 len, u16 entry_size)
137{
138 struct be_dma_mem *mem = &q->dma_mem;
139
140 memset(q, 0, sizeof(*q));
141 q->len = len;
142 q->entry_size = entry_size;
143 mem->size = len * entry_size;
Ivan Vecera2b7bceb2011-02-02 08:05:12 +0000144 mem->va = dma_alloc_coherent(&adapter->pdev->dev, mem->size, &mem->dma,
145 GFP_KERNEL);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700146 if (!mem->va)
Sathya Perla10ef9ab2012-02-09 18:05:27 +0000147 return -ENOMEM;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700148 memset(mem->va, 0, mem->size);
149 return 0;
150}
151
Sathya Perla8788fdc2009-07-27 22:52:03 +0000152static void be_intr_set(struct be_adapter *adapter, bool enable)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700153{
Sathya Perladb3ea782011-08-22 19:41:52 +0000154 u32 reg, enabled;
Sathya Perla5f0b8492009-07-27 22:52:56 +0000155
Sathya Perlacf588472010-02-14 21:22:01 +0000156 if (adapter->eeh_err)
157 return;
158
Sathya Perladb3ea782011-08-22 19:41:52 +0000159 pci_read_config_dword(adapter->pdev, PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET,
160 &reg);
161 enabled = reg & MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
162
Sathya Perla5f0b8492009-07-27 22:52:56 +0000163 if (!enabled && enable)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700164 reg |= MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
Sathya Perla5f0b8492009-07-27 22:52:56 +0000165 else if (enabled && !enable)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700166 reg &= ~MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
Sathya Perla5f0b8492009-07-27 22:52:56 +0000167 else
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700168 return;
Sathya Perla5f0b8492009-07-27 22:52:56 +0000169
Sathya Perladb3ea782011-08-22 19:41:52 +0000170 pci_write_config_dword(adapter->pdev,
171 PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET, reg);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700172}
173
Sathya Perla8788fdc2009-07-27 22:52:03 +0000174static void be_rxq_notify(struct be_adapter *adapter, u16 qid, u16 posted)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700175{
176 u32 val = 0;
177 val |= qid & DB_RQ_RING_ID_MASK;
178 val |= posted << DB_RQ_NUM_POSTED_SHIFT;
Sathya Perlaf3eb62d2010-06-29 00:11:17 +0000179
180 wmb();
Sathya Perla8788fdc2009-07-27 22:52:03 +0000181 iowrite32(val, adapter->db + DB_RQ_OFFSET);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700182}
183
Sathya Perla8788fdc2009-07-27 22:52:03 +0000184static void be_txq_notify(struct be_adapter *adapter, u16 qid, u16 posted)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700185{
186 u32 val = 0;
187 val |= qid & DB_TXULP_RING_ID_MASK;
188 val |= (posted & DB_TXULP_NUM_POSTED_MASK) << DB_TXULP_NUM_POSTED_SHIFT;
Sathya Perlaf3eb62d2010-06-29 00:11:17 +0000189
190 wmb();
Sathya Perla8788fdc2009-07-27 22:52:03 +0000191 iowrite32(val, adapter->db + DB_TXULP1_OFFSET);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700192}
193
/* Notify the event-queue doorbell for EQ @qid.
 * @arm:        re-arm the EQ so it raises further interrupts
 * @clear_int:  clear the pending interrupt for this EQ
 * @num_popped: number of event entries the driver has consumed
 * Silently returns if the device is in EEH error state.
 */
static void be_eq_notify(struct be_adapter *adapter, u16 qid,
		bool arm, bool clear_int, u16 num_popped)
{
	u32 val = 0;
	val |= qid & DB_EQ_RING_ID_MASK;
	/* high bits of the ring id go into a separate "ext" field */
	val |= ((qid & DB_EQ_RING_ID_EXT_MASK) <<
			DB_EQ_RING_ID_EXT_MASK_SHIFT);

	if (adapter->eeh_err)
		return;

	if (arm)
		val |= 1 << DB_EQ_REARM_SHIFT;
	if (clear_int)
		val |= 1 << DB_EQ_CLR_SHIFT;
	/* mark this doorbell as targeting an event queue */
	val |= 1 << DB_EQ_EVNT_SHIFT;
	val |= num_popped << DB_EQ_NUM_POPPED_SHIFT;
	iowrite32(val, adapter->db + DB_EQ_OFFSET);
}
213
/* Notify the completion-queue doorbell for CQ @qid.
 * @arm:        re-arm the CQ for further event generation
 * @num_popped: number of completion entries the driver has consumed
 * Silently returns if the device is in EEH error state.
 */
void be_cq_notify(struct be_adapter *adapter, u16 qid, bool arm, u16 num_popped)
{
	u32 val = 0;
	val |= qid & DB_CQ_RING_ID_MASK;
	/* high bits of the ring id go into a separate "ext" field */
	val |= ((qid & DB_CQ_RING_ID_EXT_MASK) <<
			DB_CQ_RING_ID_EXT_MASK_SHIFT);

	if (adapter->eeh_err)
		return;

	if (arm)
		val |= 1 << DB_CQ_REARM_SHIFT;
	val |= num_popped << DB_CQ_NUM_POPPED_SHIFT;
	iowrite32(val, adapter->db + DB_CQ_OFFSET);
}
229
/* ndo_set_mac_address handler: program a new MAC on the interface.
 * Queries the currently-programmed MAC first; if it differs from the
 * requested one, the new pmac entry is ADDED before the old one is DELETED
 * (add-then-delete, so the port is never left without a MAC).  On success
 * netdev->dev_addr is updated; on any FW command failure the error is
 * logged and returned, leaving dev_addr unchanged.
 */
static int be_mac_addr_set(struct net_device *netdev, void *p)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct sockaddr *addr = p;
	int status = 0;
	u8 current_mac[ETH_ALEN];
	/* remember the old pmac id: be_cmd_pmac_add() overwrites
	 * adapter->pmac_id with the id of the newly added entry */
	u32 pmac_id = adapter->pmac_id;

	if (!is_valid_ether_addr(addr->sa_data))
		return -EADDRNOTAVAIL;

	status = be_cmd_mac_addr_query(adapter, current_mac,
				MAC_ADDRESS_TYPE_NETWORK, false,
				adapter->if_handle, 0);
	if (status)
		goto err;

	if (memcmp(addr->sa_data, current_mac, ETH_ALEN)) {
		status = be_cmd_pmac_add(adapter, (u8 *)addr->sa_data,
				adapter->if_handle, &adapter->pmac_id, 0);
		if (status)
			goto err;

		/* new MAC in place; now retire the previous entry */
		be_cmd_pmac_del(adapter, adapter->if_handle, pmac_id, 0);
	}
	memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
	return 0;
err:
	dev_err(&adapter->pdev->dev, "MAC %pM set Failed\n", addr->sa_data);
	return status;
}
261
/* Copy the v0 (BE2) hardware statistics block into the driver's
 * chip-independent drv_stats.  The raw stats are byte-swapped in place
 * (LE -> CPU) before the field-for-field copy; per-port counters come
 * from the slot selected by adapter->port_num.
 */
static void populate_be2_stats(struct be_adapter *adapter)
{
	struct be_hw_stats_v0 *hw_stats = hw_stats_from_cmd(adapter);
	struct be_pmem_stats *pmem_sts = &hw_stats->pmem;
	struct be_rxf_stats_v0 *rxf_stats = &hw_stats->rxf;
	struct be_port_rxf_stats_v0 *port_stats =
					&rxf_stats->port[adapter->port_num];
	struct be_drv_stats *drvs = &adapter->drv_stats;

	be_dws_le_to_cpu(hw_stats, sizeof(*hw_stats));
	drvs->rx_pause_frames = port_stats->rx_pause_frames;
	drvs->rx_crc_errors = port_stats->rx_crc_errors;
	drvs->rx_control_frames = port_stats->rx_control_frames;
	drvs->rx_in_range_errors = port_stats->rx_in_range_errors;
	drvs->rx_frame_too_long = port_stats->rx_frame_too_long;
	drvs->rx_dropped_runt = port_stats->rx_dropped_runt;
	drvs->rx_ip_checksum_errs = port_stats->rx_ip_checksum_errs;
	drvs->rx_tcp_checksum_errs = port_stats->rx_tcp_checksum_errs;
	drvs->rx_udp_checksum_errs = port_stats->rx_udp_checksum_errs;
	drvs->rxpp_fifo_overflow_drop = port_stats->rx_fifo_overflow;
	drvs->rx_dropped_tcp_length = port_stats->rx_dropped_tcp_length;
	drvs->rx_dropped_too_small = port_stats->rx_dropped_too_small;
	drvs->rx_dropped_too_short = port_stats->rx_dropped_too_short;
	drvs->rx_out_range_errors = port_stats->rx_out_range_errors;
	drvs->rx_input_fifo_overflow_drop = port_stats->rx_input_fifo_overflow;
	drvs->rx_dropped_header_too_small =
		port_stats->rx_dropped_header_too_small;
	/* v0 HW splits address/vlan mismatch drops; the driver sums them */
	drvs->rx_address_mismatch_drops =
					port_stats->rx_address_mismatch_drops +
					port_stats->rx_vlan_mismatch_drops;
	drvs->rx_alignment_symbol_errors =
		port_stats->rx_alignment_symbol_errors;

	drvs->tx_pauseframes = port_stats->tx_pauseframes;
	drvs->tx_controlframes = port_stats->tx_controlframes;

	/* jabber events are tracked per physical port, not per function */
	if (adapter->port_num)
		drvs->jabber_events = rxf_stats->port1_jabber_events;
	else
		drvs->jabber_events = rxf_stats->port0_jabber_events;
	drvs->rx_drops_no_pbuf = rxf_stats->rx_drops_no_pbuf;
	drvs->rx_drops_no_erx_descr = rxf_stats->rx_drops_no_erx_descr;
	drvs->forwarded_packets = rxf_stats->forwarded_packets;
	drvs->rx_drops_mtu = rxf_stats->rx_drops_mtu;
	drvs->rx_drops_no_tpre_descr = rxf_stats->rx_drops_no_tpre_descr;
	drvs->rx_drops_too_many_frags = rxf_stats->rx_drops_too_many_frags;
	adapter->drv_stats.eth_red_drops = pmem_sts->eth_red_drops;
}
310
/* Copy the v1 (BE3) hardware statistics block into the driver's
 * chip-independent drv_stats.  Same pattern as populate_be2_stats() but
 * the v1 layout exposes a few extra counters (pmem fifo overflow,
 * priority pause frames) and per-port jabber events.
 */
static void populate_be3_stats(struct be_adapter *adapter)
{
	struct be_hw_stats_v1 *hw_stats = hw_stats_from_cmd(adapter);
	struct be_pmem_stats *pmem_sts = &hw_stats->pmem;
	struct be_rxf_stats_v1 *rxf_stats = &hw_stats->rxf;
	struct be_port_rxf_stats_v1 *port_stats =
					&rxf_stats->port[adapter->port_num];
	struct be_drv_stats *drvs = &adapter->drv_stats;

	be_dws_le_to_cpu(hw_stats, sizeof(*hw_stats));
	drvs->pmem_fifo_overflow_drop = port_stats->pmem_fifo_overflow_drop;
	drvs->rx_priority_pause_frames = port_stats->rx_priority_pause_frames;
	drvs->rx_pause_frames = port_stats->rx_pause_frames;
	drvs->rx_crc_errors = port_stats->rx_crc_errors;
	drvs->rx_control_frames = port_stats->rx_control_frames;
	drvs->rx_in_range_errors = port_stats->rx_in_range_errors;
	drvs->rx_frame_too_long = port_stats->rx_frame_too_long;
	drvs->rx_dropped_runt = port_stats->rx_dropped_runt;
	drvs->rx_ip_checksum_errs = port_stats->rx_ip_checksum_errs;
	drvs->rx_tcp_checksum_errs = port_stats->rx_tcp_checksum_errs;
	drvs->rx_udp_checksum_errs = port_stats->rx_udp_checksum_errs;
	drvs->rx_dropped_tcp_length = port_stats->rx_dropped_tcp_length;
	drvs->rx_dropped_too_small = port_stats->rx_dropped_too_small;
	drvs->rx_dropped_too_short = port_stats->rx_dropped_too_short;
	drvs->rx_out_range_errors = port_stats->rx_out_range_errors;
	drvs->rx_dropped_header_too_small =
		port_stats->rx_dropped_header_too_small;
	drvs->rx_input_fifo_overflow_drop =
		port_stats->rx_input_fifo_overflow_drop;
	/* v1 HW already folds vlan mismatches into this counter */
	drvs->rx_address_mismatch_drops = port_stats->rx_address_mismatch_drops;
	drvs->rx_alignment_symbol_errors =
		port_stats->rx_alignment_symbol_errors;
	drvs->rxpp_fifo_overflow_drop = port_stats->rxpp_fifo_overflow_drop;
	drvs->tx_pauseframes = port_stats->tx_pauseframes;
	drvs->tx_controlframes = port_stats->tx_controlframes;
	drvs->jabber_events = port_stats->jabber_events;
	drvs->rx_drops_no_pbuf = rxf_stats->rx_drops_no_pbuf;
	drvs->rx_drops_no_erx_descr = rxf_stats->rx_drops_no_erx_descr;
	drvs->forwarded_packets = rxf_stats->forwarded_packets;
	drvs->rx_drops_mtu = rxf_stats->rx_drops_mtu;
	drvs->rx_drops_no_tpre_descr = rxf_stats->rx_drops_no_tpre_descr;
	drvs->rx_drops_too_many_frags = rxf_stats->rx_drops_too_many_frags;
	adapter->drv_stats.eth_red_drops = pmem_sts->eth_red_drops;
}
355
/* Copy the Lancer per-port (pport) statistics block into the driver's
 * chip-independent drv_stats.  Lancer exposes 64-bit counters; only the
 * low 32 bits (the *_lo fields) are folded into the 32-bit drv_stats.
 */
static void populate_lancer_stats(struct be_adapter *adapter)
{

	struct be_drv_stats *drvs = &adapter->drv_stats;
	struct lancer_pport_stats *pport_stats =
					pport_stats_from_cmd(adapter);

	be_dws_le_to_cpu(pport_stats, sizeof(*pport_stats));
	drvs->rx_pause_frames = pport_stats->rx_pause_frames_lo;
	drvs->rx_crc_errors = pport_stats->rx_crc_errors_lo;
	drvs->rx_control_frames = pport_stats->rx_control_frames_lo;
	drvs->rx_in_range_errors = pport_stats->rx_in_range_errors;
	drvs->rx_frame_too_long = pport_stats->rx_frames_too_long_lo;
	drvs->rx_dropped_runt = pport_stats->rx_dropped_runt;
	drvs->rx_ip_checksum_errs = pport_stats->rx_ip_checksum_errors;
	drvs->rx_tcp_checksum_errs = pport_stats->rx_tcp_checksum_errors;
	drvs->rx_udp_checksum_errs = pport_stats->rx_udp_checksum_errors;
	drvs->rx_dropped_tcp_length =
				pport_stats->rx_dropped_invalid_tcp_length;
	drvs->rx_dropped_too_small = pport_stats->rx_dropped_too_small;
	drvs->rx_dropped_too_short = pport_stats->rx_dropped_too_short;
	drvs->rx_out_range_errors = pport_stats->rx_out_of_range_errors;
	drvs->rx_dropped_header_too_small =
				pport_stats->rx_dropped_header_too_small;
	drvs->rx_input_fifo_overflow_drop = pport_stats->rx_fifo_overflow;
	/* Lancer splits address/vlan mismatch drops; the driver sums them */
	drvs->rx_address_mismatch_drops =
					pport_stats->rx_address_mismatch_drops +
					pport_stats->rx_vlan_mismatch_drops;
	drvs->rx_alignment_symbol_errors = pport_stats->rx_symbol_errors_lo;
	/* NOTE(review): rx_fifo_overflow feeds both input-fifo and rxpp
	 * drop counters here — presumably Lancer has a single counter for
	 * both; confirm against the pport stats definition. */
	drvs->rxpp_fifo_overflow_drop = pport_stats->rx_fifo_overflow;
	drvs->tx_pauseframes = pport_stats->tx_pause_frames_lo;
	drvs->tx_controlframes = pport_stats->tx_control_frames_lo;
	drvs->jabber_events = pport_stats->rx_jabbers;
	drvs->forwarded_packets = pport_stats->num_forwards_lo;
	drvs->rx_drops_mtu = pport_stats->rx_drops_mtu_lo;
	drvs->rx_drops_too_many_frags =
				pport_stats->rx_drops_too_many_frags_lo;
}
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000394
Sathya Perla09c1c682011-08-22 19:41:53 +0000395static void accumulate_16bit_val(u32 *acc, u16 val)
396{
397#define lo(x) (x & 0xFFFF)
398#define hi(x) (x & 0xFFFF0000)
399 bool wrapped = val < lo(*acc);
400 u32 newacc = hi(*acc) + val;
401
402 if (wrapped)
403 newacc += 65536;
404 ACCESS_ONCE(*acc) = newacc;
405}
406
/* Parse the raw FW stats response into drv_stats, dispatching on chip
 * generation (BE2 / BE3 / Lancer), then accumulate the per-RX-queue
 * no-fragments drop counters, which are only 16 bits wide in HW.
 */
void be_parse_stats(struct be_adapter *adapter)
{
	struct be_erx_stats_v1 *erx = be_erx_stats_from_cmd(adapter);
	struct be_rx_obj *rxo;
	int i;

	if (adapter->generation == BE_GEN3) {
		if (lancer_chip(adapter))
			populate_lancer_stats(adapter);
		else
			populate_be3_stats(adapter);
	} else {
		populate_be2_stats(adapter);
	}

	/* as erx_v1 is longer than v0, ok to use v1 defn for v0 access */
	for_all_rx_queues(adapter, rxo, i) {
		/* below erx HW counter can actually wrap around after
		 * 65535. Driver accumulates a 32-bit value
		 */
		accumulate_16bit_val(&rx_stats(rxo)->rx_drops_no_frags,
				(u16)erx->rx_drops_no_fragments[rxo->q.id]);
	}
}
431
/* ndo_get_stats64 handler: aggregate per-queue packet/byte counters and
 * map driver/HW error counters onto the standard rtnl_link_stats64
 * fields.  Per-queue counters are read under u64_stats fetch/retry loops
 * so 64-bit values are consistent even on 32-bit hosts.
 */
static struct rtnl_link_stats64 *be_get_stats64(struct net_device *netdev,
					struct rtnl_link_stats64 *stats)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_drv_stats *drvs = &adapter->drv_stats;
	struct be_rx_obj *rxo;
	struct be_tx_obj *txo;
	u64 pkts, bytes;
	unsigned int start;
	int i;

	for_all_rx_queues(adapter, rxo, i) {
		const struct be_rx_stats *rx_stats = rx_stats(rxo);
		/* retry until a consistent snapshot of pkts/bytes is read */
		do {
			start = u64_stats_fetch_begin_bh(&rx_stats->sync);
			pkts = rx_stats(rxo)->rx_pkts;
			bytes = rx_stats(rxo)->rx_bytes;
		} while (u64_stats_fetch_retry_bh(&rx_stats->sync, start));
		stats->rx_packets += pkts;
		stats->rx_bytes += bytes;
		stats->multicast += rx_stats(rxo)->rx_mcast_pkts;
		stats->rx_dropped += rx_stats(rxo)->rx_drops_no_skbs +
					rx_stats(rxo)->rx_drops_no_frags;
	}

	for_all_tx_queues(adapter, txo, i) {
		const struct be_tx_stats *tx_stats = tx_stats(txo);
		do {
			start = u64_stats_fetch_begin_bh(&tx_stats->sync);
			pkts = tx_stats(txo)->tx_pkts;
			bytes = tx_stats(txo)->tx_bytes;
		} while (u64_stats_fetch_retry_bh(&tx_stats->sync, start));
		stats->tx_packets += pkts;
		stats->tx_bytes += bytes;
	}

	/* bad pkts received */
	stats->rx_errors = drvs->rx_crc_errors +
		drvs->rx_alignment_symbol_errors +
		drvs->rx_in_range_errors +
		drvs->rx_out_range_errors +
		drvs->rx_frame_too_long +
		drvs->rx_dropped_too_small +
		drvs->rx_dropped_too_short +
		drvs->rx_dropped_header_too_small +
		drvs->rx_dropped_tcp_length +
		drvs->rx_dropped_runt;

	/* detailed rx errors */
	stats->rx_length_errors = drvs->rx_in_range_errors +
		drvs->rx_out_range_errors +
		drvs->rx_frame_too_long;

	stats->rx_crc_errors = drvs->rx_crc_errors;

	/* frame alignment errors */
	stats->rx_frame_errors = drvs->rx_alignment_symbol_errors;

	/* receiver fifo overrun */
	/* drops_no_pbuf is no per i/f, it's per BE card */
	stats->rx_fifo_errors = drvs->rxpp_fifo_overflow_drop +
				drvs->rx_input_fifo_overflow_drop +
				drvs->rx_drops_no_pbuf;
	return stats;
}
497
Ajit Khapardeb236916a2011-12-30 12:15:40 +0000498void be_link_status_update(struct be_adapter *adapter, u8 link_status)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700499{
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700500 struct net_device *netdev = adapter->netdev;
501
Ajit Khapardeb236916a2011-12-30 12:15:40 +0000502 if (!(adapter->flags & BE_FLAGS_LINK_STATUS_INIT)) {
Sathya Perlaea172a02011-08-02 19:57:42 +0000503 netif_carrier_off(netdev);
Ajit Khapardeb236916a2011-12-30 12:15:40 +0000504 adapter->flags |= BE_FLAGS_LINK_STATUS_INIT;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700505 }
Ajit Khapardeb236916a2011-12-30 12:15:40 +0000506
507 if ((link_status & LINK_STATUS_MASK) == LINK_UP)
508 netif_carrier_on(netdev);
509 else
510 netif_carrier_off(netdev);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700511}
512
Sathya Perla3c8def92011-06-12 20:01:58 +0000513static void be_tx_stats_update(struct be_tx_obj *txo,
Ajit Khaparde91992e42010-02-19 13:57:12 +0000514 u32 wrb_cnt, u32 copied, u32 gso_segs, bool stopped)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700515{
Sathya Perla3c8def92011-06-12 20:01:58 +0000516 struct be_tx_stats *stats = tx_stats(txo);
517
Sathya Perlaab1594e2011-07-25 19:10:15 +0000518 u64_stats_update_begin(&stats->sync);
Sathya Perlaac124ff2011-07-25 19:10:14 +0000519 stats->tx_reqs++;
520 stats->tx_wrbs += wrb_cnt;
521 stats->tx_bytes += copied;
522 stats->tx_pkts += (gso_segs ? gso_segs : 1);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700523 if (stopped)
Sathya Perlaac124ff2011-07-25 19:10:14 +0000524 stats->tx_stops++;
Sathya Perlaab1594e2011-07-25 19:10:15 +0000525 u64_stats_update_end(&stats->sync);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700526}
527
528/* Determine number of WRB entries needed to xmit data in an skb */
Sathya Perlafe6d2a32010-11-21 23:25:50 +0000529static u32 wrb_cnt_for_skb(struct be_adapter *adapter, struct sk_buff *skb,
530 bool *dummy)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700531{
David S. Millerebc8d2a2009-06-09 01:01:31 -0700532 int cnt = (skb->len > skb->data_len);
533
534 cnt += skb_shinfo(skb)->nr_frags;
535
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700536 /* to account for hdr wrb */
537 cnt++;
Sathya Perlafe6d2a32010-11-21 23:25:50 +0000538 if (lancer_chip(adapter) || !(cnt & 1)) {
539 *dummy = false;
540 } else {
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700541 /* add a dummy to make it an even num */
542 cnt++;
543 *dummy = true;
Sathya Perlafe6d2a32010-11-21 23:25:50 +0000544 }
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700545 BUG_ON(cnt > BE_MAX_TX_FRAG_COUNT);
546 return cnt;
547}
548
/* Fill a TX work-request block with the DMA address and length of one
 * fragment.  The 64-bit bus address is split across the hi/lo fields;
 * the length is masked to the width HW accepts.
 */
static inline void wrb_fill(struct be_eth_wrb *wrb, u64 addr, int len)
{
	wrb->frag_pa_hi = upper_32_bits(addr);
	wrb->frag_pa_lo = addr & 0xFFFFFFFF;
	wrb->frag_len = len & ETH_WRB_FRAG_LEN_MASK;
}
555
Ajit Khaparde1ded1322011-12-09 13:53:17 +0000556static inline u16 be_get_tx_vlan_tag(struct be_adapter *adapter,
557 struct sk_buff *skb)
558{
559 u8 vlan_prio;
560 u16 vlan_tag;
561
562 vlan_tag = vlan_tx_tag_get(skb);
563 vlan_prio = (vlan_tag & VLAN_PRIO_MASK) >> VLAN_PRIO_SHIFT;
564 /* If vlan priority provided by OS is NOT in available bmap */
565 if (!(adapter->vlan_prio_bmap & (1 << vlan_prio)))
566 vlan_tag = (vlan_tag & ~VLAN_PRIO_MASK) |
567 adapter->recommended_prio;
568
569 return vlan_tag;
570}
571
/* Program the header WRB that leads every TX request.
 * @wrb_cnt: total wrbs (incl. this header and any dummy) in the request
 * @len:     total payload length queued
 * Sets LSO/checksum-offload/vlan bits from the skb's offload state; the
 * Lancer A0 branch works around that silicon's checksum quirks for LSO.
 */
static void wrb_fill_hdr(struct be_adapter *adapter, struct be_eth_hdr_wrb *hdr,
		struct sk_buff *skb, u32 wrb_cnt, u32 len)
{
	u16 vlan_tag;

	memset(hdr, 0, sizeof(*hdr));

	AMAP_SET_BITS(struct amap_eth_hdr_wrb, crc, hdr, 1);

	if (skb_is_gso(skb)) {
		AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso, hdr, 1);
		AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso_mss,
			hdr, skb_shinfo(skb)->gso_size);
		if (skb_is_gso_v6(skb) && !lancer_chip(adapter))
			AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso6, hdr, 1);
		/* Lancer A0 needs explicit ip/l4 csum bits even for LSO */
		if (lancer_chip(adapter) && adapter->sli_family ==
							LANCER_A0_SLI_FAMILY) {
			AMAP_SET_BITS(struct amap_eth_hdr_wrb, ipcs, hdr, 1);
			if (is_tcp_pkt(skb))
				AMAP_SET_BITS(struct amap_eth_hdr_wrb,
								tcpcs, hdr, 1);
			else if (is_udp_pkt(skb))
				AMAP_SET_BITS(struct amap_eth_hdr_wrb,
								udpcs, hdr, 1);
		}
	} else if (skb->ip_summed == CHECKSUM_PARTIAL) {
		if (is_tcp_pkt(skb))
			AMAP_SET_BITS(struct amap_eth_hdr_wrb, tcpcs, hdr, 1);
		else if (is_udp_pkt(skb))
			AMAP_SET_BITS(struct amap_eth_hdr_wrb, udpcs, hdr, 1);
	}

	if (vlan_tx_tag_present(skb)) {
		AMAP_SET_BITS(struct amap_eth_hdr_wrb, vlan, hdr, 1);
		vlan_tag = be_get_tx_vlan_tag(adapter, skb);
		AMAP_SET_BITS(struct amap_eth_hdr_wrb, vlan_tag, hdr, vlan_tag);
	}

	AMAP_SET_BITS(struct amap_eth_hdr_wrb, event, hdr, 1);
	AMAP_SET_BITS(struct amap_eth_hdr_wrb, complete, hdr, 1);
	AMAP_SET_BITS(struct amap_eth_hdr_wrb, num_wrb, hdr, wrb_cnt);
	AMAP_SET_BITS(struct amap_eth_hdr_wrb, len, hdr, len);
}
615
Ivan Vecera2b7bceb2011-02-02 08:05:12 +0000616static void unmap_tx_frag(struct device *dev, struct be_eth_wrb *wrb,
Sathya Perla7101e112010-03-22 20:41:12 +0000617 bool unmap_single)
618{
619 dma_addr_t dma;
620
621 be_dws_le_to_cpu(wrb, sizeof(*wrb));
622
623 dma = (u64)wrb->frag_pa_hi << 32 | (u64)wrb->frag_pa_lo;
FUJITA Tomonorib681ee72010-04-04 21:40:18 +0000624 if (wrb->frag_len) {
Sathya Perla7101e112010-03-22 20:41:12 +0000625 if (unmap_single)
Ivan Vecera2b7bceb2011-02-02 08:05:12 +0000626 dma_unmap_single(dev, dma, wrb->frag_len,
627 DMA_TO_DEVICE);
Sathya Perla7101e112010-03-22 20:41:12 +0000628 else
Ivan Vecera2b7bceb2011-02-02 08:05:12 +0000629 dma_unmap_page(dev, dma, wrb->frag_len, DMA_TO_DEVICE);
Sathya Perla7101e112010-03-22 20:41:12 +0000630 }
631}
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700632
/* Map the skb's data into the TX queue as a chain of WRBs.
 * Layout produced: one header WRB, then one WRB for the linear (head)
 * data if present, one WRB per paged fragment, and optionally a dummy
 * WRB (some chips require an even WRB count).
 *
 * Returns the number of data bytes mapped (header/dummy WRBs carry no
 * payload), or 0 if any DMA mapping failed; on failure all mappings
 * made so far are undone and txq->head is restored.
 */
static int make_tx_wrbs(struct be_adapter *adapter, struct be_queue_info *txq,
		struct sk_buff *skb, u32 wrb_cnt, bool dummy_wrb)
{
	dma_addr_t busaddr;
	int i, copied = 0;
	struct device *dev = &adapter->pdev->dev;
	struct sk_buff *first_skb = skb;
	struct be_eth_wrb *wrb;
	struct be_eth_hdr_wrb *hdr;
	bool map_single = false;
	u16 map_head;

	/* Reserve the header WRB now; it is filled in last, once the
	 * total copied length is known. map_head marks where the data
	 * WRBs start, for rollback on a mapping error. */
	hdr = queue_head_node(txq);
	queue_head_inc(txq);
	map_head = txq->head;

	/* Linear part of the skb (head data), if any */
	if (skb->len > skb->data_len) {
		int len = skb_headlen(skb);
		busaddr = dma_map_single(dev, skb->data, len, DMA_TO_DEVICE);
		if (dma_mapping_error(dev, busaddr))
			goto dma_err;
		map_single = true;	/* first WRB was dma_map_single()d */
		wrb = queue_head_node(txq);
		wrb_fill(wrb, busaddr, len);
		be_dws_cpu_to_le(wrb, sizeof(*wrb));
		queue_head_inc(txq);
		copied += len;
	}

	/* One WRB per paged fragment */
	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		const struct skb_frag_struct *frag =
			&skb_shinfo(skb)->frags[i];
		busaddr = skb_frag_dma_map(dev, frag, 0,
					skb_frag_size(frag), DMA_TO_DEVICE);
		if (dma_mapping_error(dev, busaddr))
			goto dma_err;
		wrb = queue_head_node(txq);
		wrb_fill(wrb, busaddr, skb_frag_size(frag));
		be_dws_cpu_to_le(wrb, sizeof(*wrb));
		queue_head_inc(txq);
		copied += skb_frag_size(frag);
	}

	/* Zero-length dummy WRB to pad the chain when requested */
	if (dummy_wrb) {
		wrb = queue_head_node(txq);
		wrb_fill(wrb, 0, 0);
		be_dws_cpu_to_le(wrb, sizeof(*wrb));
		queue_head_inc(txq);
	}

	wrb_fill_hdr(adapter, hdr, first_skb, wrb_cnt, copied);
	be_dws_cpu_to_le(hdr, sizeof(*hdr));

	return copied;
dma_err:
	/* Unwind: rewind the head and unmap every WRB filled so far.
	 * Only the first one may have been mapped with dma_map_single();
	 * map_single is cleared after the first iteration. */
	txq->head = map_head;
	while (copied) {
		wrb = queue_head_node(txq);
		unmap_tx_frag(dev, wrb, map_single);
		map_single = false;
		copied -= wrb->frag_len;
		queue_head_inc(txq);
	}
	return 0;
}
698
/* ndo_start_xmit handler: map the skb into the per-queue TX ring and
 * ring the doorbell. Always returns NETDEV_TX_OK; on mapping failure
 * the skb is freed and silently dropped.
 */
static netdev_tx_t be_xmit(struct sk_buff *skb,
			struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_tx_obj *txo = &adapter->tx_obj[skb_get_queue_mapping(skb)];
	struct be_queue_info *txq = &txo->q;
	u32 wrb_cnt = 0, copied = 0;
	u32 start = txq->head;	/* slot where the skb will be recorded */
	bool dummy_wrb, stopped = false;

	/* For vlan tagged pkts, BE
	 * 1) calculates checksum even when CSO is not requested
	 * 2) calculates checksum wrongly for padded pkt less than
	 * 60 bytes long.
	 * As a workaround disable TX vlan offloading in such cases:
	 * insert the tag in software and clear vlan_tci. */
	if (unlikely(vlan_tx_tag_present(skb) &&
		     (skb->ip_summed != CHECKSUM_PARTIAL || skb->len <= 60))) {
		skb = skb_share_check(skb, GFP_ATOMIC);
		if (unlikely(!skb))
			goto tx_drop;

		skb = __vlan_put_tag(skb, be_get_tx_vlan_tag(adapter, skb));
		if (unlikely(!skb))
			goto tx_drop;

		skb->vlan_tci = 0;
	}

	wrb_cnt = wrb_cnt_for_skb(adapter, skb, &dummy_wrb);

	copied = make_tx_wrbs(adapter, txq, skb, wrb_cnt, dummy_wrb);
	if (copied) {
		/* record the sent skb in the sent_skb table */
		BUG_ON(txo->sent_skb_list[start]);
		txo->sent_skb_list[start] = skb;

		/* Ensure txq has space for the next skb; Else stop the queue
		 * *BEFORE* ringing the tx doorbell, so that we serialze the
		 * tx compls of the current transmit which'll wake up the queue
		 */
		atomic_add(wrb_cnt, &txq->used);
		if ((BE_MAX_TX_FRAG_COUNT + atomic_read(&txq->used)) >=
								txq->len) {
			netif_stop_subqueue(netdev, skb_get_queue_mapping(skb));
			stopped = true;
		}

		be_txq_notify(adapter, txq->id, wrb_cnt);

		be_tx_stats_update(txo, wrb_cnt, copied,
				skb_shinfo(skb)->gso_segs, stopped);
	} else {
		/* DMA mapping failed: make_tx_wrbs already unwound its
		 * mappings; restore the head and drop the packet. */
		txq->head = start;
		dev_kfree_skb_any(skb);
	}
tx_drop:
	return NETDEV_TX_OK;
}
758
759static int be_change_mtu(struct net_device *netdev, int new_mtu)
760{
761 struct be_adapter *adapter = netdev_priv(netdev);
762 if (new_mtu < BE_MIN_MTU ||
Ajit Khaparde34a89b82010-02-09 01:32:43 +0000763 new_mtu > (BE_MAX_JUMBO_FRAME_SIZE -
764 (ETH_HLEN + ETH_FCS_LEN))) {
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700765 dev_info(&adapter->pdev->dev,
766 "MTU must be between %d and %d bytes\n",
Ajit Khaparde34a89b82010-02-09 01:32:43 +0000767 BE_MIN_MTU,
768 (BE_MAX_JUMBO_FRAME_SIZE - (ETH_HLEN + ETH_FCS_LEN)));
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700769 return -EINVAL;
770 }
771 dev_info(&adapter->pdev->dev, "MTU changed from %d to %d bytes\n",
772 netdev->mtu, new_mtu);
773 netdev->mtu = new_mtu;
774 return 0;
775}
776
/*
 * A max of 64 (BE_NUM_VLANS_SUPPORTED) vlans can be configured in BE.
 * If the user configures more, place BE in vlan promiscuous mode.
 *
 * When @vf is true, additionally programs the single vlan_tag of
 * vf_cfg[@vf_num] on that VF's interface handle.
 * Returns the status of the last be_cmd_vlan_config() issued.
 */
static int be_vid_config(struct be_adapter *adapter, bool vf, u32 vf_num)
{
	struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf_num];
	u16 vtag[BE_NUM_VLANS_SUPPORTED];
	u16 ntags = 0, i;
	int status = 0;

	if (vf) {
		vtag[0] = cpu_to_le16(vf_cfg->vlan_tag);
		status = be_cmd_vlan_config(adapter, vf_cfg->if_handle, vtag,
					    1, 1, 0);
	}

	/* No need to further configure vids if in promiscuous mode */
	if (adapter->promiscuous)
		return 0;

	if (adapter->vlans_added <= adapter->max_vlans) {
		/* Construct VLAN Table to give to HW */
		for (i = 0; i < VLAN_N_VID; i++) {
			if (adapter->vlan_tag[i]) {
				vtag[ntags] = cpu_to_le16(i);
				ntags++;
			}
		}
		status = be_cmd_vlan_config(adapter, adapter->if_handle,
					vtag, ntags, 1, 0);
	} else {
		/* Too many vids for the HW table: fall back to vlan
		 * promiscuous mode (last arg = 1). */
		status = be_cmd_vlan_config(adapter, adapter->if_handle,
					NULL, 0, 1, 1);
	}

	return status;
}
815
/* ndo_vlan_rx_add_vid handler: mark @vid in the vlan_tag table and push
 * the updated table to HW. Only the physical function may do this.
 * The vlans_added counter is bumped only after a successful HW update;
 * on failure the table entry is rolled back.
 */
static int be_vlan_add_vid(struct net_device *netdev, u16 vid)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	int status = 0;

	if (!be_physfn(adapter)) {
		status = -EINVAL;
		goto ret;
	}

	adapter->vlan_tag[vid] = 1;
	/* vlans_added is not yet incremented, hence the "+ 1" bound;
	 * beyond it be_vid_config would only re-enter promisc mode. */
	if (adapter->vlans_added <= (adapter->max_vlans + 1))
		status = be_vid_config(adapter, false, 0);

	if (!status)
		adapter->vlans_added++;
	else
		adapter->vlan_tag[vid] = 0;	/* roll back on HW failure */
ret:
	return status;
}
837
/* ndo_vlan_rx_kill_vid handler: clear @vid from the vlan_tag table and
 * push the updated table to HW. Only the physical function may do this.
 * Mirrors be_vlan_add_vid(): the counter is adjusted only on success,
 * and the table entry is restored on failure.
 */
static int be_vlan_rem_vid(struct net_device *netdev, u16 vid)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	int status = 0;

	if (!be_physfn(adapter)) {
		status = -EINVAL;
		goto ret;
	}

	adapter->vlan_tag[vid] = 0;
	if (adapter->vlans_added <= adapter->max_vlans)
		status = be_vid_config(adapter, false, 0);

	if (!status)
		adapter->vlans_added--;
	else
		adapter->vlan_tag[vid] = 1;	/* roll back on HW failure */
ret:
	return status;
}
859
/* ndo_set_rx_mode handler: translate netdev flags / mc list into RX
 * filter commands. Order matters: full promisc wins, then allmulti
 * (also used when the mc list exceeds BE_MAX_MC), else plain multicast
 * filtering.
 */
static void be_set_rx_mode(struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);

	if (netdev->flags & IFF_PROMISC) {
		be_cmd_rx_filter(adapter, IFF_PROMISC, ON);
		adapter->promiscuous = true;
		goto done;
	}

	/* BE was previously in promiscuous mode; disable it */
	if (adapter->promiscuous) {
		adapter->promiscuous = false;
		be_cmd_rx_filter(adapter, IFF_PROMISC, OFF);

		/* Promisc mode skipped vid programming (see be_vid_config);
		 * re-program any vids that were added meanwhile. */
		if (adapter->vlans_added)
			be_vid_config(adapter, false, 0);
	}

	/* Enable multicast promisc if num configured exceeds what we support */
	if (netdev->flags & IFF_ALLMULTI ||
	    netdev_mc_count(netdev) > BE_MAX_MC) {
		be_cmd_rx_filter(adapter, IFF_ALLMULTI, ON);
		goto done;
	}

	be_cmd_rx_filter(adapter, IFF_MULTICAST, ON);
done:
	return;
}
890
/* ndo_set_vf_mac handler: program @mac as the MAC address of VF @vf.
 * Lancer chips use the set_mac_list command; others delete the old
 * pmac entry and add a new one. On success the new MAC is cached in
 * vf_cfg; on failure an error is logged and returned.
 */
static int be_set_vf_mac(struct net_device *netdev, int vf, u8 *mac)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf];
	int status;

	if (!sriov_enabled(adapter))
		return -EPERM;

	if (!is_valid_ether_addr(mac) || vf >= adapter->num_vfs)
		return -EINVAL;

	if (lancer_chip(adapter)) {
		status = be_cmd_set_mac_list(adapter, mac, 1, vf + 1);
	} else {
		/* NOTE(review): the status of be_cmd_pmac_del is
		 * overwritten by be_cmd_pmac_add below, so a failed
		 * delete is never reported — confirm this is intended. */
		status = be_cmd_pmac_del(adapter, vf_cfg->if_handle,
					 vf_cfg->pmac_id, vf + 1);

		status = be_cmd_pmac_add(adapter, mac, vf_cfg->if_handle,
					 &vf_cfg->pmac_id, vf + 1);
	}

	if (status)
		dev_err(&adapter->pdev->dev, "MAC %pM set on VF %d Failed\n",
				mac, vf);
	else
		memcpy(vf_cfg->mac_addr, mac, ETH_ALEN);

	return status;
}
921
Ajit Khaparde64600ea2010-07-23 01:50:34 +0000922static int be_get_vf_config(struct net_device *netdev, int vf,
923 struct ifla_vf_info *vi)
924{
925 struct be_adapter *adapter = netdev_priv(netdev);
Sathya Perla11ac75e2011-12-13 00:58:50 +0000926 struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf];
Ajit Khaparde64600ea2010-07-23 01:50:34 +0000927
Sathya Perla11ac75e2011-12-13 00:58:50 +0000928 if (!sriov_enabled(adapter))
Ajit Khaparde64600ea2010-07-23 01:50:34 +0000929 return -EPERM;
930
Sathya Perla11ac75e2011-12-13 00:58:50 +0000931 if (vf >= adapter->num_vfs)
Ajit Khaparde64600ea2010-07-23 01:50:34 +0000932 return -EINVAL;
933
934 vi->vf = vf;
Sathya Perla11ac75e2011-12-13 00:58:50 +0000935 vi->tx_rate = vf_cfg->tx_rate;
936 vi->vlan = vf_cfg->vlan_tag;
Ajit Khaparde64600ea2010-07-23 01:50:34 +0000937 vi->qos = 0;
Sathya Perla11ac75e2011-12-13 00:58:50 +0000938 memcpy(&vi->mac, vf_cfg->mac_addr, ETH_ALEN);
Ajit Khaparde64600ea2010-07-23 01:50:34 +0000939
940 return 0;
941}
942
/* ndo_set_vf_vlan handler: set (@vlan != 0) or clear (@vlan == 0) the
 * transparent vlan tag of VF @vf, then program it via be_vid_config().
 * @qos is accepted but not used.
 * NOTE(review): vlans_added is adjusted before the HW command and is
 * not rolled back if be_vid_config() fails — confirm this is intended.
 */
static int be_set_vf_vlan(struct net_device *netdev,
			int vf, u16 vlan, u8 qos)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	int status = 0;

	if (!sriov_enabled(adapter))
		return -EPERM;

	if (vf >= adapter->num_vfs || vlan > 4095)
		return -EINVAL;

	if (vlan) {
		adapter->vf_cfg[vf].vlan_tag = vlan;
		adapter->vlans_added++;
	} else {
		adapter->vf_cfg[vf].vlan_tag = 0;
		adapter->vlans_added--;
	}

	status = be_vid_config(adapter, true, vf);

	if (status)
		dev_info(&adapter->pdev->dev,
				"VLAN %d config on VF %d failed\n", vlan, vf);
	return status;
}
970
Ajit Khapardee1d18732010-07-23 01:52:13 +0000971static int be_set_vf_tx_rate(struct net_device *netdev,
972 int vf, int rate)
973{
974 struct be_adapter *adapter = netdev_priv(netdev);
975 int status = 0;
976
Sathya Perla11ac75e2011-12-13 00:58:50 +0000977 if (!sriov_enabled(adapter))
Ajit Khapardee1d18732010-07-23 01:52:13 +0000978 return -EPERM;
979
Ajit Khaparde94f434c2011-12-30 12:15:30 +0000980 if (vf >= adapter->num_vfs)
Ajit Khapardee1d18732010-07-23 01:52:13 +0000981 return -EINVAL;
982
Ajit Khaparde94f434c2011-12-30 12:15:30 +0000983 if (rate < 100 || rate > 10000) {
984 dev_err(&adapter->pdev->dev,
985 "tx rate must be between 100 and 10000 Mbps\n");
986 return -EINVAL;
987 }
Ajit Khapardee1d18732010-07-23 01:52:13 +0000988
Ajit Khaparde856c4012011-02-11 13:32:32 +0000989 status = be_cmd_set_qos(adapter, rate / 10, vf + 1);
Ajit Khapardee1d18732010-07-23 01:52:13 +0000990
991 if (status)
Ajit Khaparde94f434c2011-12-30 12:15:30 +0000992 dev_err(&adapter->pdev->dev,
Ajit Khapardee1d18732010-07-23 01:52:13 +0000993 "tx rate %d on VF %d failed\n", rate, vf);
Ajit Khaparde94f434c2011-12-30 12:15:30 +0000994 else
995 adapter->vf_cfg[vf].tx_rate = rate;
Ajit Khapardee1d18732010-07-23 01:52:13 +0000996 return status;
997}
998
/* Adaptive interrupt coalescing: recompute the event-queue delay (eqd)
 * for @eqo from the RX packet rate and program it into HW when it
 * changes. With AIC disabled the statically configured eqo->eqd is
 * used. The rate is sampled at most once per second.
 */
static void be_eqd_update(struct be_adapter *adapter, struct be_eq_obj *eqo)
{
	struct be_rx_stats *stats = rx_stats(&adapter->rx_obj[eqo->idx]);
	ulong now = jiffies;
	ulong delta = now - stats->rx_jiffies;
	u64 pkts;
	unsigned int start, eqd;

	if (!eqo->enable_aic) {
		eqd = eqo->eqd;
		goto modify_eqd;
	}

	/* EQs beyond the RX queues have no rx stats to sample */
	if (eqo->idx >= adapter->num_rx_qs)
		return;

	/* NOTE(review): this recomputes the same stats pointer as the
	 * initializer above — redundant but harmless. */
	stats = rx_stats(&adapter->rx_obj[eqo->idx]);

	/* Wrapped around */
	if (time_before(now, stats->rx_jiffies)) {
		stats->rx_jiffies = now;
		return;
	}

	/* Update once a second */
	if (delta < HZ)
		return;

	/* Consistent snapshot of the 64-bit packet counter */
	do {
		start = u64_stats_fetch_begin_bh(&stats->sync);
		pkts = stats->rx_pkts;
	} while (u64_stats_fetch_retry_bh(&stats->sync, start));

	stats->rx_pps = (unsigned long)(pkts - stats->rx_pkts_prev) / (delta / HZ);
	stats->rx_pkts_prev = pkts;
	stats->rx_jiffies = now;
	/* Scale pkts/sec into an eqd value, clamped to the EQ's range;
	 * very low rates (eqd < 10) disable delay entirely. */
	eqd = (stats->rx_pps / 110000) << 3;
	eqd = min(eqd, eqo->max_eqd);
	eqd = max(eqd, eqo->min_eqd);
	if (eqd < 10)
		eqd = 0;

modify_eqd:
	if (eqd != eqo->cur_eqd) {
		be_cmd_modify_eqd(adapter, eqo->q.id, eqd);
		eqo->cur_eqd = eqd;
	}
}
1047
/* Account one RX completion in the per-rxo stats, inside a
 * u64_stats update section so 32-bit readers see consistent values.
 */
static void be_rx_stats_update(struct be_rx_obj *rxo,
		struct be_rx_compl_info *rxcp)
{
	struct be_rx_stats *stats = rx_stats(rxo);

	u64_stats_update_begin(&stats->sync);
	stats->rx_compl++;
	stats->rx_bytes += rxcp->pkt_size;
	stats->rx_pkts++;
	if (rxcp->pkt_type == BE_MULTICAST_PACKET)
		stats->rx_mcast_pkts++;
	if (rxcp->err)
		stats->rx_compl_err++;
	u64_stats_update_end(&stats->sync);
}
1063
Sathya Perla2e588f82011-03-11 02:49:26 +00001064static inline bool csum_passed(struct be_rx_compl_info *rxcp)
Ajit Khaparde728a9972009-04-13 15:41:22 -07001065{
Padmanabh Ratnakar19fad862011-03-07 03:08:16 +00001066 /* L4 checksum is not reliable for non TCP/UDP packets.
1067 * Also ignore ipcksm for ipv6 pkts */
Sathya Perla2e588f82011-03-11 02:49:26 +00001068 return (rxcp->tcpf || rxcp->udpf) && rxcp->l4_csum &&
1069 (rxcp->ip_csum || rxcp->ipv6);
Ajit Khaparde728a9972009-04-13 15:41:22 -07001070}
1071
/* Fetch the page_info entry for RX queue slot @frag_idx and consume it:
 * unmaps the backing DMA page when this entry is the last user of it,
 * and decrements the rxq used count. The caller owns the page ref.
 */
static struct be_rx_page_info *get_rx_page_info(struct be_rx_obj *rxo,
						u16 frag_idx)
{
	struct be_adapter *adapter = rxo->adapter;
	struct be_rx_page_info *rx_page_info;
	struct be_queue_info *rxq = &rxo->q;

	rx_page_info = &rxo->page_info_tbl[frag_idx];
	BUG_ON(!rx_page_info->page);

	/* Pages are shared by multiple frags; only the entry flagged as
	 * last_page_user unmaps the (big) page. */
	if (rx_page_info->last_page_user) {
		dma_unmap_page(&adapter->pdev->dev,
			       dma_unmap_addr(rx_page_info, bus),
			       adapter->big_page_size, DMA_FROM_DEVICE);
		rx_page_info->last_page_user = false;
	}

	atomic_dec(&rxq->used);
	return rx_page_info;
}
1092
/* Throwaway the data in the Rx completion: release the page ref of
 * every fragment belonging to @rxcp and clear its page_info entry.
 * Advances rxcp->rxq_idx past all consumed slots.
 */
static void be_rx_compl_discard(struct be_rx_obj *rxo,
				struct be_rx_compl_info *rxcp)
{
	struct be_queue_info *rxq = &rxo->q;
	struct be_rx_page_info *page_info;
	u16 i, num_rcvd = rxcp->num_rcvd;

	for (i = 0; i < num_rcvd; i++) {
		page_info = get_rx_page_info(rxo, rxcp->rxq_idx);
		put_page(page_info->page);
		memset(page_info, 0, sizeof(*page_info));
		index_inc(&rxcp->rxq_idx, rxq->len);
	}
}
1108
/*
 * skb_fill_rx_data forms a complete skb for an ether frame
 * indicated by rxcp.
 *
 * The first up-to-BE_HDR_LEN bytes are copied into the skb's linear
 * area; the remainder of the first fragment and all further fragments
 * are attached as page frags. Consecutive fragments that live on the
 * same physical page (page_offset != 0) are coalesced into one frag
 * slot. Consumes rxcp->num_rcvd RX queue entries.
 */
static void skb_fill_rx_data(struct be_rx_obj *rxo, struct sk_buff *skb,
			     struct be_rx_compl_info *rxcp)
{
	struct be_queue_info *rxq = &rxo->q;
	struct be_rx_page_info *page_info;
	u16 i, j;
	u16 hdr_len, curr_frag_len, remaining;
	u8 *start;

	page_info = get_rx_page_info(rxo, rxcp->rxq_idx);
	start = page_address(page_info->page) + page_info->page_offset;
	prefetch(start);

	/* Copy data in the first descriptor of this completion */
	curr_frag_len = min(rxcp->pkt_size, rx_frag_size);

	/* Copy the header portion into skb_data */
	hdr_len = min(BE_HDR_LEN, curr_frag_len);
	memcpy(skb->data, start, hdr_len);
	skb->len = curr_frag_len;
	if (curr_frag_len <= BE_HDR_LEN) { /* tiny packet */
		/* Complete packet has now been moved to data */
		put_page(page_info->page);
		skb->data_len = 0;
		skb->tail += curr_frag_len;
	} else {
		/* Rest of the first fragment becomes page frag 0 */
		skb_shinfo(skb)->nr_frags = 1;
		skb_frag_set_page(skb, 0, page_info->page);
		skb_shinfo(skb)->frags[0].page_offset =
					page_info->page_offset + hdr_len;
		skb_frag_size_set(&skb_shinfo(skb)->frags[0], curr_frag_len - hdr_len);
		skb->data_len = curr_frag_len - hdr_len;
		skb->truesize += rx_frag_size;
		skb->tail += hdr_len;
	}
	page_info->page = NULL;	/* page ref now owned by the skb (or dropped) */

	if (rxcp->pkt_size <= rx_frag_size) {
		BUG_ON(rxcp->num_rcvd != 1);
		return;
	}

	/* More frags present for this completion */
	index_inc(&rxcp->rxq_idx, rxq->len);
	remaining = rxcp->pkt_size - curr_frag_len;
	for (i = 1, j = 0; i < rxcp->num_rcvd; i++) {
		page_info = get_rx_page_info(rxo, rxcp->rxq_idx);
		curr_frag_len = min(remaining, rx_frag_size);

		/* Coalesce all frags from the same physical page in one slot */
		if (page_info->page_offset == 0) {
			/* Fresh page */
			j++;
			skb_frag_set_page(skb, j, page_info->page);
			skb_shinfo(skb)->frags[j].page_offset =
							page_info->page_offset;
			skb_frag_size_set(&skb_shinfo(skb)->frags[j], 0);
			skb_shinfo(skb)->nr_frags++;
		} else {
			/* Same page as previous frag: drop the extra ref */
			put_page(page_info->page);
		}

		skb_frag_size_add(&skb_shinfo(skb)->frags[j], curr_frag_len);
		skb->len += curr_frag_len;
		skb->data_len += curr_frag_len;
		skb->truesize += rx_frag_size;
		remaining -= curr_frag_len;
		index_inc(&rxcp->rxq_idx, rxq->len);
		page_info->page = NULL;
	}
	BUG_ON(j > MAX_SKB_FRAGS);
}
1185
/* Process the RX completion indicated by rxcp when GRO is disabled.
 * Allocates an skb, fills it from the RX fragments, sets checksum /
 * rxhash / vlan metadata, and hands it to the stack. On allocation
 * failure the completion's fragments are discarded and a drop counted.
 */
static void be_rx_compl_process(struct be_rx_obj *rxo,
				struct be_rx_compl_info *rxcp)
{
	struct be_adapter *adapter = rxo->adapter;
	struct net_device *netdev = adapter->netdev;
	struct sk_buff *skb;

	skb = netdev_alloc_skb_ip_align(netdev, BE_RX_SKB_ALLOC_SIZE);
	if (unlikely(!skb)) {
		rx_stats(rxo)->rx_drops_no_skbs++;
		be_rx_compl_discard(rxo, rxcp);
		return;
	}

	skb_fill_rx_data(rxo, skb, rxcp);

	if (likely((netdev->features & NETIF_F_RXCSUM) && csum_passed(rxcp)))
		skb->ip_summed = CHECKSUM_UNNECESSARY;
	else
		skb_checksum_none_assert(skb);

	skb->protocol = eth_type_trans(skb, netdev);
	if (netdev->features & NETIF_F_RXHASH)
		skb->rxhash = rxcp->rss_hash;

	if (rxcp->vlanf)
		__vlan_hwaccel_put_tag(skb, rxcp->vlan_tag);

	netif_receive_skb(skb);
}
1218
/* Process the RX completion indicated by rxcp when GRO is enabled.
 * Uses napi_get_frags() to obtain a frag-only skb, attaches all RX
 * fragments to it (coalescing frags on the same physical page), sets
 * metadata, and passes it up via napi_gro_frags(). Falls back to
 * discarding the completion when no skb is available.
 */
void be_rx_compl_process_gro(struct be_rx_obj *rxo, struct napi_struct *napi,
			     struct be_rx_compl_info *rxcp)
{
	struct be_adapter *adapter = rxo->adapter;
	struct be_rx_page_info *page_info;
	struct sk_buff *skb = NULL;
	struct be_queue_info *rxq = &rxo->q;
	u16 remaining, curr_frag_len;
	u16 i, j;

	skb = napi_get_frags(napi);
	if (!skb) {
		be_rx_compl_discard(rxo, rxcp);
		return;
	}

	remaining = rxcp->pkt_size;
	/* j starts at -1 so the first iteration (i == 0) opens frag 0 */
	for (i = 0, j = -1; i < rxcp->num_rcvd; i++) {
		page_info = get_rx_page_info(rxo, rxcp->rxq_idx);

		curr_frag_len = min(remaining, rx_frag_size);

		/* Coalesce all frags from the same physical page in one slot */
		if (i == 0 || page_info->page_offset == 0) {
			/* First frag or Fresh page */
			j++;
			skb_frag_set_page(skb, j, page_info->page);
			skb_shinfo(skb)->frags[j].page_offset =
							page_info->page_offset;
			skb_frag_size_set(&skb_shinfo(skb)->frags[j], 0);
		} else {
			/* Same page as previous frag: drop the extra ref */
			put_page(page_info->page);
		}
		skb_frag_size_add(&skb_shinfo(skb)->frags[j], curr_frag_len);
		skb->truesize += rx_frag_size;
		remaining -= curr_frag_len;
		index_inc(&rxcp->rxq_idx, rxq->len);
		memset(page_info, 0, sizeof(*page_info));
	}
	BUG_ON(j > MAX_SKB_FRAGS);

	skb_shinfo(skb)->nr_frags = j + 1;
	skb->len = rxcp->pkt_size;
	skb->data_len = rxcp->pkt_size;
	skb->ip_summed = CHECKSUM_UNNECESSARY;	/* HW verified (GRO path) */
	if (adapter->netdev->features & NETIF_F_RXHASH)
		skb->rxhash = rxcp->rss_hash;

	if (rxcp->vlanf)
		__vlan_hwaccel_put_tag(skb, rxcp->vlan_tag);

	napi_gro_frags(napi);
}
1273
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001274static void be_parse_rx_compl_v1(struct be_eth_rx_compl *compl,
1275 struct be_rx_compl_info *rxcp)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001276{
Sathya Perla2e588f82011-03-11 02:49:26 +00001277 rxcp->pkt_size =
1278 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, pktsize, compl);
1279 rxcp->vlanf = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, vtp, compl);
1280 rxcp->err = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, err, compl);
1281 rxcp->tcpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, tcpf, compl);
Padmanabh Ratnakar9ecb42f2011-03-15 14:57:09 -07001282 rxcp->udpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, udpf, compl);
Sathya Perla2e588f82011-03-11 02:49:26 +00001283 rxcp->ip_csum =
1284 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, ipcksm, compl);
1285 rxcp->l4_csum =
1286 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, l4_cksm, compl);
1287 rxcp->ipv6 =
1288 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, ip_version, compl);
1289 rxcp->rxq_idx =
1290 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, fragndx, compl);
1291 rxcp->num_rcvd =
1292 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, numfrags, compl);
1293 rxcp->pkt_type =
1294 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, cast_enc, compl);
Ajit Khaparde4b972912011-04-06 18:07:43 +00001295 rxcp->rss_hash =
1296 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, rsshash, rxcp);
Sathya Perla15d72182011-03-21 20:49:26 +00001297 if (rxcp->vlanf) {
1298 rxcp->vtm = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, vtm,
David S. Miller3c709f82011-05-11 14:26:15 -04001299 compl);
1300 rxcp->vlan_tag = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, vlan_tag,
1301 compl);
Sathya Perla15d72182011-03-21 20:49:26 +00001302 }
Sathya Perla12004ae2011-08-02 19:57:46 +00001303 rxcp->port = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, port, compl);
Sathya Perla2e588f82011-03-11 02:49:26 +00001304}
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001305
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001306static void be_parse_rx_compl_v0(struct be_eth_rx_compl *compl,
1307 struct be_rx_compl_info *rxcp)
Sathya Perla2e588f82011-03-11 02:49:26 +00001308{
1309 rxcp->pkt_size =
1310 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, pktsize, compl);
1311 rxcp->vlanf = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, vtp, compl);
1312 rxcp->err = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, err, compl);
1313 rxcp->tcpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, tcpf, compl);
Padmanabh Ratnakar9ecb42f2011-03-15 14:57:09 -07001314 rxcp->udpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, udpf, compl);
Sathya Perla2e588f82011-03-11 02:49:26 +00001315 rxcp->ip_csum =
1316 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, ipcksm, compl);
1317 rxcp->l4_csum =
1318 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, l4_cksm, compl);
1319 rxcp->ipv6 =
1320 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, ip_version, compl);
1321 rxcp->rxq_idx =
1322 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, fragndx, compl);
1323 rxcp->num_rcvd =
1324 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, numfrags, compl);
1325 rxcp->pkt_type =
1326 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, cast_enc, compl);
Ajit Khaparde4b972912011-04-06 18:07:43 +00001327 rxcp->rss_hash =
1328 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, rsshash, rxcp);
Sathya Perla15d72182011-03-21 20:49:26 +00001329 if (rxcp->vlanf) {
1330 rxcp->vtm = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, vtm,
David S. Miller3c709f82011-05-11 14:26:15 -04001331 compl);
1332 rxcp->vlan_tag = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, vlan_tag,
1333 compl);
Sathya Perla15d72182011-03-21 20:49:26 +00001334 }
Sathya Perla12004ae2011-08-02 19:57:46 +00001335 rxcp->port = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, port, compl);
Sathya Perla2e588f82011-03-11 02:49:26 +00001336}
1337
/* Fetch the next valid Rx completion from rxo->cq, parse it into the
 * per-rxo rxcp cache and return it; returns NULL when the CQ has no
 * pending entry. The consumed entry's valid bit is cleared so the slot
 * is not mistaken for a new completion after the ring wraps.
 */
static struct be_rx_compl_info *be_rx_compl_get(struct be_rx_obj *rxo)
{
	struct be_eth_rx_compl *compl = queue_tail_node(&rxo->cq);
	struct be_rx_compl_info *rxcp = &rxo->rxcp;
	struct be_adapter *adapter = rxo->adapter;

	/* For checking the valid bit it is Ok to use either definition as the
	 * valid bit is at the same position in both v0 and v1 Rx compl */
	if (compl->dw[offsetof(struct amap_eth_rx_compl_v1, valid) / 32] == 0)
		return NULL;

	/* Read the rest of the entry only after the valid bit is seen set */
	rmb();
	be_dws_le_to_cpu(compl, sizeof(*compl));

	/* BE3 in native mode posts v1-layout completions */
	if (adapter->be3_native)
		be_parse_rx_compl_v1(compl, rxcp);
	else
		be_parse_rx_compl_v0(compl, rxcp);

	if (rxcp->vlanf) {
		/* vlanf could be wrongly set in some cards.
		 * ignore if vtm is not set */
		if ((adapter->function_mode & FLEX10_MODE) && !rxcp->vtm)
			rxcp->vlanf = 0;

		/* BE2/BE3 deliver the tag byte-swapped; Lancer does not */
		if (!lancer_chip(adapter))
			rxcp->vlan_tag = swab16(rxcp->vlan_tag);

		/* Drop the vlan flag when the tag equals the port's pvid
		 * and is not a vid configured on this interface */
		if (adapter->pvid == (rxcp->vlan_tag & VLAN_VID_MASK) &&
		    !adapter->vlan_tag[rxcp->vlan_tag])
			rxcp->vlanf = 0;
	}

	/* As the compl has been parsed, reset it; we wont touch it again */
	compl->dw[offsetof(struct amap_eth_rx_compl_v1, valid) / 32] = 0;

	queue_tail_inc(&rxo->cq);
	return rxcp;
}
1377
Eric Dumazet1829b082011-03-01 05:48:12 +00001378static inline struct page *be_alloc_pages(u32 size, gfp_t gfp)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001379{
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001380 u32 order = get_order(size);
Eric Dumazet1829b082011-03-01 05:48:12 +00001381
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001382 if (order > 0)
Eric Dumazet1829b082011-03-01 05:48:12 +00001383 gfp |= __GFP_COMP;
1384 return alloc_pages(gfp, order);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001385}
1386
/*
 * Allocate a page, split it to fragments of size rx_frag_size and post as
 * receive buffers to BE
 */
static void be_post_rx_frags(struct be_rx_obj *rxo, gfp_t gfp)
{
	struct be_adapter *adapter = rxo->adapter;
	struct be_rx_page_info *page_info = NULL, *prev_page_info = NULL;
	struct be_queue_info *rxq = &rxo->q;
	struct page *pagep = NULL;
	struct be_eth_rx_d *rxd;
	u64 page_dmaaddr = 0, frag_dmaaddr;
	u32 posted, page_offset = 0;

	page_info = &rxo->page_info_tbl[rxq->head];
	/* Post at most MAX_RX_POST buffers; stop early at the first slot
	 * that still holds a page (ring full of unconsumed buffers). */
	for (posted = 0; posted < MAX_RX_POST && !page_info->page; posted++) {
		if (!pagep) {
			/* Need a fresh big page; DMA-map it once for all
			 * the fragments that will be carved out of it. */
			pagep = be_alloc_pages(adapter->big_page_size, gfp);
			if (unlikely(!pagep)) {
				rx_stats(rxo)->rx_post_fail++;
				break;
			}
			page_dmaaddr = dma_map_page(&adapter->pdev->dev, pagep,
						    0, adapter->big_page_size,
						    DMA_FROM_DEVICE);
			page_info->page_offset = 0;
		} else {
			/* Carve the next rx_frag_size chunk from the same
			 * page; each fragment pins its own page reference. */
			get_page(pagep);
			page_info->page_offset = page_offset + rx_frag_size;
		}
		page_offset = page_info->page_offset;
		page_info->page = pagep;
		dma_unmap_addr_set(page_info, bus, page_dmaaddr);
		frag_dmaaddr = page_dmaaddr + page_info->page_offset;

		/* Fill the rx descriptor with the fragment's DMA address */
		rxd = queue_head_node(rxq);
		rxd->fragpa_lo = cpu_to_le32(frag_dmaaddr & 0xFFFFFFFF);
		rxd->fragpa_hi = cpu_to_le32(upper_32_bits(frag_dmaaddr));

		/* Any space left in the current big page for another frag? */
		if ((page_offset + rx_frag_size + rx_frag_size) >
					adapter->big_page_size) {
			pagep = NULL;
			page_info->last_page_user = true;
		}

		prev_page_info = page_info;
		queue_head_inc(rxq);
		page_info = &rxo->page_info_tbl[rxq->head];
	}
	/* Loop ended mid-page: the last fragment posted is the final user */
	if (pagep)
		prev_page_info->last_page_user = true;

	if (posted) {
		atomic_add(posted, &rxq->used);
		be_rxq_notify(adapter, rxq->id, posted);
	} else if (atomic_read(&rxq->used) == 0) {
		/* Let be_worker replenish when memory is available */
		rxo->rx_post_starved = true;
	}
}
1448
/* Fetch the next valid Tx completion from @tx_cq, or NULL if none is
 * pending. The entry's valid bit is cleared before advancing the tail
 * so a wrapped-around slot is not mistaken for a new completion.
 */
static struct be_eth_tx_compl *be_tx_compl_get(struct be_queue_info *tx_cq)
{
	struct be_eth_tx_compl *txcp = queue_tail_node(tx_cq);

	if (txcp->dw[offsetof(struct amap_eth_tx_compl, valid) / 32] == 0)
		return NULL;

	/* Read the rest of the entry only after the valid bit is seen set */
	rmb();
	be_dws_le_to_cpu(txcp, sizeof(*txcp));

	txcp->dw[offsetof(struct amap_eth_tx_compl, valid) / 32] = 0;

	queue_tail_inc(tx_cq);
	return txcp;
}
1464
/* Unmap and free the skb whose wrbs occupy the Tx ring from the current
 * tail up to @last_index (the wrb index reported in the completion).
 * Returns the number of wrbs consumed (including the header wrb) so the
 * caller can credit them back to txq->used.
 */
static u16 be_tx_compl_process(struct be_adapter *adapter,
		struct be_tx_obj *txo, u16 last_index)
{
	struct be_queue_info *txq = &txo->q;
	struct be_eth_wrb *wrb;
	struct sk_buff **sent_skbs = txo->sent_skb_list;
	struct sk_buff *sent_skb;
	u16 cur_index, num_wrbs = 1; /* account for hdr wrb */
	bool unmap_skb_hdr = true;

	sent_skb = sent_skbs[txq->tail];
	BUG_ON(!sent_skb);
	sent_skbs[txq->tail] = NULL;

	/* skip header wrb */
	queue_tail_inc(txq);

	do {
		cur_index = txq->tail;
		wrb = queue_tail_node(txq);
		/* The skb header mapping is released only with the first
		 * data wrb (and only if there is linear data to unmap) */
		unmap_tx_frag(&adapter->pdev->dev, wrb,
			      (unmap_skb_hdr && skb_headlen(sent_skb)));
		unmap_skb_hdr = false;

		num_wrbs++;
		queue_tail_inc(txq);
	} while (cur_index != last_index);

	kfree_skb(sent_skb);
	return num_wrbs;
}
1496
/* Return the number of events in the event queue */
static inline int events_get(struct be_eq_obj *eqo)
{
	struct be_eq_entry *eqe;
	int num = 0;

	do {
		eqe = queue_tail_node(&eqo->q);
		/* A zero evt word means HW has not posted past this slot */
		if (eqe->evt == 0)
			break;

		/* Don't consume the entry until evt is seen non-zero */
		rmb();
		eqe->evt = 0;	/* clear so the slot reads empty after wrap */
		num++;
		queue_tail_inc(&eqo->q);
	} while (true);

	return num;
}
1516
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001517static int event_handle(struct be_eq_obj *eqo)
Sathya Perla859b1e42009-08-10 03:43:51 +00001518{
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001519 bool rearm = false;
1520 int num = events_get(eqo);
Sathya Perla859b1e42009-08-10 03:43:51 +00001521
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001522 /* Deal with any spurious interrupts that come without events */
1523 if (!num)
1524 rearm = true;
Sathya Perla859b1e42009-08-10 03:43:51 +00001525
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001526 be_eq_notify(eqo->adapter, eqo->q.id, rearm, true, num);
Sathya Perla859b1e42009-08-10 03:43:51 +00001527 if (num)
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001528 napi_schedule(&eqo->napi);
1529
1530 return num;
Sathya Perla859b1e42009-08-10 03:43:51 +00001531}
1532
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001533/* Leaves the EQ is disarmed state */
1534static void be_eq_clean(struct be_eq_obj *eqo)
1535{
1536 int num = events_get(eqo);
1537
1538 be_eq_notify(eqo->adapter, eqo->q.id, false, true, num);
1539}
1540
/* Drain an Rx queue being torn down: discard every pending completion,
 * then release the posted-but-unconsumed receive buffers and reset the
 * ring indices.
 */
static void be_rx_cq_clean(struct be_rx_obj *rxo)
{
	struct be_rx_page_info *page_info;
	struct be_queue_info *rxq = &rxo->q;
	struct be_queue_info *rx_cq = &rxo->cq;
	struct be_rx_compl_info *rxcp;
	u16 tail;

	/* First cleanup pending rx completions */
	while ((rxcp = be_rx_compl_get(rxo)) != NULL) {
		be_rx_compl_discard(rxo, rxcp);
		be_cq_notify(rxo->adapter, rx_cq->id, false, 1);
	}

	/* Then free posted rx buffer that were not used */
	tail = (rxq->head + rxq->len - atomic_read(&rxq->used)) % rxq->len;
	/* NOTE(review): loop termination relies on get_rx_page_info()
	 * (defined elsewhere) decrementing rxq->used — confirm */
	for (; atomic_read(&rxq->used) > 0; index_inc(&tail, rxq->len)) {
		page_info = get_rx_page_info(rxo, tail);
		put_page(page_info->page);
		memset(page_info, 0, sizeof(*page_info));
	}
	BUG_ON(atomic_read(&rxq->used));
	rxq->tail = rxq->head = 0;
}
1565
/* Drain all Tx queues on teardown. Phase 1: wait up to ~200ms for the
 * outstanding Tx completions to arrive and process them. Phase 2: any
 * wrbs whose completions never arrived are reclaimed by walking the Tx
 * ring directly, so no skbs are leaked.
 */
static void be_tx_compl_clean(struct be_adapter *adapter)
{
	struct be_tx_obj *txo;
	struct be_queue_info *txq;
	struct be_eth_tx_compl *txcp;
	u16 end_idx, cmpl = 0, timeo = 0, num_wrbs = 0;
	struct sk_buff *sent_skb;
	bool dummy_wrb;
	int i, pending_txqs;

	/* Wait for a max of 200ms for all the tx-completions to arrive. */
	do {
		pending_txqs = adapter->num_tx_qs;

		for_all_tx_queues(adapter, txo, i) {
			txq = &txo->q;
			while ((txcp = be_tx_compl_get(&txo->cq))) {
				end_idx =
					AMAP_GET_BITS(struct amap_eth_tx_compl,
						      wrb_index, txcp);
				num_wrbs += be_tx_compl_process(adapter, txo,
								end_idx);
				cmpl++;
			}
			if (cmpl) {
				be_cq_notify(adapter, txo->cq.id, false, cmpl);
				atomic_sub(num_wrbs, &txq->used);
				/* reset per-txq counters for the next txq */
				cmpl = 0;
				num_wrbs = 0;
			}
			if (atomic_read(&txq->used) == 0)
				pending_txqs--;
		}

		/* Done when every txq drained, or after ~200 x 1ms delays */
		if (pending_txqs == 0 || ++timeo > 200)
			break;

		mdelay(1);
	} while (true);

	for_all_tx_queues(adapter, txo, i) {
		txq = &txo->q;
		if (atomic_read(&txq->used))
			dev_err(&adapter->pdev->dev, "%d pending tx-compls\n",
				atomic_read(&txq->used));

		/* free posted tx for which compls will never arrive */
		while (atomic_read(&txq->used)) {
			sent_skb = txo->sent_skb_list[txq->tail];
			end_idx = txq->tail;
			/* Derive the last wrb index of this skb from its
			 * wrb count, then reuse the normal compl path */
			num_wrbs = wrb_cnt_for_skb(adapter, sent_skb,
						   &dummy_wrb);
			index_adv(&end_idx, num_wrbs - 1, txq->len);
			num_wrbs = be_tx_compl_process(adapter, txo, end_idx);
			atomic_sub(num_wrbs, &txq->used);
		}
	}
}
1624
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001625static void be_evt_queues_destroy(struct be_adapter *adapter)
1626{
1627 struct be_eq_obj *eqo;
1628 int i;
1629
1630 for_all_evt_queues(adapter, eqo, i) {
1631 be_eq_clean(eqo);
1632 if (eqo->q.created)
1633 be_cmd_q_destroy(adapter, &eqo->q, QTYPE_EQ);
1634 be_queue_free(adapter, &eqo->q);
1635 }
1636}
1637
1638static int be_evt_queues_create(struct be_adapter *adapter)
1639{
1640 struct be_queue_info *eq;
1641 struct be_eq_obj *eqo;
1642 int i, rc;
1643
1644 adapter->num_evt_qs = num_irqs(adapter);
1645
1646 for_all_evt_queues(adapter, eqo, i) {
1647 eqo->adapter = adapter;
1648 eqo->tx_budget = BE_TX_BUDGET;
1649 eqo->idx = i;
1650 eqo->max_eqd = BE_MAX_EQD;
1651 eqo->enable_aic = true;
1652
1653 eq = &eqo->q;
1654 rc = be_queue_alloc(adapter, eq, EVNT_Q_LEN,
1655 sizeof(struct be_eq_entry));
1656 if (rc)
1657 return rc;
1658
1659 rc = be_cmd_eq_create(adapter, eq, eqo->cur_eqd);
1660 if (rc)
1661 return rc;
1662 }
1663 return rc;
1664}
1665
Sathya Perla5fb379e2009-06-18 00:02:59 +00001666static void be_mcc_queues_destroy(struct be_adapter *adapter)
1667{
1668 struct be_queue_info *q;
Sathya Perla5fb379e2009-06-18 00:02:59 +00001669
Sathya Perla8788fdc2009-07-27 22:52:03 +00001670 q = &adapter->mcc_obj.q;
Sathya Perla5fb379e2009-06-18 00:02:59 +00001671 if (q->created)
Sathya Perla8788fdc2009-07-27 22:52:03 +00001672 be_cmd_q_destroy(adapter, q, QTYPE_MCCQ);
Sathya Perla5fb379e2009-06-18 00:02:59 +00001673 be_queue_free(adapter, q);
1674
Sathya Perla8788fdc2009-07-27 22:52:03 +00001675 q = &adapter->mcc_obj.cq;
Sathya Perla5fb379e2009-06-18 00:02:59 +00001676 if (q->created)
Sathya Perla8788fdc2009-07-27 22:52:03 +00001677 be_cmd_q_destroy(adapter, q, QTYPE_CQ);
Sathya Perla5fb379e2009-06-18 00:02:59 +00001678 be_queue_free(adapter, q);
1679}
1680
/* Must be called only after TX qs are created as MCC shares TX EQ */
static int be_mcc_queues_create(struct be_adapter *adapter)
{
	struct be_queue_info *q, *cq;

	/* Allocate and create the MCC completion queue first, then the
	 * MCC queue itself; failures unwind via the goto chain below.
	 * Returns 0 on success, -1 on any failure. */
	cq = &adapter->mcc_obj.cq;
	if (be_queue_alloc(adapter, cq, MCC_CQ_LEN,
			sizeof(struct be_mcc_compl)))
		goto err;

	/* Use the default EQ for MCC completions */
	if (be_cmd_cq_create(adapter, cq, &mcc_eqo(adapter)->q, true, 0))
		goto mcc_cq_free;

	q = &adapter->mcc_obj.q;
	if (be_queue_alloc(adapter, q, MCC_Q_LEN, sizeof(struct be_mcc_wrb)))
		goto mcc_cq_destroy;

	if (be_cmd_mccq_create(adapter, q, cq))
		goto mcc_q_free;

	return 0;

/* Unwind in reverse order of acquisition */
mcc_q_free:
	be_queue_free(adapter, q);
mcc_cq_destroy:
	be_cmd_q_destroy(adapter, cq, QTYPE_CQ);
mcc_cq_free:
	be_queue_free(adapter, cq);
err:
	return -1;
}
1713
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001714static void be_tx_queues_destroy(struct be_adapter *adapter)
1715{
1716 struct be_queue_info *q;
Sathya Perla3c8def92011-06-12 20:01:58 +00001717 struct be_tx_obj *txo;
1718 u8 i;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001719
Sathya Perla3c8def92011-06-12 20:01:58 +00001720 for_all_tx_queues(adapter, txo, i) {
1721 q = &txo->q;
1722 if (q->created)
1723 be_cmd_q_destroy(adapter, q, QTYPE_TXQ);
1724 be_queue_free(adapter, q);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001725
Sathya Perla3c8def92011-06-12 20:01:58 +00001726 q = &txo->cq;
1727 if (q->created)
1728 be_cmd_q_destroy(adapter, q, QTYPE_CQ);
1729 be_queue_free(adapter, q);
1730 }
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001731}
1732
Sathya Perladafc0fe2011-10-24 02:45:02 +00001733static int be_num_txqs_want(struct be_adapter *adapter)
1734{
Sathya Perla11ac75e2011-12-13 00:58:50 +00001735 if (sriov_enabled(adapter) || be_is_mc(adapter) ||
Sathya Perladafc0fe2011-10-24 02:45:02 +00001736 lancer_chip(adapter) || !be_physfn(adapter) ||
1737 adapter->generation == BE_GEN2)
1738 return 1;
1739 else
1740 return MAX_TX_QS;
1741}
1742
/* Decide the number of Tx queues and create a completion queue for each,
 * binding CQs to event queues round-robin. Returns 0 on success or the
 * first non-zero status.
 */
static int be_tx_cqs_create(struct be_adapter *adapter)
{
	struct be_queue_info *cq, *eq;
	int status;
	struct be_tx_obj *txo;
	u8 i;

	adapter->num_tx_qs = be_num_txqs_want(adapter);
	if (adapter->num_tx_qs != MAX_TX_QS) {
		/* rtnl lock is required around the real-num-queues update */
		rtnl_lock();
		netif_set_real_num_tx_queues(adapter->netdev,
			adapter->num_tx_qs);
		rtnl_unlock();
	}

	for_all_tx_queues(adapter, txo, i) {
		cq = &txo->cq;
		status = be_queue_alloc(adapter, cq, TX_CQ_LEN,
					sizeof(struct be_eth_tx_compl));
		if (status)
			return status;

		/* If num_evt_qs is less than num_tx_qs, then more than
		 * one txq share an eq
		 */
		eq = &adapter->eq_obj[i % adapter->num_evt_qs].q;
		status = be_cmd_cq_create(adapter, cq, eq, false, 3);
		if (status)
			return status;
	}
	return 0;
}
1775
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001776static int be_tx_qs_create(struct be_adapter *adapter)
1777{
1778 struct be_tx_obj *txo;
1779 int i, status;
1780
1781 for_all_tx_queues(adapter, txo, i) {
1782 status = be_queue_alloc(adapter, &txo->q, TX_Q_LEN,
1783 sizeof(struct be_eth_wrb));
1784 if (status)
1785 return status;
1786
1787 status = be_cmd_txq_create(adapter, &txo->q, &txo->cq);
1788 if (status)
1789 return status;
1790 }
1791
1792 return 0;
1793}
1794
1795static void be_rx_cqs_destroy(struct be_adapter *adapter)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001796{
1797 struct be_queue_info *q;
Sathya Perla3abcded2010-10-03 22:12:27 -07001798 struct be_rx_obj *rxo;
1799 int i;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001800
Sathya Perla3abcded2010-10-03 22:12:27 -07001801 for_all_rx_queues(adapter, rxo, i) {
Sathya Perla3abcded2010-10-03 22:12:27 -07001802 q = &rxo->cq;
1803 if (q->created)
1804 be_cmd_q_destroy(adapter, q, QTYPE_CQ);
1805 be_queue_free(adapter, q);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001806 }
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001807}
1808
/* Decide the number of Rx queues and create a completion queue for each,
 * binding CQs to event queues round-robin. Returns 0 on success or the
 * first non-zero status.
 */
static int be_rx_cqs_create(struct be_adapter *adapter)
{
	struct be_queue_info *eq, *cq;
	struct be_rx_obj *rxo;
	int rc, i;

	/* We'll create as many RSS rings as there are irqs.
	 * But when there's only one irq there's no use creating RSS rings
	 */
	adapter->num_rx_qs = (num_irqs(adapter) > 1) ?
				num_irqs(adapter) + 1 : 1;

	adapter->big_page_size = (1 << get_order(rx_frag_size)) * PAGE_SIZE;
	for_all_rx_queues(adapter, rxo, i) {
		rxo->adapter = adapter;
		cq = &rxo->cq;
		rc = be_queue_alloc(adapter, cq, RX_CQ_LEN,
				sizeof(struct be_eth_rx_compl));
		if (rc)
			return rc;

		/* Multiple CQs may share an EQ when there are fewer EQs */
		eq = &adapter->eq_obj[i % adapter->num_evt_qs].q;
		rc = be_cmd_cq_create(adapter, cq, eq, false, 3);
		if (rc)
			return rc;
	}

	if (adapter->num_rx_qs != MAX_RX_QS)
		dev_info(&adapter->pdev->dev,
			"Created only %d receive queues", adapter->num_rx_qs);

	return 0;
}
1842
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001843static irqreturn_t be_intx(int irq, void *dev)
1844{
1845 struct be_adapter *adapter = dev;
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001846 int num_evts;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001847
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001848 /* With INTx only one EQ is used */
1849 num_evts = event_handle(&adapter->eq_obj[0]);
1850 if (num_evts)
1851 return IRQ_HANDLED;
1852 else
1853 return IRQ_NONE;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001854}
1855
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001856static irqreturn_t be_msix(int irq, void *dev)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001857{
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001858 struct be_eq_obj *eqo = dev;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001859
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001860 event_handle(eqo);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001861 return IRQ_HANDLED;
1862}
1863
Sathya Perla2e588f82011-03-11 02:49:26 +00001864static inline bool do_gro(struct be_rx_compl_info *rxcp)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001865{
Sathya Perla2e588f82011-03-11 02:49:26 +00001866 return (rxcp->tcpf && !rxcp->err) ? true : false;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001867}
1868
/* NAPI Rx processing: consume up to @budget completions from rxo->cq,
 * handing good packets to the stack (via GRO when eligible), then ack
 * the CQ and replenish the Rx ring if it has run low.
 * Returns the number of completions processed.
 */
static int be_process_rx(struct be_rx_obj *rxo, struct napi_struct *napi,
			int budget)
{
	struct be_adapter *adapter = rxo->adapter;
	struct be_queue_info *rx_cq = &rxo->cq;
	struct be_rx_compl_info *rxcp;
	u32 work_done;

	for (work_done = 0; work_done < budget; work_done++) {
		rxcp = be_rx_compl_get(rxo);
		if (!rxcp)
			break;

		/* Is it a flush compl that has no data */
		if (unlikely(rxcp->num_rcvd == 0))
			goto loop_continue;

		/* Discard compl with partial DMA Lancer B0 */
		if (unlikely(!rxcp->pkt_size)) {
			be_rx_compl_discard(rxo, rxcp);
			goto loop_continue;
		}

		/* On BE drop pkts that arrive due to imperfect filtering in
		 * promiscuous mode on some skews
		 */
		if (unlikely(rxcp->port != adapter->port_num &&
				!lancer_chip(adapter))) {
			be_rx_compl_discard(rxo, rxcp);
			goto loop_continue;
		}

		if (do_gro(rxcp))
			be_rx_compl_process_gro(rxo, napi, rxcp);
		else
			be_rx_compl_process(rxo, rxcp);
loop_continue:
		be_rx_stats_update(rxo, rxcp);
	}

	if (work_done) {
		be_cq_notify(adapter, rx_cq->id, true, work_done);

		/* Replenish posted buffers when the ring runs low */
		if (atomic_read(&rxo->q.used) < RX_FRAGS_REFILL_WM)
			be_post_rx_frags(rxo, GFP_ATOMIC);
	}

	return work_done;
}
1918
/* NAPI Tx processing for one txo: consume up to @budget completions,
 * free the transmitted skbs, credit the freed wrbs back to the queue,
 * wake the netdev subqueue @idx if it was flow-stopped, and update
 * stats. Returns true when the budget was not exhausted (Tx done).
 */
static bool be_process_tx(struct be_adapter *adapter, struct be_tx_obj *txo,
			int budget, int idx)
{
	struct be_eth_tx_compl *txcp;
	int num_wrbs = 0, work_done;

	for (work_done = 0; work_done < budget; work_done++) {
		txcp = be_tx_compl_get(&txo->cq);
		if (!txcp)
			break;
		num_wrbs += be_tx_compl_process(adapter, txo,
				AMAP_GET_BITS(struct amap_eth_tx_compl,
					wrb_index, txcp));
	}

	if (work_done) {
		be_cq_notify(adapter, txo->cq.id, true, work_done);
		atomic_sub(num_wrbs, &txo->q.used);

		/* As Tx wrbs have been freed up, wake up netdev queue
		 * if it was stopped due to lack of tx wrbs. */
		if (__netif_subqueue_stopped(adapter->netdev, idx) &&
			atomic_read(&txo->q.used) < txo->q.len / 2) {
			netif_wake_subqueue(adapter->netdev, idx);
		}

		u64_stats_update_begin(&tx_stats(txo)->sync_compl);
		tx_stats(txo)->tx_compl += work_done;
		u64_stats_update_end(&tx_stats(txo)->sync_compl);
	}
	return (work_done < budget); /* Done */
}
Sathya Perla3c8def92011-06-12 20:01:58 +00001951
/* NAPI poll handler: services all TX and RX queues mapped to this EQ,
 * plus MCC completions when this is the MCC EQ.
 * Returns the amount of RX work done (NAPI contract: a return < @budget
 * means polling is complete and interrupts may be re-enabled).
 */
int be_poll(struct napi_struct *napi, int budget)
{
	struct be_eq_obj *eqo = container_of(napi, struct be_eq_obj, napi);
	struct be_adapter *adapter = eqo->adapter;
	int max_work = 0, work, i;
	bool tx_done;

	/* Process all TXQs serviced by this EQ */
	for (i = eqo->idx; i < adapter->num_tx_qs; i += adapter->num_evt_qs) {
		tx_done = be_process_tx(adapter, &adapter->tx_obj[i],
				eqo->tx_budget, i);
		/* TX not drained: force another poll round by reporting
		 * the full budget as consumed.
		 */
		if (!tx_done)
			max_work = budget;
	}

	/* This loop will iterate twice for EQ0 in which
	 * completions of the last RXQ (default one) are also processed
	 * For other EQs the loop iterates only once
	 */
	for (i = eqo->idx; i < adapter->num_rx_qs; i += adapter->num_evt_qs) {
		work = be_process_rx(&adapter->rx_obj[i], napi, budget);
		max_work = max(work, max_work);
	}

	if (is_mcc_eqo(eqo))
		be_process_mcc(adapter);

	if (max_work < budget) {
		/* All work done: exit polling and re-arm the EQ */
		napi_complete(napi);
		be_eq_notify(adapter, eqo->q.id, true, false, 0);
	} else {
		/* As we'll continue in polling mode, count and clear events */
		be_eq_notify(adapter, eqo->q.id, false, false, events_get(eqo));
	}
	return max_work;
}
1988
/* Detect an unrecoverable error (UE) in the adapter and dump diagnostics.
 * On Lancer chips the SLIPORT status/error registers are read via MMIO;
 * on BE chips the UE status words are read from PCI config space and
 * masked with their per-bit ignore masks.
 * Sets adapter->ue_detected and adapter->eeh_err when an error is found;
 * does nothing if an EEH error or UE was already flagged.
 */
void be_detect_dump_ue(struct be_adapter *adapter)
{
	u32 ue_lo = 0, ue_hi = 0, ue_lo_mask = 0, ue_hi_mask = 0;
	u32 sliport_status = 0, sliport_err1 = 0, sliport_err2 = 0;
	u32 i;

	if (adapter->eeh_err || adapter->ue_detected)
		return;

	if (lancer_chip(adapter)) {
		sliport_status = ioread32(adapter->db + SLIPORT_STATUS_OFFSET);
		if (sliport_status & SLIPORT_STATUS_ERR_MASK) {
			sliport_err1 = ioread32(adapter->db +
					SLIPORT_ERROR1_OFFSET);
			sliport_err2 = ioread32(adapter->db +
					SLIPORT_ERROR2_OFFSET);
		}
	} else {
		pci_read_config_dword(adapter->pdev,
				PCICFG_UE_STATUS_LOW, &ue_lo);
		pci_read_config_dword(adapter->pdev,
				PCICFG_UE_STATUS_HIGH, &ue_hi);
		pci_read_config_dword(adapter->pdev,
				PCICFG_UE_STATUS_LOW_MASK, &ue_lo_mask);
		pci_read_config_dword(adapter->pdev,
				PCICFG_UE_STATUS_HI_MASK, &ue_hi_mask);

		/* Masked bits are expected/ignorable; clear them */
		ue_lo = (ue_lo & (~ue_lo_mask));
		ue_hi = (ue_hi & (~ue_hi_mask));
	}

	if (ue_lo || ue_hi ||
		sliport_status & SLIPORT_STATUS_ERR_MASK) {
		adapter->ue_detected = true;
		adapter->eeh_err = true;
		dev_err(&adapter->pdev->dev,
			"Unrecoverable error in the card\n");
	}

	/* Log a description for every set (unmasked) UE bit */
	if (ue_lo) {
		for (i = 0; ue_lo; ue_lo >>= 1, i++) {
			if (ue_lo & 1)
				dev_err(&adapter->pdev->dev,
				"UE: %s bit set\n", ue_status_low_desc[i]);
		}
	}
	if (ue_hi) {
		for (i = 0; ue_hi; ue_hi >>= 1, i++) {
			if (ue_hi & 1)
				dev_err(&adapter->pdev->dev,
				"UE: %s bit set\n", ue_status_hi_desc[i]);
		}
	}

	if (sliport_status & SLIPORT_STATUS_ERR_MASK) {
		dev_err(&adapter->pdev->dev,
			"sliport status 0x%x\n", sliport_status);
		dev_err(&adapter->pdev->dev,
			"sliport error1 0x%x\n", sliport_err1);
		dev_err(&adapter->pdev->dev,
			"sliport error2 0x%x\n", sliport_err2);
	}
}
2052
Sathya Perla8d56ff12009-11-22 22:02:26 +00002053static void be_msix_disable(struct be_adapter *adapter)
2054{
Sathya Perlaac6a0c42011-03-21 20:49:25 +00002055 if (msix_enabled(adapter)) {
Sathya Perla8d56ff12009-11-22 22:02:26 +00002056 pci_disable_msix(adapter->pdev);
Sathya Perlaac6a0c42011-03-21 20:49:25 +00002057 adapter->num_msix_vec = 0;
Sathya Perla3abcded2010-10-03 22:12:27 -07002058 }
2059}
2060
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002061static uint be_num_rss_want(struct be_adapter *adapter)
2062{
2063 if ((adapter->function_caps & BE_FUNCTION_CAPS_RSS) &&
2064 adapter->num_vfs == 0 && be_physfn(adapter) &&
2065 !be_is_mc(adapter))
2066 return (adapter->be3_native) ? BE3_MAX_RSS_QS : BE2_MAX_RSS_QS;
2067 else
2068 return 0;
2069}
2070
/* Try to enable MSI-X with one vector per desired RSS queue (capped by the
 * number of online CPUs, with a minimum of one vector for the default RXQ).
 * pci_enable_msix() returns a positive count when fewer vectors are
 * available, in which case a second attempt is made with that count.
 * On success adapter->num_msix_vec records the vectors obtained; on
 * failure it is left unchanged (caller falls back to INTx).
 */
static void be_msix_enable(struct be_adapter *adapter)
{
#define BE_MIN_MSIX_VECTORS 1
	int i, status, num_vec;

	/* If RSS queues are not used, need a vec for default RX Q */
	num_vec = min(be_num_rss_want(adapter), num_online_cpus());
	num_vec = max(num_vec, BE_MIN_MSIX_VECTORS);

	for (i = 0; i < num_vec; i++)
		adapter->msix_entries[i].entry = i;

	status = pci_enable_msix(adapter->pdev, adapter->msix_entries, num_vec);
	if (status == 0) {
		goto done;
	} else if (status >= BE_MIN_MSIX_VECTORS) {
		/* Retry with the number of vectors the PCI core says
		 * are actually available.
		 */
		num_vec = status;
		if (pci_enable_msix(adapter->pdev, adapter->msix_entries,
				num_vec) == 0)
			goto done;
	}
	return;
done:
	adapter->num_msix_vec = num_vec;
	return;
}
2097
/* Enable SR-IOV when the driver was loaded with num_vfs > 0 on a PF.
 * The requested VF count is clamped to the device's advertised
 * TotalVFs (read from the SR-IOV extended capability); per-VF config
 * storage is then allocated.
 * Returns 0 on success (including when SR-IOV is not attempted) or
 * -ENOMEM if the vf_cfg array cannot be allocated.
 */
static int be_sriov_enable(struct be_adapter *adapter)
{
	be_check_sriov_fn_type(adapter);

#ifdef CONFIG_PCI_IOV
	if (be_physfn(adapter) && num_vfs) {
		int status, pos;
		u16 dev_vfs;

		pos = pci_find_ext_capability(adapter->pdev,
						PCI_EXT_CAP_ID_SRIOV);
		pci_read_config_word(adapter->pdev,
				pos + PCI_SRIOV_TOTAL_VF, &dev_vfs);

		/* Never ask for more VFs than the device supports */
		adapter->num_vfs = min_t(u16, num_vfs, dev_vfs);
		if (adapter->num_vfs != num_vfs)
			dev_info(&adapter->pdev->dev,
					"Device supports %d VFs and not %d\n",
					adapter->num_vfs, num_vfs);

		status = pci_enable_sriov(adapter->pdev, adapter->num_vfs);
		if (status)
			adapter->num_vfs = 0;

		if (adapter->num_vfs) {
			adapter->vf_cfg = kcalloc(num_vfs,
						sizeof(struct be_vf_cfg),
						GFP_KERNEL);
			if (!adapter->vf_cfg)
				return -ENOMEM;
		}
	}
#endif
	return 0;
}
2133
/* Disable SR-IOV and release the per-VF config array, if VFs were enabled */
static void be_sriov_disable(struct be_adapter *adapter)
{
#ifdef CONFIG_PCI_IOV
	if (sriov_enabled(adapter)) {
		pci_disable_sriov(adapter->pdev);
		kfree(adapter->vf_cfg);
		adapter->num_vfs = 0;
	}
#endif
}
2144
Sathya Perlafe6d2a32010-11-21 23:25:50 +00002145static inline int be_msix_vec_get(struct be_adapter *adapter,
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002146 struct be_eq_obj *eqo)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002147{
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002148 return adapter->msix_entries[eqo->idx].vector;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002149}
2150
/* Request one MSI-X IRQ per event queue, naming each "<netdev>-q<i>".
 * On failure, previously requested vectors are freed in reverse order
 * and MSI-X is disabled so the caller can fall back to INTx.
 * Returns 0 on success or the request_irq() error code.
 */
static int be_msix_register(struct be_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	struct be_eq_obj *eqo;
	int status, i, vec;

	for_all_evt_queues(adapter, eqo, i) {
		sprintf(eqo->desc, "%s-q%d", netdev->name, i);
		vec = be_msix_vec_get(adapter, eqo);
		status = request_irq(vec, be_msix, 0, eqo->desc, eqo);
		if (status)
			goto err_msix;
	}

	return 0;
err_msix:
	/* Unwind: free the IRQs acquired before the failure */
	for (i--, eqo = &adapter->eq_obj[i]; i >= 0; i--, eqo--)
		free_irq(be_msix_vec_get(adapter, eqo), eqo);
	dev_warn(&adapter->pdev->dev, "MSIX Request IRQ failed - err %d\n",
		status);
	be_msix_disable(adapter);
	return status;
}
2174
/* Register interrupt handlers: MSI-X when enabled, otherwise legacy INTx.
 * VFs support only MSI-X, so no INTx fallback is attempted for them.
 * Sets adapter->isr_registered on success; returns 0 or an errno.
 */
static int be_irq_register(struct be_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	int status;

	if (msix_enabled(adapter)) {
		status = be_msix_register(adapter);
		if (status == 0)
			goto done;
		/* INTx is not supported for VF */
		if (!be_physfn(adapter))
			return status;
	}

	/* INTx */
	netdev->irq = adapter->pdev->irq;
	status = request_irq(netdev->irq, be_intx, IRQF_SHARED, netdev->name,
			adapter);
	if (status) {
		dev_err(&adapter->pdev->dev,
			"INTx request IRQ failed - err %d\n", status);
		return status;
	}
done:
	adapter->isr_registered = true;
	return 0;
}
2202
/* Free whichever IRQs be_irq_register() acquired (INTx or per-EQ MSI-X)
 * and clear isr_registered. Safe to call when nothing was registered.
 */
static void be_irq_unregister(struct be_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	struct be_eq_obj *eqo;
	int i;

	if (!adapter->isr_registered)
		return;

	/* INTx */
	if (!msix_enabled(adapter)) {
		free_irq(netdev->irq, adapter);
		goto done;
	}

	/* MSIx */
	for_all_evt_queues(adapter, eqo, i)
		free_irq(be_msix_vec_get(adapter, eqo), eqo);

done:
	adapter->isr_registered = false;
}
2225
/* Destroy all RX queues: issue the FW destroy cmd, wait for in-flight DMA
 * and the flush completion to land, drain the CQ, then free the queue
 * memory. Queues that were never created are only freed.
 */
static void be_rx_qs_destroy(struct be_adapter *adapter)
{
	struct be_queue_info *q;
	struct be_rx_obj *rxo;
	int i;

	for_all_rx_queues(adapter, rxo, i) {
		q = &rxo->q;
		if (q->created) {
			be_cmd_rxq_destroy(adapter, q);
			/* After the rxq is invalidated, wait for a grace time
			 * of 1ms for all dma to end and the flush compl to
			 * arrive
			 */
			mdelay(1);
			be_rx_cq_clean(rxo);
		}
		be_queue_free(adapter, q);
	}
}
2246
/* ndo_stop handler: quiesce the device in an order that guarantees no
 * handler runs after teardown — disable async MCC and interrupts, stop
 * NAPI and sync each EQ's IRQ, unregister IRQs, drain TX completions,
 * then destroy the RX queues.
 * Always returns 0.
 */
static int be_close(struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_eq_obj *eqo;
	int i;

	be_async_mcc_disable(adapter);

	/* Lancer manages interrupt masking differently; only non-Lancer
	 * chips use the global intr enable bit.
	 */
	if (!lancer_chip(adapter))
		be_intr_set(adapter, false);

	for_all_evt_queues(adapter, eqo, i) {
		napi_disable(&eqo->napi);
		if (msix_enabled(adapter))
			synchronize_irq(be_msix_vec_get(adapter, eqo));
		else
			synchronize_irq(netdev->irq);
		be_eq_clean(eqo);
	}

	be_irq_unregister(adapter);

	/* Wait for all pending tx completions to arrive so that
	 * all tx skbs are freed.
	 */
	be_tx_compl_clean(adapter);

	be_rx_qs_destroy(adapter);
	return 0;
}
2277
/* Allocate and create all RX queues, program the RSS indirection table
 * (128 entries spread round-robin over the RSS rings) when multiple RX
 * queues are in use, and post the initial receive buffers.
 * Returns 0 on success or the first failing command's status.
 */
static int be_rx_qs_create(struct be_adapter *adapter)
{
	struct be_rx_obj *rxo;
	int rc, i, j;
	u8 rsstable[128];

	for_all_rx_queues(adapter, rxo, i) {
		rc = be_queue_alloc(adapter, &rxo->q, RX_Q_LEN,
				    sizeof(struct be_eth_rx_d));
		if (rc)
			return rc;
	}

	/* The FW would like the default RXQ to be created first */
	rxo = default_rxo(adapter);
	rc = be_cmd_rxq_create(adapter, &rxo->q, rxo->cq.id, rx_frag_size,
			adapter->if_handle, false, &rxo->rss_id);
	if (rc)
		return rc;

	for_all_rss_queues(adapter, rxo, i) {
		rc = be_cmd_rxq_create(adapter, &rxo->q, rxo->cq.id,
				       rx_frag_size, adapter->if_handle,
				       true, &rxo->rss_id);
		if (rc)
			return rc;
	}

	if (be_multi_rxq(adapter)) {
		/* Fill the 128-slot indirection table with the RSS ring
		 * ids, interleaved so traffic spreads across all rings.
		 */
		for (j = 0; j < 128; j += adapter->num_rx_qs - 1) {
			for_all_rss_queues(adapter, rxo, i) {
				if ((j + i) >= 128)
					break;
				rsstable[j + i] = rxo->rss_id;
			}
		}
		rc = be_cmd_rss_config(adapter, rsstable, 128);
		if (rc)
			return rc;
	}

	/* First time posting */
	for_all_rx_queues(adapter, rxo, i)
		be_post_rx_frags(rxo, GFP_KERNEL);
	return 0;
}
2324
/* ndo_open handler: create the RX queues, register IRQs, enable
 * interrupts, arm all RX/TX CQs and EQs, enable NAPI and async MCC,
 * then report the current link state.
 * On any failure the half-opened state is torn down via be_close().
 */
static int be_open(struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_eq_obj *eqo;
	struct be_rx_obj *rxo;
	struct be_tx_obj *txo;
	u8 link_status;
	int status, i;

	status = be_rx_qs_create(adapter);
	if (status)
		goto err;

	be_irq_register(adapter);

	/* Lancer does not use the global intr enable bit */
	if (!lancer_chip(adapter))
		be_intr_set(adapter, true);

	for_all_rx_queues(adapter, rxo, i)
		be_cq_notify(adapter, rxo->cq.id, true, 0);

	for_all_tx_queues(adapter, txo, i)
		be_cq_notify(adapter, txo->cq.id, true, 0);

	be_async_mcc_enable(adapter);

	for_all_evt_queues(adapter, eqo, i) {
		napi_enable(&eqo->napi);
		be_eq_notify(adapter, eqo->q.id, true, false, 0);
	}

	/* Query link and propagate it to the net stack; failure here is
	 * non-fatal (carrier will be updated by async events later).
	 */
	status = be_cmd_link_status_query(adapter, NULL, NULL,
					  &link_status, 0);
	if (!status)
		be_link_status_update(adapter, link_status);

	return 0;
err:
	be_close(adapter->netdev);
	return -EIO;
}
2366
/* Enable or disable Wake-on-LAN (magic packet).
 * When enabling: set the PM control bit in PCI config space, program the
 * netdev MAC as the magic-wol address and arm PCI wake for D3hot/D3cold.
 * When disabling: program a zero MAC and disarm PCI wake.
 * A DMA-coherent buffer is used for the FW command and freed on all paths.
 * Returns 0 on success, -1 on allocation failure, or a command status.
 */
static int be_setup_wol(struct be_adapter *adapter, bool enable)
{
	struct be_dma_mem cmd;
	int status = 0;
	u8 mac[ETH_ALEN];

	memset(mac, 0, ETH_ALEN);

	cmd.size = sizeof(struct be_cmd_req_acpi_wol_magic_config);
	cmd.va = dma_alloc_coherent(&adapter->pdev->dev, cmd.size, &cmd.dma,
				    GFP_KERNEL);
	if (cmd.va == NULL)
		return -1;
	memset(cmd.va, 0, cmd.size);

	if (enable) {
		status = pci_write_config_dword(adapter->pdev,
			PCICFG_PM_CONTROL_OFFSET, PCICFG_PM_CONTROL_MASK);
		if (status) {
			dev_err(&adapter->pdev->dev,
				"Could not enable Wake-on-lan\n");
			dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va,
					  cmd.dma);
			return status;
		}
		status = be_cmd_enable_magic_wol(adapter,
				adapter->netdev->dev_addr, &cmd);
		pci_enable_wake(adapter->pdev, PCI_D3hot, 1);
		pci_enable_wake(adapter->pdev, PCI_D3cold, 1);
	} else {
		/* A zero MAC disables the magic-packet filter */
		status = be_cmd_enable_magic_wol(adapter, mac, &cmd);
		pci_enable_wake(adapter->pdev, PCI_D3hot, 0);
		pci_enable_wake(adapter->pdev, PCI_D3cold, 0);
	}

	dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va, cmd.dma);
	return status;
}
2405
/*
 * Generate a seed MAC address from the PF MAC Address using jhash.
 * MAC Address for VFs are assigned incrementally starting from the seed.
 * These addresses are programmed in the ASIC by the PF and the VF driver
 * queries for the MAC address during its probe.
 */
static inline int be_vf_eth_addr_config(struct be_adapter *adapter)
{
	u32 vf;
	int status = 0;
	u8 mac[ETH_ALEN];
	struct be_vf_cfg *vf_cfg;

	be_vf_eth_addr_generate(adapter, mac);

	for_all_vfs(adapter, vf_cfg, vf) {
		/* Lancer programs the MAC via set_mac_list; BEx adds a pmac
		 * entry on the VF's interface.
		 */
		if (lancer_chip(adapter)) {
			status = be_cmd_set_mac_list(adapter,  mac, 1, vf + 1);
		} else {
			status = be_cmd_pmac_add(adapter, mac,
						 vf_cfg->if_handle,
						 &vf_cfg->pmac_id, vf + 1);
		}

		if (status)
			dev_err(&adapter->pdev->dev,
			"Mac address assignment failed for VF %d\n", vf);
		else
			memcpy(vf_cfg->mac_addr, mac, ETH_ALEN);

		/* Next VF gets the next address in sequence.
		 * NOTE(review): status is overwritten each iteration, so the
		 * return value reflects only the last VF's result.
		 */
		mac[5] += 1;
	}
	return status;
}
2440
/* Undo per-VF provisioning: remove each VF's MAC (via mac_list on Lancer,
 * pmac_del on BEx) and destroy its FW interface.
 */
static void be_vf_clear(struct be_adapter *adapter)
{
	struct be_vf_cfg *vf_cfg;
	u32 vf;

	for_all_vfs(adapter, vf_cfg, vf) {
		if (lancer_chip(adapter))
			be_cmd_set_mac_list(adapter, NULL, 0, vf + 1);
		else
			be_cmd_pmac_del(adapter, vf_cfg->if_handle,
					vf_cfg->pmac_id, vf + 1);

		be_cmd_if_destroy(adapter, vf_cfg->if_handle, vf + 1);
	}
}
2456
/* Tear down everything be_setup() created, in reverse order: stop the
 * worker, clear VF state, destroy the interface and all queues, tell FW
 * we are done issuing commands, then release MSI-X vectors.
 * Always returns 0.
 */
static int be_clear(struct be_adapter *adapter)
{
	if (adapter->flags & BE_FLAGS_WORKER_SCHEDULED) {
		cancel_delayed_work_sync(&adapter->work);
		adapter->flags &= ~BE_FLAGS_WORKER_SCHEDULED;
	}

	if (sriov_enabled(adapter))
		be_vf_clear(adapter);

	be_cmd_if_destroy(adapter, adapter->if_handle,  0);

	be_mcc_queues_destroy(adapter);
	be_rx_cqs_destroy(adapter);
	be_tx_queues_destroy(adapter);
	be_evt_queues_destroy(adapter);

	/* tell fw we're done with firing cmds */
	be_cmd_fw_clean(adapter);

	be_msix_disable(adapter);
	return 0;
}
2480
Sathya Perla30128032011-11-10 19:17:57 +00002481static void be_vf_setup_init(struct be_adapter *adapter)
2482{
Sathya Perla11ac75e2011-12-13 00:58:50 +00002483 struct be_vf_cfg *vf_cfg;
Sathya Perla30128032011-11-10 19:17:57 +00002484 int vf;
2485
Sathya Perla11ac75e2011-12-13 00:58:50 +00002486 for_all_vfs(adapter, vf_cfg, vf) {
2487 vf_cfg->if_handle = -1;
2488 vf_cfg->pmac_id = -1;
Sathya Perla30128032011-11-10 19:17:57 +00002489 }
2490}
2491
/* Provision all VFs: create an untagged/broadcast/multicast interface for
 * each, program their MAC addresses, then query link speed to seed each
 * VF's tx_rate (speed is reported in units of 10 Mbps).
 * Returns 0 on success or the first failing command's status; partially
 * created state is left for the caller to clean up via be_clear().
 */
static int be_vf_setup(struct be_adapter *adapter)
{
	struct be_vf_cfg *vf_cfg;
	u32 cap_flags, en_flags, vf;
	u16 lnk_speed;
	int status;

	be_vf_setup_init(adapter);

	cap_flags = en_flags = BE_IF_FLAGS_UNTAGGED | BE_IF_FLAGS_BROADCAST |
				BE_IF_FLAGS_MULTICAST;
	for_all_vfs(adapter, vf_cfg, vf) {
		status = be_cmd_if_create(adapter, cap_flags, en_flags, NULL,
					  &vf_cfg->if_handle, NULL, vf + 1);
		if (status)
			goto err;
	}

	status = be_vf_eth_addr_config(adapter);
	if (status)
		goto err;

	for_all_vfs(adapter, vf_cfg, vf) {
		status = be_cmd_link_status_query(adapter, NULL, &lnk_speed,
						  NULL, vf + 1);
		if (status)
			goto err;
		vf_cfg->tx_rate = lnk_speed * 10;
	}
	return 0;
err:
	return status;
}
2525
Sathya Perla30128032011-11-10 19:17:57 +00002526static void be_setup_init(struct be_adapter *adapter)
2527{
2528 adapter->vlan_prio_bmap = 0xff;
2529 adapter->link_speed = -1;
2530 adapter->if_handle = -1;
2531 adapter->be3_native = false;
2532 adapter->promiscuous = false;
2533 adapter->eq_next_idx = 0;
2534}
2535
/* Obtain the function's MAC using the FW mac_list (Lancer path).
 * If the FW reports an active pmac_id, query the MAC behind it and adopt
 * that pmac_id; otherwise add @mac as a new pmac entry on our interface.
 * Returns 0 on success or the failing command's status.
 */
static int be_add_mac_from_list(struct be_adapter *adapter, u8 *mac)
{
	u32 pmac_id;
	int status;
	bool pmac_id_active;

	status = be_cmd_get_mac_from_list(adapter, 0, &pmac_id_active,
					  &pmac_id, mac);
	if (status != 0)
		goto do_none;

	if (pmac_id_active) {
		status = be_cmd_mac_addr_query(adapter, mac,
				MAC_ADDRESS_TYPE_NETWORK,
				false, adapter->if_handle, pmac_id);

		if (!status)
			adapter->pmac_id = pmac_id;
	} else {
		status = be_cmd_pmac_add(adapter, mac,
				adapter->if_handle, &adapter->pmac_id, 0);
	}
do_none:
	return status;
}
2561
Sathya Perla5fb379e2009-06-18 00:02:59 +00002562static int be_setup(struct be_adapter *adapter)
2563{
Sathya Perla5fb379e2009-06-18 00:02:59 +00002564 struct net_device *netdev = adapter->netdev;
Sathya Perlaf9449ab2011-10-24 02:45:01 +00002565 u32 cap_flags, en_flags;
Sathya Perlaa54769f2011-10-24 02:45:00 +00002566 u32 tx_fc, rx_fc;
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002567 int status;
Sarveshwar Bandiba343c72010-03-31 02:56:12 +00002568 u8 mac[ETH_ALEN];
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002569
Sathya Perla30128032011-11-10 19:17:57 +00002570 be_setup_init(adapter);
Sathya Perlaf9449ab2011-10-24 02:45:01 +00002571
Sathya Perla2dc1deb2011-07-19 19:52:33 +00002572 be_cmd_req_native_mode(adapter);
2573
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002574 be_msix_enable(adapter);
2575
2576 status = be_evt_queues_create(adapter);
2577 if (status)
Sathya Perlaa54769f2011-10-24 02:45:00 +00002578 goto err;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002579
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002580 status = be_tx_cqs_create(adapter);
2581 if (status)
2582 goto err;
2583
2584 status = be_rx_cqs_create(adapter);
2585 if (status)
Sathya Perlaa54769f2011-10-24 02:45:00 +00002586 goto err;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002587
Sathya Perla5fb379e2009-06-18 00:02:59 +00002588 status = be_mcc_queues_create(adapter);
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002589 if (status)
Sathya Perlaa54769f2011-10-24 02:45:00 +00002590 goto err;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002591
Sathya Perlaf9449ab2011-10-24 02:45:01 +00002592 memset(mac, 0, ETH_ALEN);
2593 status = be_cmd_mac_addr_query(adapter, mac, MAC_ADDRESS_TYPE_NETWORK,
Padmanabh Ratnakar590c3912011-11-25 05:47:26 +00002594 true /*permanent */, 0, 0);
Sathya Perlaf9449ab2011-10-24 02:45:01 +00002595 if (status)
2596 return status;
2597 memcpy(adapter->netdev->dev_addr, mac, ETH_ALEN);
2598 memcpy(adapter->netdev->perm_addr, mac, ETH_ALEN);
2599
2600 en_flags = BE_IF_FLAGS_UNTAGGED | BE_IF_FLAGS_BROADCAST |
2601 BE_IF_FLAGS_MULTICAST | BE_IF_FLAGS_PASS_L3L4_ERRORS;
2602 cap_flags = en_flags | BE_IF_FLAGS_MCAST_PROMISCUOUS |
Padmanabh Ratnakar5d5adb92011-11-16 02:03:32 +00002603 BE_IF_FLAGS_VLAN_PROMISCUOUS | BE_IF_FLAGS_PROMISCUOUS;
2604
Sathya Perlaf9449ab2011-10-24 02:45:01 +00002605 if (adapter->function_caps & BE_FUNCTION_CAPS_RSS) {
2606 cap_flags |= BE_IF_FLAGS_RSS;
2607 en_flags |= BE_IF_FLAGS_RSS;
2608 }
2609 status = be_cmd_if_create(adapter, cap_flags, en_flags,
2610 netdev->dev_addr, &adapter->if_handle,
2611 &adapter->pmac_id, 0);
2612 if (status != 0)
2613 goto err;
2614
Padmanabh Ratnakar590c3912011-11-25 05:47:26 +00002615 /* The VF's permanent mac queried from card is incorrect.
2616 * For BEx: Query the mac configued by the PF using if_handle
2617 * For Lancer: Get and use mac_list to obtain mac address.
2618 */
2619 if (!be_physfn(adapter)) {
2620 if (lancer_chip(adapter))
Padmanabh Ratnakare5e1ee82012-02-03 09:50:17 +00002621 status = be_add_mac_from_list(adapter, mac);
Padmanabh Ratnakar590c3912011-11-25 05:47:26 +00002622 else
2623 status = be_cmd_mac_addr_query(adapter, mac,
2624 MAC_ADDRESS_TYPE_NETWORK, false,
2625 adapter->if_handle, 0);
Sathya Perlaf9449ab2011-10-24 02:45:01 +00002626 if (!status) {
2627 memcpy(adapter->netdev->dev_addr, mac, ETH_ALEN);
2628 memcpy(adapter->netdev->perm_addr, mac, ETH_ALEN);
2629 }
2630 }
Ajit Khaparde0dffc832009-11-29 17:57:46 +00002631
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002632 status = be_tx_qs_create(adapter);
2633 if (status)
2634 goto err;
2635
Sathya Perla04b71172011-09-27 13:30:27 -04002636 be_cmd_get_fw_ver(adapter, adapter->fw_ver, NULL);
Somnath Kotur5a56eb12011-09-30 07:24:28 +00002637
Sathya Perlaa54769f2011-10-24 02:45:00 +00002638 status = be_vid_config(adapter, false, 0);
2639 if (status)
2640 goto err;
2641
2642 be_set_rx_mode(adapter->netdev);
2643
2644 status = be_cmd_get_flow_control(adapter, &tx_fc, &rx_fc);
Padmanabh Ratnakar590c3912011-11-25 05:47:26 +00002645 /* For Lancer: It is legal for this cmd to fail on VF */
2646 if (status && (be_physfn(adapter) || !lancer_chip(adapter)))
Sathya Perlaa54769f2011-10-24 02:45:00 +00002647 goto err;
Padmanabh Ratnakar590c3912011-11-25 05:47:26 +00002648
Sathya Perlaa54769f2011-10-24 02:45:00 +00002649 if (rx_fc != adapter->rx_fc || tx_fc != adapter->tx_fc) {
2650 status = be_cmd_set_flow_control(adapter, adapter->tx_fc,
2651 adapter->rx_fc);
Padmanabh Ratnakar590c3912011-11-25 05:47:26 +00002652 /* For Lancer: It is legal for this cmd to fail on VF */
2653 if (status && (be_physfn(adapter) || !lancer_chip(adapter)))
Sathya Perlaa54769f2011-10-24 02:45:00 +00002654 goto err;
2655 }
2656
Somnath Kotur5a56eb12011-09-30 07:24:28 +00002657 pcie_set_readrq(adapter->pdev, 4096);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002658
Sathya Perla11ac75e2011-12-13 00:58:50 +00002659 if (sriov_enabled(adapter)) {
Sathya Perlaf9449ab2011-10-24 02:45:01 +00002660 status = be_vf_setup(adapter);
2661 if (status)
2662 goto err;
2663 }
2664
Sathya Perla191eb752012-02-23 18:50:13 +00002665 schedule_delayed_work(&adapter->work, msecs_to_jiffies(1000));
2666 adapter->flags |= BE_FLAGS_WORKER_SCHEDULED;
2667
Sathya Perlaf9449ab2011-10-24 02:45:01 +00002668 return 0;
Sathya Perlaa54769f2011-10-24 02:45:00 +00002669err:
2670 be_clear(adapter);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002671 return status;
2672}
2673
Ivan Vecera66268732011-12-08 01:31:21 +00002674#ifdef CONFIG_NET_POLL_CONTROLLER
2675static void be_netpoll(struct net_device *netdev)
2676{
2677 struct be_adapter *adapter = netdev_priv(netdev);
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002678 struct be_eq_obj *eqo;
Ivan Vecera66268732011-12-08 01:31:21 +00002679 int i;
2680
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002681 for_all_evt_queues(adapter, eqo, i)
2682 event_handle(eqo);
2683
2684 return;
Ivan Vecera66268732011-12-08 01:31:21 +00002685}
2686#endif
2687
Ajit Khaparde84517482009-09-04 03:12:16 +00002688#define FW_FILE_HDR_SIGN "ServerEngines Corp. "
Sarveshwar Bandifa9a6fe2009-11-20 14:23:47 -08002689static bool be_flash_redboot(struct be_adapter *adapter,
Ajit Khaparde3f0d4562010-02-09 01:30:35 +00002690 const u8 *p, u32 img_start, int image_size,
2691 int hdr_size)
Sarveshwar Bandifa9a6fe2009-11-20 14:23:47 -08002692{
2693 u32 crc_offset;
2694 u8 flashed_crc[4];
2695 int status;
Ajit Khaparde3f0d4562010-02-09 01:30:35 +00002696
2697 crc_offset = hdr_size + img_start + image_size - 4;
2698
Sarveshwar Bandifa9a6fe2009-11-20 14:23:47 -08002699 p += crc_offset;
Ajit Khaparde3f0d4562010-02-09 01:30:35 +00002700
2701 status = be_cmd_get_flash_crc(adapter, flashed_crc,
Ajit Khapardef510fc62010-03-31 01:47:45 +00002702 (image_size - 4));
Sarveshwar Bandifa9a6fe2009-11-20 14:23:47 -08002703 if (status) {
2704 dev_err(&adapter->pdev->dev,
2705 "could not get crc from flash, not flashing redboot\n");
2706 return false;
2707 }
2708
2709 /*update redboot only if crc does not match*/
2710 if (!memcmp(flashed_crc, p, 4))
2711 return false;
2712 else
2713 return true;
Sarveshwar Bandifa9a6fe2009-11-20 14:23:47 -08002714}
2715
Sathya Perla306f1342011-08-02 19:57:45 +00002716static bool phy_flashing_required(struct be_adapter *adapter)
2717{
2718 int status = 0;
2719 struct be_phy_info phy_info;
2720
2721 status = be_cmd_get_phy_info(adapter, &phy_info);
2722 if (status)
2723 return false;
2724 if ((phy_info.phy_type == TN_8022) &&
2725 (phy_info.interface_type == PHY_TYPE_BASET_10GB)) {
2726 return true;
2727 }
2728 return false;
2729}
2730
/* Flash the individual components (BIOS, iSCSI/FCoE FW, redboot, NCSI,
 * PHY FW) contained in a UFI firmware image onto the adapter.
 *
 * The per-generation tables below list, for each component, its offset
 * within the UFI file, its flash "optype" and its maximum size.  Each
 * selected component is streamed to the card in 32KB chunks through the
 * DMA buffer in @flash_cmd: every chunk but the last is sent with a
 * SAVE op, the final chunk with a FLASH op that commits the image.
 *
 * @num_of_images: number of image_hdr entries following the gen3 file
 * header (0 for gen2 images); used to locate the component payloads.
 * Returns 0 on success, -1 on a malformed image or flash-write failure.
 */
static int be_flash_data(struct be_adapter *adapter,
			const struct firmware *fw,
			struct be_dma_mem *flash_cmd, int num_of_images)

{
	int status = 0, i, filehdr_size = 0;
	u32 total_bytes = 0, flash_op;
	int num_bytes;
	const u8 *p = fw->data;
	struct be_cmd_write_flashrom *req = flash_cmd->va;
	const struct flash_comp *pflashcomp;
	int num_comp;

	/* component layout for BE3-generation flash parts */
	static const struct flash_comp gen3_flash_types[10] = {
		{ FLASH_iSCSI_PRIMARY_IMAGE_START_g3, IMG_TYPE_ISCSI_ACTIVE,
			FLASH_IMAGE_MAX_SIZE_g3},
		{ FLASH_REDBOOT_START_g3, IMG_TYPE_REDBOOT,
			FLASH_REDBOOT_IMAGE_MAX_SIZE_g3},
		{ FLASH_iSCSI_BIOS_START_g3, IMG_TYPE_BIOS,
			FLASH_BIOS_IMAGE_MAX_SIZE_g3},
		{ FLASH_PXE_BIOS_START_g3, IMG_TYPE_PXE_BIOS,
			FLASH_BIOS_IMAGE_MAX_SIZE_g3},
		{ FLASH_FCoE_BIOS_START_g3, IMG_TYPE_FCOE_BIOS,
			FLASH_BIOS_IMAGE_MAX_SIZE_g3},
		{ FLASH_iSCSI_BACKUP_IMAGE_START_g3, IMG_TYPE_ISCSI_BACKUP,
			FLASH_IMAGE_MAX_SIZE_g3},
		{ FLASH_FCoE_PRIMARY_IMAGE_START_g3, IMG_TYPE_FCOE_FW_ACTIVE,
			FLASH_IMAGE_MAX_SIZE_g3},
		{ FLASH_FCoE_BACKUP_IMAGE_START_g3, IMG_TYPE_FCOE_FW_BACKUP,
			FLASH_IMAGE_MAX_SIZE_g3},
		{ FLASH_NCSI_START_g3, IMG_TYPE_NCSI_FW,
			FLASH_NCSI_IMAGE_MAX_SIZE_g3},
		{ FLASH_PHY_FW_START_g3, IMG_TYPE_PHY_FW,
			FLASH_PHY_FW_IMAGE_MAX_SIZE_g3}
	};
	/* component layout for BE2-generation flash parts (no NCSI/PHY FW) */
	static const struct flash_comp gen2_flash_types[8] = {
		{ FLASH_iSCSI_PRIMARY_IMAGE_START_g2, IMG_TYPE_ISCSI_ACTIVE,
			FLASH_IMAGE_MAX_SIZE_g2},
		{ FLASH_REDBOOT_START_g2, IMG_TYPE_REDBOOT,
			FLASH_REDBOOT_IMAGE_MAX_SIZE_g2},
		{ FLASH_iSCSI_BIOS_START_g2, IMG_TYPE_BIOS,
			FLASH_BIOS_IMAGE_MAX_SIZE_g2},
		{ FLASH_PXE_BIOS_START_g2, IMG_TYPE_PXE_BIOS,
			FLASH_BIOS_IMAGE_MAX_SIZE_g2},
		{ FLASH_FCoE_BIOS_START_g2, IMG_TYPE_FCOE_BIOS,
			FLASH_BIOS_IMAGE_MAX_SIZE_g2},
		{ FLASH_iSCSI_BACKUP_IMAGE_START_g2, IMG_TYPE_ISCSI_BACKUP,
			FLASH_IMAGE_MAX_SIZE_g2},
		{ FLASH_FCoE_PRIMARY_IMAGE_START_g2, IMG_TYPE_FCOE_FW_ACTIVE,
			FLASH_IMAGE_MAX_SIZE_g2},
		{ FLASH_FCoE_BACKUP_IMAGE_START_g2, IMG_TYPE_FCOE_FW_BACKUP,
			 FLASH_IMAGE_MAX_SIZE_g2}
	};

	if (adapter->generation == BE_GEN3) {
		pflashcomp = gen3_flash_types;
		filehdr_size = sizeof(struct flash_file_hdr_g3);
		num_comp = ARRAY_SIZE(gen3_flash_types);
	} else {
		pflashcomp = gen2_flash_types;
		filehdr_size = sizeof(struct flash_file_hdr_g2);
		num_comp = ARRAY_SIZE(gen2_flash_types);
	}
	for (i = 0; i < num_comp; i++) {
		/* NCSI FW is only flashed when the running FW is recent
		 * enough (presumably the first version supporting it) */
		if ((pflashcomp[i].optype == IMG_TYPE_NCSI_FW) &&
				memcmp(adapter->fw_ver, "3.102.148.0", 11) < 0)
			continue;
		/* PHY FW only applies to specific external PHYs */
		if (pflashcomp[i].optype == IMG_TYPE_PHY_FW) {
			if (!phy_flashing_required(adapter))
				continue;
		}
		/* redboot is skipped when its CRC already matches flash */
		if ((pflashcomp[i].optype == IMG_TYPE_REDBOOT) &&
			(!be_flash_redboot(adapter, fw->data,
			pflashcomp[i].offset, pflashcomp[i].size, filehdr_size +
			(num_of_images * sizeof(struct image_hdr)))))
			continue;
		/* locate this component's payload inside the UFI file */
		p = fw->data;
		p += filehdr_size + pflashcomp[i].offset
			+ (num_of_images * sizeof(struct image_hdr));
		/* reject images whose declared size overruns the file */
		if (p + pflashcomp[i].size > fw->data + fw->size)
			return -1;
		total_bytes = pflashcomp[i].size;
		while (total_bytes) {
			/* stream in chunks of at most 32KB */
			if (total_bytes > 32*1024)
				num_bytes = 32*1024;
			else
				num_bytes = total_bytes;
			total_bytes -= num_bytes;
			/* last chunk commits with a FLASH op; earlier
			 * chunks are staged with a SAVE op */
			if (!total_bytes) {
				if (pflashcomp[i].optype == IMG_TYPE_PHY_FW)
					flash_op = FLASHROM_OPER_PHY_FLASH;
				else
					flash_op = FLASHROM_OPER_FLASH;
			} else {
				if (pflashcomp[i].optype == IMG_TYPE_PHY_FW)
					flash_op = FLASHROM_OPER_PHY_SAVE;
				else
					flash_op = FLASHROM_OPER_SAVE;
			}
			memcpy(req->params.data_buf, p, num_bytes);
			p += num_bytes;
			status = be_cmd_write_flashrom(adapter, flash_cmd,
				pflashcomp[i].optype, flash_op, num_bytes);
			if (status) {
				/* FW that rejects PHY flashing is treated
				 * as "PHY flash not supported", not fatal */
				if ((status == ILLEGAL_IOCTL_REQ) &&
					(pflashcomp[i].optype ==
						IMG_TYPE_PHY_FW))
					break;
				dev_err(&adapter->pdev->dev,
					"cmd to write to flash rom failed.\n");
				return -1;
			}
		}
	}
	return 0;
}
2847
Ajit Khaparde3f0d4562010-02-09 01:30:35 +00002848static int get_ufigen_type(struct flash_file_hdr_g2 *fhdr)
2849{
2850 if (fhdr == NULL)
2851 return 0;
2852 if (fhdr->build[0] == '3')
2853 return BE_GEN3;
2854 else if (fhdr->build[0] == '2')
2855 return BE_GEN2;
2856 else
2857 return 0;
2858}
2859
/* Download a firmware image to a Lancer adapter.
 *
 * The image is pushed in 32KB chunks via WRITE_OBJECT commands to the
 * "/prg" object, each chunk staged through a single DMA-coherent buffer.
 * The card reports how much it consumed (@data_written), and the file
 * cursor advances by that amount rather than by the requested chunk
 * size.  A final zero-length WRITE_OBJECT commits the download.
 *
 * Returns 0 on success or a negative/FW error code on failure.
 */
static int lancer_fw_download(struct be_adapter *adapter,
				const struct firmware *fw)
{
#define LANCER_FW_DOWNLOAD_CHUNK      (32 * 1024)
#define LANCER_FW_DOWNLOAD_LOCATION "/prg"
	struct be_dma_mem flash_cmd;
	const u8 *data_ptr = NULL;
	u8 *dest_image_ptr = NULL;
	size_t image_size = 0;
	u32 chunk_size = 0;
	u32 data_written = 0;
	u32 offset = 0;
	int status = 0;
	u8 add_status = 0;

	/* the WRITE_OBJECT interface requires a 4-byte-aligned length */
	if (!IS_ALIGNED(fw->size, sizeof(u32))) {
		dev_err(&adapter->pdev->dev,
			"FW Image not properly aligned. "
			"Length must be 4 byte aligned.\n");
		status = -EINVAL;
		goto lancer_fw_exit;
	}

	/* one buffer holds the command header plus one image chunk */
	flash_cmd.size = sizeof(struct lancer_cmd_req_write_object)
				+ LANCER_FW_DOWNLOAD_CHUNK;
	flash_cmd.va = dma_alloc_coherent(&adapter->pdev->dev, flash_cmd.size,
						&flash_cmd.dma, GFP_KERNEL);
	if (!flash_cmd.va) {
		status = -ENOMEM;
		dev_err(&adapter->pdev->dev,
			"Memory allocation failure while flashing\n");
		goto lancer_fw_exit;
	}

	/* chunk payload lives immediately after the command header */
	dest_image_ptr = flash_cmd.va +
				sizeof(struct lancer_cmd_req_write_object);
	image_size = fw->size;
	data_ptr = fw->data;

	while (image_size) {
		chunk_size = min_t(u32, image_size, LANCER_FW_DOWNLOAD_CHUNK);

		/* Copy the image chunk content. */
		memcpy(dest_image_ptr, data_ptr, chunk_size);

		status = lancer_cmd_write_object(adapter, &flash_cmd,
				chunk_size, offset, LANCER_FW_DOWNLOAD_LOCATION,
				&data_written, &add_status);

		if (status)
			break;

		/* advance by what the card actually accepted */
		offset += data_written;
		data_ptr += data_written;
		image_size -= data_written;
	}

	if (!status) {
		/* Commit the FW written */
		status = lancer_cmd_write_object(adapter, &flash_cmd,
					0, offset, LANCER_FW_DOWNLOAD_LOCATION,
					&data_written, &add_status);
	}

	dma_free_coherent(&adapter->pdev->dev, flash_cmd.size, flash_cmd.va,
				flash_cmd.dma);
	if (status) {
		dev_err(&adapter->pdev->dev,
			"Firmware load error. "
			"Status code: 0x%x Additional Status: 0x%x\n",
			status, add_status);
		goto lancer_fw_exit;
	}

	dev_info(&adapter->pdev->dev, "Firmware flashed successfully\n");
lancer_fw_exit:
	return status;
}
2938
/* Download a UFI firmware file to a BE2/BE3 adapter.
 *
 * Validates that the UFI's generation (from its file header) matches the
 * adapter generation, then hands the per-component flashing off to
 * be_flash_data().  For gen3 files, only image-header entries with
 * imageid == 1 trigger a flash pass.  Uses a DMA buffer big enough for
 * the flashrom command plus one 32KB data chunk.
 *
 * Returns 0 on success, -ENOMEM/-1 on failure.
 */
static int be_fw_download(struct be_adapter *adapter, const struct firmware* fw)
{
	struct flash_file_hdr_g2 *fhdr;
	struct flash_file_hdr_g3 *fhdr3;
	struct image_hdr *img_hdr_ptr = NULL;
	struct be_dma_mem flash_cmd;
	const u8 *p;
	int status = 0, i = 0, num_imgs = 0;

	/* the gen2 header is a prefix of the gen3 header, so it is safe to
	 * probe the generation through the gen2 view */
	p = fw->data;
	fhdr = (struct flash_file_hdr_g2 *) p;

	flash_cmd.size = sizeof(struct be_cmd_write_flashrom) + 32*1024;
	flash_cmd.va = dma_alloc_coherent(&adapter->pdev->dev, flash_cmd.size,
					  &flash_cmd.dma, GFP_KERNEL);
	if (!flash_cmd.va) {
		status = -ENOMEM;
		dev_err(&adapter->pdev->dev,
			"Memory allocation failure while flashing\n");
		goto be_fw_exit;
	}

	if ((adapter->generation == BE_GEN3) &&
			(get_ufigen_type(fhdr) == BE_GEN3)) {
		fhdr3 = (struct flash_file_hdr_g3 *) fw->data;
		num_imgs = le32_to_cpu(fhdr3->num_imgs);
		for (i = 0; i < num_imgs; i++) {
			img_hdr_ptr = (struct image_hdr *) (fw->data +
					(sizeof(struct flash_file_hdr_g3) +
					 i * sizeof(struct image_hdr)));
			/* only imageid 1 is flashed here; other ids are
			 * skipped — presumably reserved for other ports */
			if (le32_to_cpu(img_hdr_ptr->imageid) == 1)
				status = be_flash_data(adapter, fw, &flash_cmd,
							num_imgs);
		}
	} else if ((adapter->generation == BE_GEN2) &&
			(get_ufigen_type(fhdr) == BE_GEN2)) {
		status = be_flash_data(adapter, fw, &flash_cmd, 0);
	} else {
		dev_err(&adapter->pdev->dev,
			"UFI and Interface are not compatible for flashing\n");
		status = -1;
	}

	dma_free_coherent(&adapter->pdev->dev, flash_cmd.size, flash_cmd.va,
			  flash_cmd.dma);
	if (status) {
		dev_err(&adapter->pdev->dev, "Firmware load error\n");
		goto be_fw_exit;
	}

	dev_info(&adapter->pdev->dev, "Firmware flashed successfully\n");

be_fw_exit:
	return status;
}
2994
2995int be_load_fw(struct be_adapter *adapter, u8 *fw_file)
2996{
2997 const struct firmware *fw;
2998 int status;
2999
3000 if (!netif_running(adapter->netdev)) {
3001 dev_err(&adapter->pdev->dev,
3002 "Firmware load not allowed (interface is down)\n");
3003 return -1;
3004 }
3005
3006 status = request_firmware(&fw, fw_file, &adapter->pdev->dev);
3007 if (status)
3008 goto fw_exit;
3009
3010 dev_info(&adapter->pdev->dev, "Flashing firmware file %s\n", fw_file);
3011
3012 if (lancer_chip(adapter))
3013 status = lancer_fw_download(adapter, fw);
3014 else
3015 status = be_fw_download(adapter, fw);
3016
Ajit Khaparde84517482009-09-04 03:12:16 +00003017fw_exit:
3018 release_firmware(fw);
3019 return status;
3020}
3021
/* net_device callbacks for the be2net interface, including the SR-IOV
 * VF management hooks (MAC/VLAN/tx-rate/config) and an optional netpoll
 * controller.
 */
static const struct net_device_ops be_netdev_ops = {
	.ndo_open = be_open,
	.ndo_stop = be_close,
	.ndo_start_xmit = be_xmit,
	.ndo_set_rx_mode = be_set_rx_mode,
	.ndo_set_mac_address = be_mac_addr_set,
	.ndo_change_mtu = be_change_mtu,
	.ndo_get_stats64 = be_get_stats64,
	.ndo_validate_addr = eth_validate_addr,
	.ndo_vlan_rx_add_vid = be_vlan_add_vid,
	.ndo_vlan_rx_kill_vid = be_vlan_rem_vid,
	.ndo_set_vf_mac = be_set_vf_mac,
	.ndo_set_vf_vlan = be_set_vf_vlan,
	.ndo_set_vf_tx_rate = be_set_vf_tx_rate,
	.ndo_get_vf_config = be_get_vf_config,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller = be_netpoll,
#endif
};
3041
/* One-time net_device initialization: advertise offload features, hook
 * up the netdev/ethtool ops and register one NAPI context per event
 * queue.
 */
static void be_netdev_init(struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_eq_obj *eqo;
	int i;

	/* user-toggleable offloads: SG, TSO(v4/v6), checksum offloads,
	 * VLAN tag insertion */
	netdev->hw_features |= NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6 |
		NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_RXCSUM |
		NETIF_F_HW_VLAN_TX;
	/* RX hashing only makes sense with multiple RX queues */
	if (be_multi_rxq(adapter))
		netdev->hw_features |= NETIF_F_RXHASH;

	/* everything above is enabled by default; VLAN RX strip/filter are
	 * always-on (not in hw_features, so not user-toggleable) */
	netdev->features |= netdev->hw_features |
		NETIF_F_HW_VLAN_RX | NETIF_F_HW_VLAN_FILTER;

	netdev->vlan_features |= NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6 |
		NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;

	netdev->flags |= IFF_MULTICAST;

	netif_set_gso_max_size(netdev, 65535);

	netdev->netdev_ops = &be_netdev_ops;

	SET_ETHTOOL_OPS(netdev, &be_ethtool_ops);

	/* one NAPI poller per event queue */
	for_all_evt_queues(adapter, eqo, i)
		netif_napi_add(netdev, &eqo->napi, be_poll, BE_NAPI_WEIGHT);
}
3071
3072static void be_unmap_pci_bars(struct be_adapter *adapter)
3073{
Sathya Perla8788fdc2009-07-27 22:52:03 +00003074 if (adapter->csr)
3075 iounmap(adapter->csr);
3076 if (adapter->db)
3077 iounmap(adapter->db);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003078}
3079
3080static int be_map_pci_bars(struct be_adapter *adapter)
3081{
3082 u8 __iomem *addr;
Sathya Perladb3ea782011-08-22 19:41:52 +00003083 int db_reg;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003084
Sathya Perlafe6d2a32010-11-21 23:25:50 +00003085 if (lancer_chip(adapter)) {
3086 addr = ioremap_nocache(pci_resource_start(adapter->pdev, 0),
3087 pci_resource_len(adapter->pdev, 0));
3088 if (addr == NULL)
3089 return -ENOMEM;
3090 adapter->db = addr;
3091 return 0;
3092 }
3093
Sarveshwar Bandiba343c72010-03-31 02:56:12 +00003094 if (be_physfn(adapter)) {
3095 addr = ioremap_nocache(pci_resource_start(adapter->pdev, 2),
3096 pci_resource_len(adapter->pdev, 2));
3097 if (addr == NULL)
3098 return -ENOMEM;
3099 adapter->csr = addr;
3100 }
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003101
Sarveshwar Bandiba343c72010-03-31 02:56:12 +00003102 if (adapter->generation == BE_GEN2) {
Sarveshwar Bandiba343c72010-03-31 02:56:12 +00003103 db_reg = 4;
3104 } else {
Sarveshwar Bandiba343c72010-03-31 02:56:12 +00003105 if (be_physfn(adapter))
3106 db_reg = 4;
3107 else
3108 db_reg = 0;
3109 }
3110 addr = ioremap_nocache(pci_resource_start(adapter->pdev, db_reg),
3111 pci_resource_len(adapter->pdev, db_reg));
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003112 if (addr == NULL)
3113 goto pci_map_err;
Sathya Perla8788fdc2009-07-27 22:52:03 +00003114 adapter->db = addr;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003115
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003116 return 0;
3117pci_map_err:
3118 be_unmap_pci_bars(adapter);
3119 return -ENOMEM;
3120}
3121
3122
3123static void be_ctrl_cleanup(struct be_adapter *adapter)
3124{
Sathya Perla8788fdc2009-07-27 22:52:03 +00003125 struct be_dma_mem *mem = &adapter->mbox_mem_alloced;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003126
3127 be_unmap_pci_bars(adapter);
3128
3129 if (mem->va)
Ivan Vecera2b7bceb2011-02-02 08:05:12 +00003130 dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
3131 mem->dma);
Sathya Perlae7b909a2009-11-22 22:01:10 +00003132
Sathya Perla5b8821b2011-08-02 19:57:44 +00003133 mem = &adapter->rx_filter;
Sathya Perlae7b909a2009-11-22 22:01:10 +00003134 if (mem->va)
Ivan Vecera2b7bceb2011-02-02 08:05:12 +00003135 dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
3136 mem->dma);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003137}
3138
/* Initialize the control path: map PCI BARs, allocate the mailbox and
 * rx-filter DMA buffers, and set up the locks/completion used by the
 * mailbox and MCC command paths.  Unwinds in reverse order on failure.
 *
 * The mailbox must be 16-byte aligned; an over-allocated buffer
 * (mbox_mem_alloced) backs the aligned view (mbox_mem).
 * Returns 0 or a negative error code.
 */
static int be_ctrl_init(struct be_adapter *adapter)
{
	struct be_dma_mem *mbox_mem_alloc = &adapter->mbox_mem_alloced;
	struct be_dma_mem *mbox_mem_align = &adapter->mbox_mem;
	struct be_dma_mem *rx_filter = &adapter->rx_filter;
	int status;

	status = be_map_pci_bars(adapter);
	if (status)
		goto done;

	/* over-allocate by 16 bytes so the mailbox can be aligned */
	mbox_mem_alloc->size = sizeof(struct be_mcc_mailbox) + 16;
	mbox_mem_alloc->va = dma_alloc_coherent(&adapter->pdev->dev,
						mbox_mem_alloc->size,
						&mbox_mem_alloc->dma,
						GFP_KERNEL);
	if (!mbox_mem_alloc->va) {
		status = -ENOMEM;
		goto unmap_pci_bars;
	}
	/* aligned view into the allocation above; both the CPU and bus
	 * addresses are rounded up together */
	mbox_mem_align->size = sizeof(struct be_mcc_mailbox);
	mbox_mem_align->va = PTR_ALIGN(mbox_mem_alloc->va, 16);
	mbox_mem_align->dma = PTR_ALIGN(mbox_mem_alloc->dma, 16);
	memset(mbox_mem_align->va, 0, sizeof(struct be_mcc_mailbox));

	rx_filter->size = sizeof(struct be_cmd_req_rx_filter);
	rx_filter->va = dma_alloc_coherent(&adapter->pdev->dev, rx_filter->size,
					&rx_filter->dma, GFP_KERNEL);
	if (rx_filter->va == NULL) {
		status = -ENOMEM;
		goto free_mbox;
	}
	memset(rx_filter->va, 0, rx_filter->size);

	mutex_init(&adapter->mbox_lock);
	spin_lock_init(&adapter->mcc_lock);
	spin_lock_init(&adapter->mcc_cq_lock);

	init_completion(&adapter->flash_compl);
	/* saved state is restored on EEH/error recovery */
	pci_save_state(adapter->pdev);
	return 0;

free_mbox:
	dma_free_coherent(&adapter->pdev->dev, mbox_mem_alloc->size,
			  mbox_mem_alloc->va, mbox_mem_alloc->dma);

unmap_pci_bars:
	be_unmap_pci_bars(adapter);

done:
	return status;
}
3191
3192static void be_stats_cleanup(struct be_adapter *adapter)
3193{
Sathya Perla3abcded2010-10-03 22:12:27 -07003194 struct be_dma_mem *cmd = &adapter->stats_cmd;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003195
3196 if (cmd->va)
Ivan Vecera2b7bceb2011-02-02 08:05:12 +00003197 dma_free_coherent(&adapter->pdev->dev, cmd->size,
3198 cmd->va, cmd->dma);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003199}
3200
3201static int be_stats_init(struct be_adapter *adapter)
3202{
Sathya Perla3abcded2010-10-03 22:12:27 -07003203 struct be_dma_mem *cmd = &adapter->stats_cmd;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003204
Selvin Xavier005d5692011-05-16 07:36:35 +00003205 if (adapter->generation == BE_GEN2) {
Ajit Khaparde89a88ab2011-05-16 07:36:18 +00003206 cmd->size = sizeof(struct be_cmd_req_get_stats_v0);
Selvin Xavier005d5692011-05-16 07:36:35 +00003207 } else {
3208 if (lancer_chip(adapter))
3209 cmd->size = sizeof(struct lancer_cmd_req_pport_stats);
3210 else
3211 cmd->size = sizeof(struct be_cmd_req_get_stats_v1);
3212 }
Ivan Vecera2b7bceb2011-02-02 08:05:12 +00003213 cmd->va = dma_alloc_coherent(&adapter->pdev->dev, cmd->size, &cmd->dma,
3214 GFP_KERNEL);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003215 if (cmd->va == NULL)
3216 return -1;
David S. Millerd291b9a2010-01-28 21:36:21 -08003217 memset(cmd->va, 0, cmd->size);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003218 return 0;
3219}
3220
/* PCI remove callback: tear down the device in the reverse order of
 * probe.  The netdev is unregistered first so no new traffic or ndo
 * calls arrive while the rest is dismantled; free_netdev() comes last
 * because the adapter struct lives inside it.
 */
static void __devexit be_remove(struct pci_dev *pdev)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);

	if (!adapter)
		return;

	unregister_netdev(adapter->netdev);

	be_clear(adapter);

	be_stats_cleanup(adapter);

	be_ctrl_cleanup(adapter);

	be_sriov_disable(adapter);

	pci_set_drvdata(pdev, NULL);
	pci_release_regions(pdev);
	pci_disable_device(pdev);

	/* frees the embedded be_adapter as well */
	free_netdev(adapter->netdev);
}
3244
Sathya Perla2243e2e2009-11-22 22:02:03 +00003245static int be_get_config(struct be_adapter *adapter)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003246{
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003247 int status;
Sathya Perla43a04fdc2009-10-14 20:21:17 +00003248
Sathya Perla3abcded2010-10-03 22:12:27 -07003249 status = be_cmd_query_fw_cfg(adapter, &adapter->port_num,
3250 &adapter->function_mode, &adapter->function_caps);
Sathya Perla2243e2e2009-11-22 22:02:03 +00003251 if (status)
3252 return status;
3253
Sathya Perla752961a2011-10-24 02:45:03 +00003254 if (adapter->function_mode & FLEX10_MODE)
Ajit Khaparde82903e42010-02-09 01:34:57 +00003255 adapter->max_vlans = BE_NUM_VLANS_SUPPORTED/4;
3256 else
3257 adapter->max_vlans = BE_NUM_VLANS_SUPPORTED;
3258
Ajit Khaparde9e1453c2011-02-20 11:42:22 +00003259 status = be_cmd_get_cntl_attributes(adapter);
3260 if (status)
3261 return status;
3262
Sathya Perla2243e2e2009-11-22 22:02:03 +00003263 return 0;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003264}
3265
/* Determine the adapter generation (BE_GEN2/BE_GEN3) from the PCI
 * device ID.  For the Lancer-class IDs (OC_DEVICE_ID3/4) the SLI_INTF
 * register is additionally validated and the SLI family recorded.
 * Returns 0, or -EINVAL if the SLI_INTF register contents are invalid.
 */
static int be_dev_family_check(struct be_adapter *adapter)
{
	struct pci_dev *pdev = adapter->pdev;
	u32 sli_intf = 0, if_type;

	switch (pdev->device) {
	case BE_DEVICE_ID1:
	case OC_DEVICE_ID1:
		adapter->generation = BE_GEN2;
		break;
	case BE_DEVICE_ID2:
	case OC_DEVICE_ID2:
	case OC_DEVICE_ID5:
		adapter->generation = BE_GEN3;
		break;
	case OC_DEVICE_ID3:
	case OC_DEVICE_ID4:
		pci_read_config_dword(pdev, SLI_INTF_REG_OFFSET, &sli_intf);
		if_type = (sli_intf & SLI_INTF_IF_TYPE_MASK) >>
						SLI_INTF_IF_TYPE_SHIFT;

		/* require a valid SLI_INTF signature and interface type 2 */
		if (((sli_intf & SLI_INTF_VALID_MASK) != SLI_INTF_VALID) ||
			if_type != 0x02) {
			dev_err(&pdev->dev, "SLI_INTF reg val is not valid\n");
			return -EINVAL;
		}
		adapter->sli_family = ((sli_intf & SLI_INTF_FAMILY_MASK) >>
					 SLI_INTF_FAMILY_SHIFT);
		adapter->generation = BE_GEN3;
		break;
	default:
		/* unknown device: generation left as 0 */
		adapter->generation = 0;
	}
	return 0;
}
3301
Padmanabh Ratnakar37eed1c2011-03-07 03:08:36 +00003302static int lancer_wait_ready(struct be_adapter *adapter)
3303{
Padmanabh Ratnakard8110f62011-11-25 05:48:23 +00003304#define SLIPORT_READY_TIMEOUT 30
Padmanabh Ratnakar37eed1c2011-03-07 03:08:36 +00003305 u32 sliport_status;
3306 int status = 0, i;
3307
3308 for (i = 0; i < SLIPORT_READY_TIMEOUT; i++) {
3309 sliport_status = ioread32(adapter->db + SLIPORT_STATUS_OFFSET);
3310 if (sliport_status & SLIPORT_STATUS_RDY_MASK)
3311 break;
3312
Padmanabh Ratnakard8110f62011-11-25 05:48:23 +00003313 msleep(1000);
Padmanabh Ratnakar37eed1c2011-03-07 03:08:36 +00003314 }
3315
3316 if (i == SLIPORT_READY_TIMEOUT)
3317 status = -1;
3318
3319 return status;
3320}
3321
/* Wait for the Lancer port to become ready and, if it reports an error
 * that is flagged as recoverable (ERR + RN "reset needed" both set),
 * trigger a physical reset via SLIPORT_CONTROL and re-check.  An error
 * without the reset-needed flag (or vice versa) is unrecoverable here.
 * Returns 0 when the port ends up ready and error-free, -1 otherwise.
 */
static int lancer_test_and_set_rdy_state(struct be_adapter *adapter)
{
	int status;
	u32 sliport_status, err, reset_needed;
	status = lancer_wait_ready(adapter);
	if (!status) {
		sliport_status = ioread32(adapter->db + SLIPORT_STATUS_OFFSET);
		err = sliport_status & SLIPORT_STATUS_ERR_MASK;
		reset_needed = sliport_status & SLIPORT_STATUS_RN_MASK;
		if (err && reset_needed) {
			/* request a physical (IP) reset of the port */
			iowrite32(SLI_PORT_CONTROL_IP_MASK,
				  adapter->db + SLIPORT_CONTROL_OFFSET);

			/* check adapter has corrected the error */
			status = lancer_wait_ready(adapter);
			sliport_status = ioread32(adapter->db +
						  SLIPORT_STATUS_OFFSET);
			sliport_status &= (SLIPORT_STATUS_ERR_MASK |
						SLIPORT_STATUS_RN_MASK);
			/* still erroring or never became ready → fail */
			if (status || sliport_status)
				status = -1;
		} else if (err || reset_needed) {
			status = -1;
		}
	}
	return status;
}
3349
/* Called periodically from be_worker() on Lancer chips: if the SLIPORT
 * status register reports an error, attempt a full function-level
 * recovery — reset the port, tear the function down (be_clear) and
 * bring it back up (be_setup/be_open), detaching the netdev around the
 * operation.  Skipped while an EEH or UE condition is already being
 * handled.
 */
static void lancer_test_and_recover_fn_err(struct be_adapter *adapter)
{
	int status;
	u32 sliport_status;

	/* don't interfere with EEH / unrecoverable-error handling */
	if (adapter->eeh_err || adapter->ue_detected)
		return;

	sliport_status = ioread32(adapter->db + SLIPORT_STATUS_OFFSET);

	if (sliport_status & SLIPORT_STATUS_ERR_MASK) {
		dev_err(&adapter->pdev->dev,
			"Adapter in error state."
			"Trying to recover.\n");

		status = lancer_test_and_set_rdy_state(adapter);
		if (status)
			goto err;

		netif_device_detach(adapter->netdev);

		if (netif_running(adapter->netdev))
			be_close(adapter->netdev);

		be_clear(adapter);

		/* previous FW-timeout state no longer applies post-reset */
		adapter->fw_timeout = false;

		status = be_setup(adapter);
		if (status)
			goto err;

		if (netif_running(adapter->netdev)) {
			status = be_open(adapter->netdev);
			if (status)
				goto err;
		}

		netif_device_attach(adapter->netdev);

		dev_err(&adapter->pdev->dev,
			"Adapter error recovery succeeded\n");
	}
	return;
err:
	dev_err(&adapter->pdev->dev,
		"Adapter error recovery failed\n");
}
3398
/* Periodic (1 second) housekeeping work item, first scheduled from
 * be_setup(): runs error detection/recovery, refreshes firmware stats,
 * replenishes RX rings that ran dry and updates EQ delay (interrupt
 * moderation), then reschedules itself.
 */
static void be_worker(struct work_struct *work)
{
	struct be_adapter *adapter =
		container_of(work, struct be_adapter, work.work);
	struct be_rx_obj *rxo;
	struct be_eq_obj *eqo;
	int i;

	/* Lancer needs an explicit periodic check for function errors */
	if (lancer_chip(adapter))
		lancer_test_and_recover_fn_err(adapter);

	be_detect_dump_ue(adapter);

	/* when interrupts are not yet enabled, just reap any pending
	 * mcc completions */
	if (!netif_running(adapter->netdev)) {
		be_process_mcc(adapter);
		goto reschedule;
	}

	/* issue a new stats request only when none is outstanding
	 * (stats_cmd_sent presumably cleared on completion — see the
	 * stats completion path) */
	if (!adapter->stats_cmd_sent) {
		if (lancer_chip(adapter))
			lancer_cmd_get_pport_stats(adapter,
						&adapter->stats_cmd);
		else
			be_cmd_get_stats(adapter, &adapter->stats_cmd);
	}

	/* repost buffers to RX rings that starved since the last tick */
	for_all_rx_queues(adapter, rxo, i) {
		if (rxo->rx_post_starved) {
			rxo->rx_post_starved = false;
			be_post_rx_frags(rxo, GFP_KERNEL);
		}
	}

	/* adapt per-EQ interrupt delay to the observed event rate */
	for_all_evt_queues(adapter, eqo, i)
		be_eqd_update(adapter, eqo);

reschedule:
	adapter->work_counter++;
	schedule_delayed_work(&adapter->work, msecs_to_jiffies(1000));
}
3441
/* PCI probe: bring up one adapter function.
 *
 * Sequence: enable PCI device and claim BARs; allocate the multi-queue
 * netdev; identify the chip family; configure DMA masks (64-bit with a
 * 32-bit fallback); enable SR-IOV; map/init control structures; sync
 * with firmware readiness (Lancer port-reset path vs. legacy POST);
 * initialize FW command interface; then stats, config, be_setup() and
 * netdev registration.  Errors unwind through the goto chain in the
 * exact reverse order of acquisition.
 *
 * Returns 0 on success or a negative errno.
 */
static int __devinit be_probe(struct pci_dev *pdev,
			const struct pci_device_id *pdev_id)
{
	int status = 0;
	struct be_adapter *adapter;
	struct net_device *netdev;

	status = pci_enable_device(pdev);
	if (status)
		goto do_none;

	status = pci_request_regions(pdev, DRV_NAME);
	if (status)
		goto disable_dev;
	pci_set_master(pdev);

	netdev = alloc_etherdev_mq(sizeof(struct be_adapter), MAX_TX_QS);
	if (netdev == NULL) {
		status = -ENOMEM;
		goto rel_reg;
	}
	adapter = netdev_priv(netdev);
	adapter->pdev = pdev;
	pci_set_drvdata(pdev, adapter);

	/* Determine chip generation (BE2/BE3/Lancer) before touching HW */
	status = be_dev_family_check(adapter);
	if (status)
		goto free_netdev;

	adapter->netdev = netdev;
	SET_NETDEV_DEV(netdev, &pdev->dev);

	/* Prefer 64-bit DMA; fall back to 32-bit if unavailable */
	status = dma_set_mask(&pdev->dev, DMA_BIT_MASK(64));
	if (!status) {
		netdev->features |= NETIF_F_HIGHDMA;
	} else {
		status = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
		if (status) {
			dev_err(&pdev->dev, "Could not set PCI DMA Mask\n");
			goto free_netdev;
		}
	}

	status = be_sriov_enable(adapter);
	if (status)
		goto free_netdev;

	status = be_ctrl_init(adapter);
	if (status)
		goto disable_sriov;

	/* Lancer: wait for SLI port readiness; if not ready, trigger a
	 * port reset and retry before declaring the adapter unusable.
	 */
	if (lancer_chip(adapter)) {
		status = lancer_wait_ready(adapter);
		if (!status) {
			iowrite32(SLI_PORT_CONTROL_IP_MASK,
					adapter->db + SLIPORT_CONTROL_OFFSET);
			status = lancer_test_and_set_rdy_state(adapter);
		}
		if (status) {
			dev_err(&pdev->dev, "Adapter in non recoverable error\n");
			goto ctrl_clean;
		}
	}

	/* sync up with fw's ready state */
	if (be_physfn(adapter)) {
		status = be_cmd_POST(adapter);
		if (status)
			goto ctrl_clean;
	}

	/* tell fw we're ready to fire cmds */
	status = be_cmd_fw_init(adapter);
	if (status)
		goto ctrl_clean;

	status = be_cmd_reset_function(adapter);
	if (status)
		goto ctrl_clean;

	/* The INTR bit may be set in the card when probed by a kdump kernel
	 * after a crash.
	 */
	if (!lancer_chip(adapter))
		be_intr_set(adapter, false);

	status = be_stats_init(adapter);
	if (status)
		goto ctrl_clean;

	status = be_get_config(adapter);
	if (status)
		goto stats_clean;

	INIT_DELAYED_WORK(&adapter->work, be_worker);
	adapter->rx_fc = adapter->tx_fc = true;

	status = be_setup(adapter);
	if (status)
		goto msix_disable;

	be_netdev_init(netdev);
	status = register_netdev(netdev);
	if (status != 0)
		goto unsetup;

	dev_info(&pdev->dev, "%s: %s port %d\n", netdev->name, nic_name(pdev),
		adapter->port_num);

	return 0;

	/* Error unwind: strict reverse order of the setup steps above */
unsetup:
	be_clear(adapter);
msix_disable:
	be_msix_disable(adapter);
stats_clean:
	be_stats_cleanup(adapter);
ctrl_clean:
	be_ctrl_cleanup(adapter);
disable_sriov:
	be_sriov_disable(adapter);
free_netdev:
	free_netdev(netdev);
	pci_set_drvdata(pdev, NULL);
rel_reg:
	pci_release_regions(pdev);
disable_dev:
	pci_disable_device(pdev);
do_none:
	dev_err(&pdev->dev, "%s initialization failed\n", nic_name(pdev));
	return status;
}
3574
/* Legacy PM suspend: arm WoL if configured, quiesce the interface
 * (detach, close under rtnl), release HW resources, then power down
 * the PCI device into the requested sleep state.
 */
static int be_suspend(struct pci_dev *pdev, pm_message_t state)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);
	struct net_device *netdev = adapter->netdev;

	/* Program Wake-on-LAN before the device loses main power */
	if (adapter->wol)
		be_setup_wol(adapter, true);

	netif_device_detach(netdev);
	if (netif_running(netdev)) {
		rtnl_lock();
		be_close(netdev);
		rtnl_unlock();
	}
	be_clear(adapter);

	pci_save_state(pdev);
	pci_disable_device(pdev);
	pci_set_power_state(pdev, pci_choose_state(pdev, state));
	return 0;
}
3596
3597static int be_resume(struct pci_dev *pdev)
3598{
3599 int status = 0;
3600 struct be_adapter *adapter = pci_get_drvdata(pdev);
3601 struct net_device *netdev = adapter->netdev;
3602
3603 netif_device_detach(netdev);
3604
3605 status = pci_enable_device(pdev);
3606 if (status)
3607 return status;
3608
3609 pci_set_power_state(pdev, 0);
3610 pci_restore_state(pdev);
3611
Sathya Perla2243e2e2009-11-22 22:02:03 +00003612 /* tell fw we're ready to fire cmds */
3613 status = be_cmd_fw_init(adapter);
3614 if (status)
3615 return status;
3616
Sarveshwar Bandi9b0365f2009-08-12 21:01:29 +00003617 be_setup(adapter);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003618 if (netif_running(netdev)) {
3619 rtnl_lock();
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003620 be_open(netdev);
3621 rtnl_unlock();
3622 }
3623 netif_device_attach(netdev);
Ajit Khaparde71d8d1b2009-12-03 06:16:59 +00003624
3625 if (adapter->wol)
3626 be_setup_wol(adapter, false);
Ajit Khapardea4ca0552011-02-11 13:38:03 +00003627
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003628 return 0;
3629}
3630
/*
 * An FLR will stop BE from DMAing any data.
 */
static void be_shutdown(struct pci_dev *pdev)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);

	/* drvdata may be NULL if probe never completed */
	if (!adapter)
		return;

	/* Stop the periodic worker before tearing the device down */
	cancel_delayed_work_sync(&adapter->work);

	netif_device_detach(adapter->netdev);

	/* Arm Wake-on-LAN across the shutdown if configured */
	if (adapter->wol)
		be_setup_wol(adapter, true);

	/* Function-level reset stops all DMA (see comment above) */
	be_cmd_reset_function(adapter);

	pci_disable_device(pdev);
}
3652
/* EEH callback: a PCI channel error was detected.
 *
 * Marks the adapter as being in EEH recovery, quiesces the interface
 * and releases HW resources.  Returns DISCONNECT for a permanent
 * failure, otherwise NEED_RESET so the EEH core calls be_eeh_reset().
 */
static pci_ers_result_t be_eeh_err_detected(struct pci_dev *pdev,
				pci_channel_state_t state)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);
	struct net_device *netdev = adapter->netdev;

	dev_err(&adapter->pdev->dev, "EEH error detected\n");

	/* Gate other paths (e.g. be_worker recovery) out of the HW */
	adapter->eeh_err = true;

	netif_device_detach(netdev);

	if (netif_running(netdev)) {
		rtnl_lock();
		be_close(netdev);
		rtnl_unlock();
	}
	be_clear(adapter);

	if (state == pci_channel_io_perm_failure)
		return PCI_ERS_RESULT_DISCONNECT;

	pci_disable_device(pdev);

	return PCI_ERS_RESULT_NEED_RESET;
}
3679
/* EEH callback: the slot has been reset.
 *
 * Clears the error flags, re-enables the PCI device, restores config
 * space and verifies firmware readiness via POST.  Returns RECOVERED
 * on success so the EEH core proceeds to be_eeh_resume(), otherwise
 * DISCONNECT.
 */
static pci_ers_result_t be_eeh_reset(struct pci_dev *pdev)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);
	int status;

	dev_info(&adapter->pdev->dev, "EEH reset\n");
	adapter->eeh_err = false;
	adapter->ue_detected = false;
	adapter->fw_timeout = false;

	status = pci_enable_device(pdev);
	if (status)
		return PCI_ERS_RESULT_DISCONNECT;

	pci_set_master(pdev);
	pci_set_power_state(pdev, 0);
	pci_restore_state(pdev);

	/* Check if card is ok and fw is ready */
	status = be_cmd_POST(adapter);
	if (status)
		return PCI_ERS_RESULT_DISCONNECT;

	return PCI_ERS_RESULT_RECOVERED;
}
3705
/* EEH callback: traffic may resume.
 *
 * Re-initializes the FW command interface, rebuilds driver state and
 * re-opens the interface if it was running.  Failures can only be
 * logged at this point — the EEH core offers no further recourse.
 */
static void be_eeh_resume(struct pci_dev *pdev)
{
	int status = 0;
	struct be_adapter *adapter = pci_get_drvdata(pdev);
	struct net_device *netdev = adapter->netdev;

	dev_info(&adapter->pdev->dev, "EEH resume\n");

	pci_save_state(pdev);

	/* tell fw we're ready to fire cmds */
	status = be_cmd_fw_init(adapter);
	if (status)
		goto err;

	status = be_setup(adapter);
	if (status)
		goto err;

	if (netif_running(netdev)) {
		status = be_open(netdev);
		if (status)
			goto err;
	}
	netif_device_attach(netdev);
	return;
err:
	dev_err(&adapter->pdev->dev, "EEH resume failed\n");
}
3735
/* PCI error (EEH) recovery callbacks, wired into be_driver below */
static struct pci_error_handlers be_eeh_handlers = {
	.error_detected = be_eeh_err_detected,
	.slot_reset = be_eeh_reset,
	.resume = be_eeh_resume,
};
3741
/* PCI driver descriptor: probe/remove, legacy suspend/resume,
 * shutdown (FLR) and EEH error handling.
 */
static struct pci_driver be_driver = {
	.name = DRV_NAME,
	.id_table = be_dev_ids,
	.probe = be_probe,
	.remove = be_remove,
	.suspend = be_suspend,
	.resume = be_resume,
	.shutdown = be_shutdown,
	.err_handler = &be_eeh_handlers
};
3752
3753static int __init be_init_module(void)
3754{
Joe Perches8e95a202009-12-03 07:58:21 +00003755 if (rx_frag_size != 8192 && rx_frag_size != 4096 &&
3756 rx_frag_size != 2048) {
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003757 printk(KERN_WARNING DRV_NAME
3758 " : Module param rx_frag_size must be 2048/4096/8192."
3759 " Using 2048\n");
3760 rx_frag_size = 2048;
3761 }
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003762
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003763 return pci_register_driver(&be_driver);
3764}
3765module_init(be_init_module);
3766
/* Module exit point: unregister the PCI driver; the PCI core then
 * invokes be_remove() for every bound device.
 */
static void __exit be_exit_module(void)
{
	pci_unregister_driver(&be_driver);
}
module_exit(be_exit_module);