blob: 28f2b254d22726a20492ba6910a02f347a9f8dbd [file] [log] [blame]
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001/*
Ajit Khaparded2145cd2011-03-16 08:20:46 +00002 * Copyright (C) 2005 - 2011 Emulex
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003 * All rights reserved.
4 *
5 * This program is free software; you can redistribute it and/or
6 * modify it under the terms of the GNU General Public License version 2
7 * as published by the Free Software Foundation. The full GNU General
8 * Public License is included in this distribution in the file called COPYING.
9 *
10 * Contact Information:
Ajit Khaparded2145cd2011-03-16 08:20:46 +000011 * linux-drivers@emulex.com
Sathya Perla6b7c5b92009-03-11 23:32:03 -070012 *
Ajit Khaparded2145cd2011-03-16 08:20:46 +000013 * Emulex
14 * 3333 Susan Street
15 * Costa Mesa, CA 92626
Sathya Perla6b7c5b92009-03-11 23:32:03 -070016 */
17
Paul Gortmaker70c71602011-05-22 16:47:17 -040018#include <linux/prefetch.h>
Paul Gortmaker9d9779e2011-07-03 15:21:01 -040019#include <linux/module.h>
Sathya Perla6b7c5b92009-03-11 23:32:03 -070020#include "be.h"
Sathya Perla8788fdc2009-07-27 22:52:03 +000021#include "be_cmds.h"
Stephen Hemminger65f71b82009-03-27 00:25:24 -070022#include <asm/div64.h>
Sathya Perla6b7c5b92009-03-11 23:32:03 -070023
24MODULE_VERSION(DRV_VER);
25MODULE_DEVICE_TABLE(pci, be_dev_ids);
26MODULE_DESCRIPTION(DRV_DESC " " DRV_VER);
27MODULE_AUTHOR("ServerEngines Corporation");
28MODULE_LICENSE("GPL");
29
/* Number of PCI virtual functions to enable at probe time (SR-IOV);
 * 0 (the default) leaves VFs disabled. Read-only via sysfs. */
static unsigned int num_vfs;
module_param(num_vfs, uint, S_IRUGO);
MODULE_PARM_DESC(num_vfs, "Number of PCI VFs to initialize");

/* Size of each RX fragment buffer posted to the HW; default 2048 bytes. */
static ushort rx_frag_size = 2048;
module_param(rx_frag_size, ushort, S_IRUGO);
MODULE_PARM_DESC(rx_frag_size, "Size of a fragment that holds rcvd data.");
37
/* PCI IDs this driver binds to: BladeEngine (ServerEngines vendor ID)
 * and OneConnect (Emulex vendor ID) families. */
static DEFINE_PCI_DEVICE_TABLE(be_dev_ids) = {
	{ PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID1) },
	{ PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID2) },
	{ PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID1) },
	{ PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID2) },
	{ PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID3)},
	{ PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID4)},
	{ PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID5)},
	{ 0 }
};
MODULE_DEVICE_TABLE(pci, be_dev_ids);
/* UE Status Low CSR */
/* Human-readable names for each bit of the Unrecoverable Error status-low
 * register, indexed by bit position; printed when a UE is detected.
 * NOTE(review): some entries carry trailing spaces — they are emitted
 * verbatim in log output, so they are preserved byte-for-byte here. */
static const char * const ue_status_low_desc[] = {
	"CEV",
	"CTX",
	"DBUF",
	"ERX",
	"Host",
	"MPU",
	"NDMA",
	"PTC ",
	"RDMA ",
	"RXF ",
	"RXIPS ",
	"RXULP0 ",
	"RXULP1 ",
	"RXULP2 ",
	"TIM ",
	"TPOST ",
	"TPRE ",
	"TXIPS ",
	"TXULP0 ",
	"TXULP1 ",
	"UC ",
	"WDMA ",
	"TXULP2 ",
	"HOST1 ",
	"P0_OB_LINK ",
	"P1_OB_LINK ",
	"HOST_GPIO ",
	"MBOX ",
	"AXGMAC0",
	"AXGMAC1",
	"JTAG",
	"MPU_INTPEND"
};
/* UE Status High CSR */
/* Bit-position names for the Unrecoverable Error status-high register;
 * trailing "Unknown" entries pad the table to the register width. */
static const char * const ue_status_hi_desc[] = {
	"LPCMEMHOST",
	"MGMT_MAC",
	"PCS0ONLINE",
	"MPU_IRAM",
	"PCS1ONLINE",
	"PCTL0",
	"PCTL1",
	"PMEM",
	"RR",
	"TXPB",
	"RXPP",
	"XAUI",
	"TXP",
	"ARM",
	"IPC",
	"HOST2",
	"HOST3",
	"HOST4",
	"HOST5",
	"HOST6",
	"HOST7",
	"HOST8",
	"HOST9",
	"NETC",
	"Unknown",
	"Unknown",
	"Unknown",
	"Unknown",
	"Unknown",
	"Unknown",
	"Unknown",
	"Unknown"
};
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700119
Sathya Perla752961a2011-10-24 02:45:03 +0000120/* Is BE in a multi-channel mode */
121static inline bool be_is_mc(struct be_adapter *adapter) {
122 return (adapter->function_mode & FLEX10_MODE ||
123 adapter->function_mode & VNIC_MODE ||
124 adapter->function_mode & UMC_ENABLED);
125}
126
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700127static void be_queue_free(struct be_adapter *adapter, struct be_queue_info *q)
128{
129 struct be_dma_mem *mem = &q->dma_mem;
130 if (mem->va)
Ivan Vecera2b7bceb2011-02-02 08:05:12 +0000131 dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
132 mem->dma);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700133}
134
135static int be_queue_alloc(struct be_adapter *adapter, struct be_queue_info *q,
136 u16 len, u16 entry_size)
137{
138 struct be_dma_mem *mem = &q->dma_mem;
139
140 memset(q, 0, sizeof(*q));
141 q->len = len;
142 q->entry_size = entry_size;
143 mem->size = len * entry_size;
Ivan Vecera2b7bceb2011-02-02 08:05:12 +0000144 mem->va = dma_alloc_coherent(&adapter->pdev->dev, mem->size, &mem->dma,
145 GFP_KERNEL);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700146 if (!mem->va)
Sathya Perla10ef9ab2012-02-09 18:05:27 +0000147 return -ENOMEM;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700148 memset(mem->va, 0, mem->size);
149 return 0;
150}
151
Sathya Perla8788fdc2009-07-27 22:52:03 +0000152static void be_intr_set(struct be_adapter *adapter, bool enable)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700153{
Sathya Perladb3ea782011-08-22 19:41:52 +0000154 u32 reg, enabled;
Sathya Perla5f0b8492009-07-27 22:52:56 +0000155
Sathya Perlacf588472010-02-14 21:22:01 +0000156 if (adapter->eeh_err)
157 return;
158
Sathya Perladb3ea782011-08-22 19:41:52 +0000159 pci_read_config_dword(adapter->pdev, PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET,
160 &reg);
161 enabled = reg & MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
162
Sathya Perla5f0b8492009-07-27 22:52:56 +0000163 if (!enabled && enable)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700164 reg |= MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
Sathya Perla5f0b8492009-07-27 22:52:56 +0000165 else if (enabled && !enable)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700166 reg &= ~MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
Sathya Perla5f0b8492009-07-27 22:52:56 +0000167 else
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700168 return;
Sathya Perla5f0b8492009-07-27 22:52:56 +0000169
Sathya Perladb3ea782011-08-22 19:41:52 +0000170 pci_write_config_dword(adapter->pdev,
171 PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET, reg);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700172}
173
Sathya Perla8788fdc2009-07-27 22:52:03 +0000174static void be_rxq_notify(struct be_adapter *adapter, u16 qid, u16 posted)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700175{
176 u32 val = 0;
177 val |= qid & DB_RQ_RING_ID_MASK;
178 val |= posted << DB_RQ_NUM_POSTED_SHIFT;
Sathya Perlaf3eb62d2010-06-29 00:11:17 +0000179
180 wmb();
Sathya Perla8788fdc2009-07-27 22:52:03 +0000181 iowrite32(val, adapter->db + DB_RQ_OFFSET);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700182}
183
Sathya Perla8788fdc2009-07-27 22:52:03 +0000184static void be_txq_notify(struct be_adapter *adapter, u16 qid, u16 posted)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700185{
186 u32 val = 0;
187 val |= qid & DB_TXULP_RING_ID_MASK;
188 val |= (posted & DB_TXULP_NUM_POSTED_MASK) << DB_TXULP_NUM_POSTED_SHIFT;
Sathya Perlaf3eb62d2010-06-29 00:11:17 +0000189
190 wmb();
Sathya Perla8788fdc2009-07-27 22:52:03 +0000191 iowrite32(val, adapter->db + DB_TXULP1_OFFSET);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700192}
193
Sathya Perla8788fdc2009-07-27 22:52:03 +0000194static void be_eq_notify(struct be_adapter *adapter, u16 qid,
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700195 bool arm, bool clear_int, u16 num_popped)
196{
197 u32 val = 0;
198 val |= qid & DB_EQ_RING_ID_MASK;
Sathya Perlafe6d2a32010-11-21 23:25:50 +0000199 val |= ((qid & DB_EQ_RING_ID_EXT_MASK) <<
200 DB_EQ_RING_ID_EXT_MASK_SHIFT);
Sathya Perlacf588472010-02-14 21:22:01 +0000201
202 if (adapter->eeh_err)
203 return;
204
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700205 if (arm)
206 val |= 1 << DB_EQ_REARM_SHIFT;
207 if (clear_int)
208 val |= 1 << DB_EQ_CLR_SHIFT;
209 val |= 1 << DB_EQ_EVNT_SHIFT;
210 val |= num_popped << DB_EQ_NUM_POPPED_SHIFT;
Sathya Perla8788fdc2009-07-27 22:52:03 +0000211 iowrite32(val, adapter->db + DB_EQ_OFFSET);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700212}
213
Sathya Perla8788fdc2009-07-27 22:52:03 +0000214void be_cq_notify(struct be_adapter *adapter, u16 qid, bool arm, u16 num_popped)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700215{
216 u32 val = 0;
217 val |= qid & DB_CQ_RING_ID_MASK;
Sathya Perlafe6d2a32010-11-21 23:25:50 +0000218 val |= ((qid & DB_CQ_RING_ID_EXT_MASK) <<
219 DB_CQ_RING_ID_EXT_MASK_SHIFT);
Sathya Perlacf588472010-02-14 21:22:01 +0000220
221 if (adapter->eeh_err)
222 return;
223
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700224 if (arm)
225 val |= 1 << DB_CQ_REARM_SHIFT;
226 val |= num_popped << DB_CQ_NUM_POPPED_SHIFT;
Sathya Perla8788fdc2009-07-27 22:52:03 +0000227 iowrite32(val, adapter->db + DB_CQ_OFFSET);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700228}
229
/* ndo_set_mac_address handler: program a new MAC address on the interface.
 * Queries the currently-programmed MAC first; if it differs from the
 * requested one, the new pmac is added BEFORE the old one is deleted so
 * the port is never left without a programmed address. Returns 0 on
 * success, -EADDRNOTAVAIL for an invalid address, or a FW cmd status. */
static int be_mac_addr_set(struct net_device *netdev, void *p)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct sockaddr *addr = p;
	int status = 0;
	u8 current_mac[ETH_ALEN];
	/* remember the old pmac id; be_cmd_pmac_add overwrites adapter->pmac_id */
	u32 pmac_id = adapter->pmac_id;

	if (!is_valid_ether_addr(addr->sa_data))
		return -EADDRNOTAVAIL;

	status = be_cmd_mac_addr_query(adapter, current_mac,
				MAC_ADDRESS_TYPE_NETWORK, false,
				adapter->if_handle, 0);
	if (status)
		goto err;

	/* only reprogram HW when the address actually changes */
	if (memcmp(addr->sa_data, current_mac, ETH_ALEN)) {
		status = be_cmd_pmac_add(adapter, (u8 *)addr->sa_data,
				adapter->if_handle, &adapter->pmac_id, 0);
		if (status)
			goto err;

		/* new entry is in place; drop the stale one */
		be_cmd_pmac_del(adapter, adapter->if_handle, pmac_id, 0);
	}
	memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
	return 0;
err:
	dev_err(&adapter->pdev->dev, "MAC %pM set Failed\n", addr->sa_data);
	return status;
}
261
/* Mirror BE2 (v0-format) HW statistics from the stats-cmd DMA buffer into
 * the driver's generation-independent be_drv_stats structure. The buffer
 * is byte-swapped in place from LE first. */
static void populate_be2_stats(struct be_adapter *adapter)
{
	struct be_hw_stats_v0 *hw_stats = hw_stats_from_cmd(adapter);
	struct be_pmem_stats *pmem_sts = &hw_stats->pmem;
	struct be_rxf_stats_v0 *rxf_stats = &hw_stats->rxf;
	struct be_port_rxf_stats_v0 *port_stats =
		&rxf_stats->port[adapter->port_num];
	struct be_drv_stats *drvs = &adapter->drv_stats;

	be_dws_le_to_cpu(hw_stats, sizeof(*hw_stats));
	drvs->rx_pause_frames = port_stats->rx_pause_frames;
	drvs->rx_crc_errors = port_stats->rx_crc_errors;
	drvs->rx_control_frames = port_stats->rx_control_frames;
	drvs->rx_in_range_errors = port_stats->rx_in_range_errors;
	drvs->rx_frame_too_long = port_stats->rx_frame_too_long;
	drvs->rx_dropped_runt = port_stats->rx_dropped_runt;
	drvs->rx_ip_checksum_errs = port_stats->rx_ip_checksum_errs;
	drvs->rx_tcp_checksum_errs = port_stats->rx_tcp_checksum_errs;
	drvs->rx_udp_checksum_errs = port_stats->rx_udp_checksum_errs;
	drvs->rxpp_fifo_overflow_drop = port_stats->rx_fifo_overflow;
	drvs->rx_dropped_tcp_length = port_stats->rx_dropped_tcp_length;
	drvs->rx_dropped_too_small = port_stats->rx_dropped_too_small;
	drvs->rx_dropped_too_short = port_stats->rx_dropped_too_short;
	drvs->rx_out_range_errors = port_stats->rx_out_range_errors;
	drvs->rx_input_fifo_overflow_drop = port_stats->rx_input_fifo_overflow;
	drvs->rx_dropped_header_too_small =
		port_stats->rx_dropped_header_too_small;
	/* v0 HW counts address and vlan mismatches separately; fold both
	 * into the single driver counter */
	drvs->rx_address_mismatch_drops =
		port_stats->rx_address_mismatch_drops +
		port_stats->rx_vlan_mismatch_drops;
	drvs->rx_alignment_symbol_errors =
		port_stats->rx_alignment_symbol_errors;

	drvs->tx_pauseframes = port_stats->tx_pauseframes;
	drvs->tx_controlframes = port_stats->tx_controlframes;

	/* jabber events live per-port in the rxf block on v0 */
	if (adapter->port_num)
		drvs->jabber_events = rxf_stats->port1_jabber_events;
	else
		drvs->jabber_events = rxf_stats->port0_jabber_events;
	drvs->rx_drops_no_pbuf = rxf_stats->rx_drops_no_pbuf;
	drvs->rx_drops_no_erx_descr = rxf_stats->rx_drops_no_erx_descr;
	drvs->forwarded_packets = rxf_stats->forwarded_packets;
	drvs->rx_drops_mtu = rxf_stats->rx_drops_mtu;
	drvs->rx_drops_no_tpre_descr = rxf_stats->rx_drops_no_tpre_descr;
	drvs->rx_drops_too_many_frags = rxf_stats->rx_drops_too_many_frags;
	adapter->drv_stats.eth_red_drops = pmem_sts->eth_red_drops;
}
310
/* Mirror BE3 (v1-format) HW statistics from the stats-cmd DMA buffer into
 * the driver's generation-independent be_drv_stats structure. Unlike v0,
 * v1 keeps jabber events and address-mismatch drops per-port directly. */
static void populate_be3_stats(struct be_adapter *adapter)
{
	struct be_hw_stats_v1 *hw_stats = hw_stats_from_cmd(adapter);
	struct be_pmem_stats *pmem_sts = &hw_stats->pmem;
	struct be_rxf_stats_v1 *rxf_stats = &hw_stats->rxf;
	struct be_port_rxf_stats_v1 *port_stats =
		&rxf_stats->port[adapter->port_num];
	struct be_drv_stats *drvs = &adapter->drv_stats;

	be_dws_le_to_cpu(hw_stats, sizeof(*hw_stats));
	drvs->pmem_fifo_overflow_drop = port_stats->pmem_fifo_overflow_drop;
	drvs->rx_priority_pause_frames = port_stats->rx_priority_pause_frames;
	drvs->rx_pause_frames = port_stats->rx_pause_frames;
	drvs->rx_crc_errors = port_stats->rx_crc_errors;
	drvs->rx_control_frames = port_stats->rx_control_frames;
	drvs->rx_in_range_errors = port_stats->rx_in_range_errors;
	drvs->rx_frame_too_long = port_stats->rx_frame_too_long;
	drvs->rx_dropped_runt = port_stats->rx_dropped_runt;
	drvs->rx_ip_checksum_errs = port_stats->rx_ip_checksum_errs;
	drvs->rx_tcp_checksum_errs = port_stats->rx_tcp_checksum_errs;
	drvs->rx_udp_checksum_errs = port_stats->rx_udp_checksum_errs;
	drvs->rx_dropped_tcp_length = port_stats->rx_dropped_tcp_length;
	drvs->rx_dropped_too_small = port_stats->rx_dropped_too_small;
	drvs->rx_dropped_too_short = port_stats->rx_dropped_too_short;
	drvs->rx_out_range_errors = port_stats->rx_out_range_errors;
	drvs->rx_dropped_header_too_small =
		port_stats->rx_dropped_header_too_small;
	drvs->rx_input_fifo_overflow_drop =
		port_stats->rx_input_fifo_overflow_drop;
	drvs->rx_address_mismatch_drops = port_stats->rx_address_mismatch_drops;
	drvs->rx_alignment_symbol_errors =
		port_stats->rx_alignment_symbol_errors;
	drvs->rxpp_fifo_overflow_drop = port_stats->rxpp_fifo_overflow_drop;
	drvs->tx_pauseframes = port_stats->tx_pauseframes;
	drvs->tx_controlframes = port_stats->tx_controlframes;
	drvs->jabber_events = port_stats->jabber_events;
	drvs->rx_drops_no_pbuf = rxf_stats->rx_drops_no_pbuf;
	drvs->rx_drops_no_erx_descr = rxf_stats->rx_drops_no_erx_descr;
	drvs->forwarded_packets = rxf_stats->forwarded_packets;
	drvs->rx_drops_mtu = rxf_stats->rx_drops_mtu;
	drvs->rx_drops_no_tpre_descr = rxf_stats->rx_drops_no_tpre_descr;
	drvs->rx_drops_too_many_frags = rxf_stats->rx_drops_too_many_frags;
	adapter->drv_stats.eth_red_drops = pmem_sts->eth_red_drops;
}
355
/* Mirror Lancer per-port (pport) statistics into the driver's
 * generation-independent be_drv_stats structure. Lancer exposes 64-bit
 * counters; only the low 32 bits (the *_lo fields) are mirrored into the
 * 32-bit driver counters. */
static void populate_lancer_stats(struct be_adapter *adapter)
{

	struct be_drv_stats *drvs = &adapter->drv_stats;
	struct lancer_pport_stats *pport_stats =
					pport_stats_from_cmd(adapter);

	be_dws_le_to_cpu(pport_stats, sizeof(*pport_stats));
	drvs->rx_pause_frames = pport_stats->rx_pause_frames_lo;
	drvs->rx_crc_errors = pport_stats->rx_crc_errors_lo;
	drvs->rx_control_frames = pport_stats->rx_control_frames_lo;
	drvs->rx_in_range_errors = pport_stats->rx_in_range_errors;
	drvs->rx_frame_too_long = pport_stats->rx_frames_too_long_lo;
	drvs->rx_dropped_runt = pport_stats->rx_dropped_runt;
	drvs->rx_ip_checksum_errs = pport_stats->rx_ip_checksum_errors;
	drvs->rx_tcp_checksum_errs = pport_stats->rx_tcp_checksum_errors;
	drvs->rx_udp_checksum_errs = pport_stats->rx_udp_checksum_errors;
	drvs->rx_dropped_tcp_length =
				pport_stats->rx_dropped_invalid_tcp_length;
	drvs->rx_dropped_too_small = pport_stats->rx_dropped_too_small;
	drvs->rx_dropped_too_short = pport_stats->rx_dropped_too_short;
	drvs->rx_out_range_errors = pport_stats->rx_out_of_range_errors;
	drvs->rx_dropped_header_too_small =
				pport_stats->rx_dropped_header_too_small;
	drvs->rx_input_fifo_overflow_drop = pport_stats->rx_fifo_overflow;
	/* address and vlan mismatches are counted separately; fold both */
	drvs->rx_address_mismatch_drops =
		pport_stats->rx_address_mismatch_drops +
		pport_stats->rx_vlan_mismatch_drops;
	drvs->rx_alignment_symbol_errors = pport_stats->rx_symbol_errors_lo;
	drvs->rxpp_fifo_overflow_drop = pport_stats->rx_fifo_overflow;
	drvs->tx_pauseframes = pport_stats->tx_pause_frames_lo;
	drvs->tx_controlframes = pport_stats->tx_control_frames_lo;
	drvs->jabber_events = pport_stats->rx_jabbers;
	drvs->forwarded_packets = pport_stats->num_forwards_lo;
	drvs->rx_drops_mtu = pport_stats->rx_drops_mtu_lo;
	drvs->rx_drops_too_many_frags =
				pport_stats->rx_drops_too_many_frags_lo;
}
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000394
Sathya Perla09c1c682011-08-22 19:41:53 +0000395static void accumulate_16bit_val(u32 *acc, u16 val)
396{
397#define lo(x) (x & 0xFFFF)
398#define hi(x) (x & 0xFFFF0000)
399 bool wrapped = val < lo(*acc);
400 u32 newacc = hi(*acc) + val;
401
402 if (wrapped)
403 newacc += 65536;
404 ACCESS_ONCE(*acc) = newacc;
405}
406
/* Parse the HW stats response into driver counters, dispatching on chip
 * generation (BE2 v0 vs BE3 v1 vs Lancer pport), then fold the per-RXQ
 * erx drop counters into their 32-bit software accumulators. */
void be_parse_stats(struct be_adapter *adapter)
{
	struct be_erx_stats_v1 *erx = be_erx_stats_from_cmd(adapter);
	struct be_rx_obj *rxo;
	int i;

	if (adapter->generation == BE_GEN3) {
		if (lancer_chip(adapter))
			populate_lancer_stats(adapter);
		else
			populate_be3_stats(adapter);
	} else {
		populate_be2_stats(adapter);
	}

	/* as erx_v1 is longer than v0, ok to use v1 defn for v0 access */
	for_all_rx_queues(adapter, rxo, i) {
		/* below erx HW counter can actually wrap around after
		 * 65535. Driver accumulates a 32-bit value
		 */
		accumulate_16bit_val(&rx_stats(rxo)->rx_drops_no_frags,
				(u16)erx->rx_drops_no_fragments[rxo->q.id]);
	}
}
431
/* ndo_get_stats64 handler: aggregate per-queue RX/TX packet and byte
 * counters (read consistently via the u64_stats seqcount retry loop so
 * 64-bit values are torn-read-safe on 32-bit hosts) and map the driver's
 * HW error counters onto the standard rtnl_link_stats64 fields. */
static struct rtnl_link_stats64 *be_get_stats64(struct net_device *netdev,
					struct rtnl_link_stats64 *stats)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_drv_stats *drvs = &adapter->drv_stats;
	struct be_rx_obj *rxo;
	struct be_tx_obj *txo;
	u64 pkts, bytes;
	unsigned int start;
	int i;

	for_all_rx_queues(adapter, rxo, i) {
		const struct be_rx_stats *rx_stats = rx_stats(rxo);
		/* retry until a consistent snapshot of pkts/bytes is read */
		do {
			start = u64_stats_fetch_begin_bh(&rx_stats->sync);
			pkts = rx_stats(rxo)->rx_pkts;
			bytes = rx_stats(rxo)->rx_bytes;
		} while (u64_stats_fetch_retry_bh(&rx_stats->sync, start));
		stats->rx_packets += pkts;
		stats->rx_bytes += bytes;
		stats->multicast += rx_stats(rxo)->rx_mcast_pkts;
		stats->rx_dropped += rx_stats(rxo)->rx_drops_no_skbs +
					rx_stats(rxo)->rx_drops_no_frags;
	}

	for_all_tx_queues(adapter, txo, i) {
		const struct be_tx_stats *tx_stats = tx_stats(txo);
		do {
			start = u64_stats_fetch_begin_bh(&tx_stats->sync);
			pkts = tx_stats(txo)->tx_pkts;
			bytes = tx_stats(txo)->tx_bytes;
		} while (u64_stats_fetch_retry_bh(&tx_stats->sync, start));
		stats->tx_packets += pkts;
		stats->tx_bytes += bytes;
	}

	/* bad pkts received */
	stats->rx_errors = drvs->rx_crc_errors +
		drvs->rx_alignment_symbol_errors +
		drvs->rx_in_range_errors +
		drvs->rx_out_range_errors +
		drvs->rx_frame_too_long +
		drvs->rx_dropped_too_small +
		drvs->rx_dropped_too_short +
		drvs->rx_dropped_header_too_small +
		drvs->rx_dropped_tcp_length +
		drvs->rx_dropped_runt;

	/* detailed rx errors */
	stats->rx_length_errors = drvs->rx_in_range_errors +
		drvs->rx_out_range_errors +
		drvs->rx_frame_too_long;

	stats->rx_crc_errors = drvs->rx_crc_errors;

	/* frame alignment errors */
	stats->rx_frame_errors = drvs->rx_alignment_symbol_errors;

	/* receiver fifo overrun */
	/* drops_no_pbuf is no per i/f, it's per BE card */
	stats->rx_fifo_errors = drvs->rxpp_fifo_overflow_drop +
				drvs->rx_input_fifo_overflow_drop +
				drvs->rx_drops_no_pbuf;
	return stats;
}
497
Ajit Khapardeb236916a2011-12-30 12:15:40 +0000498void be_link_status_update(struct be_adapter *adapter, u8 link_status)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700499{
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700500 struct net_device *netdev = adapter->netdev;
501
Ajit Khapardeb236916a2011-12-30 12:15:40 +0000502 if (!(adapter->flags & BE_FLAGS_LINK_STATUS_INIT)) {
Sathya Perlaea172a02011-08-02 19:57:42 +0000503 netif_carrier_off(netdev);
Ajit Khapardeb236916a2011-12-30 12:15:40 +0000504 adapter->flags |= BE_FLAGS_LINK_STATUS_INIT;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700505 }
Ajit Khapardeb236916a2011-12-30 12:15:40 +0000506
507 if ((link_status & LINK_STATUS_MASK) == LINK_UP)
508 netif_carrier_on(netdev);
509 else
510 netif_carrier_off(netdev);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700511}
512
Sathya Perla3c8def92011-06-12 20:01:58 +0000513static void be_tx_stats_update(struct be_tx_obj *txo,
Ajit Khaparde91992e42010-02-19 13:57:12 +0000514 u32 wrb_cnt, u32 copied, u32 gso_segs, bool stopped)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700515{
Sathya Perla3c8def92011-06-12 20:01:58 +0000516 struct be_tx_stats *stats = tx_stats(txo);
517
Sathya Perlaab1594e2011-07-25 19:10:15 +0000518 u64_stats_update_begin(&stats->sync);
Sathya Perlaac124ff2011-07-25 19:10:14 +0000519 stats->tx_reqs++;
520 stats->tx_wrbs += wrb_cnt;
521 stats->tx_bytes += copied;
522 stats->tx_pkts += (gso_segs ? gso_segs : 1);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700523 if (stopped)
Sathya Perlaac124ff2011-07-25 19:10:14 +0000524 stats->tx_stops++;
Sathya Perlaab1594e2011-07-25 19:10:15 +0000525 u64_stats_update_end(&stats->sync);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700526}
527
528/* Determine number of WRB entries needed to xmit data in an skb */
Sathya Perlafe6d2a32010-11-21 23:25:50 +0000529static u32 wrb_cnt_for_skb(struct be_adapter *adapter, struct sk_buff *skb,
530 bool *dummy)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700531{
David S. Millerebc8d2a2009-06-09 01:01:31 -0700532 int cnt = (skb->len > skb->data_len);
533
534 cnt += skb_shinfo(skb)->nr_frags;
535
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700536 /* to account for hdr wrb */
537 cnt++;
Sathya Perlafe6d2a32010-11-21 23:25:50 +0000538 if (lancer_chip(adapter) || !(cnt & 1)) {
539 *dummy = false;
540 } else {
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700541 /* add a dummy to make it an even num */
542 cnt++;
543 *dummy = true;
Sathya Perlafe6d2a32010-11-21 23:25:50 +0000544 }
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700545 BUG_ON(cnt > BE_MAX_TX_FRAG_COUNT);
546 return cnt;
547}
548
549static inline void wrb_fill(struct be_eth_wrb *wrb, u64 addr, int len)
550{
551 wrb->frag_pa_hi = upper_32_bits(addr);
552 wrb->frag_pa_lo = addr & 0xFFFFFFFF;
553 wrb->frag_len = len & ETH_WRB_FRAG_LEN_MASK;
554}
555
Ajit Khaparde1ded1322011-12-09 13:53:17 +0000556static inline u16 be_get_tx_vlan_tag(struct be_adapter *adapter,
557 struct sk_buff *skb)
558{
559 u8 vlan_prio;
560 u16 vlan_tag;
561
562 vlan_tag = vlan_tx_tag_get(skb);
563 vlan_prio = (vlan_tag & VLAN_PRIO_MASK) >> VLAN_PRIO_SHIFT;
564 /* If vlan priority provided by OS is NOT in available bmap */
565 if (!(adapter->vlan_prio_bmap & (1 << vlan_prio)))
566 vlan_tag = (vlan_tag & ~VLAN_PRIO_MASK) |
567 adapter->recommended_prio;
568
569 return vlan_tag;
570}
571
/* Build the TX header wrb for an skb: sets CRC, LSO/checksum-offload and
 * vlan bits via the AMAP bit-field helpers, then records the total wrb
 * count and payload length. Checksum bits are chip-family dependent:
 * Lancer A0 needs explicit ip/tcp/udp cs bits even in the LSO path. */
static void wrb_fill_hdr(struct be_adapter *adapter, struct be_eth_hdr_wrb *hdr,
		struct sk_buff *skb, u32 wrb_cnt, u32 len)
{
	u16 vlan_tag;

	memset(hdr, 0, sizeof(*hdr));

	AMAP_SET_BITS(struct amap_eth_hdr_wrb, crc, hdr, 1);

	if (skb_is_gso(skb)) {
		AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso, hdr, 1);
		AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso_mss,
			hdr, skb_shinfo(skb)->gso_size);
		/* lso6 flag exists only on BE2/BE3, not Lancer */
		if (skb_is_gso_v6(skb) && !lancer_chip(adapter))
			AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso6, hdr, 1);
		if (lancer_chip(adapter) && adapter->sli_family ==
			LANCER_A0_SLI_FAMILY) {
			AMAP_SET_BITS(struct amap_eth_hdr_wrb, ipcs, hdr, 1);
			if (is_tcp_pkt(skb))
				AMAP_SET_BITS(struct amap_eth_hdr_wrb,
								tcpcs, hdr, 1);
			else if (is_udp_pkt(skb))
				AMAP_SET_BITS(struct amap_eth_hdr_wrb,
								udpcs, hdr, 1);
		}
	} else if (skb->ip_summed == CHECKSUM_PARTIAL) {
		/* non-LSO checksum offload */
		if (is_tcp_pkt(skb))
			AMAP_SET_BITS(struct amap_eth_hdr_wrb, tcpcs, hdr, 1);
		else if (is_udp_pkt(skb))
			AMAP_SET_BITS(struct amap_eth_hdr_wrb, udpcs, hdr, 1);
	}

	if (vlan_tx_tag_present(skb)) {
		AMAP_SET_BITS(struct amap_eth_hdr_wrb, vlan, hdr, 1);
		vlan_tag = be_get_tx_vlan_tag(adapter, skb);
		AMAP_SET_BITS(struct amap_eth_hdr_wrb, vlan_tag, hdr, vlan_tag);
	}

	AMAP_SET_BITS(struct amap_eth_hdr_wrb, event, hdr, 1);
	AMAP_SET_BITS(struct amap_eth_hdr_wrb, complete, hdr, 1);
	AMAP_SET_BITS(struct amap_eth_hdr_wrb, num_wrb, hdr, wrb_cnt);
	AMAP_SET_BITS(struct amap_eth_hdr_wrb, len, hdr, len);
}
615
Ivan Vecera2b7bceb2011-02-02 08:05:12 +0000616static void unmap_tx_frag(struct device *dev, struct be_eth_wrb *wrb,
Sathya Perla7101e112010-03-22 20:41:12 +0000617 bool unmap_single)
618{
619 dma_addr_t dma;
620
621 be_dws_le_to_cpu(wrb, sizeof(*wrb));
622
623 dma = (u64)wrb->frag_pa_hi << 32 | (u64)wrb->frag_pa_lo;
FUJITA Tomonorib681ee72010-04-04 21:40:18 +0000624 if (wrb->frag_len) {
Sathya Perla7101e112010-03-22 20:41:12 +0000625 if (unmap_single)
Ivan Vecera2b7bceb2011-02-02 08:05:12 +0000626 dma_unmap_single(dev, dma, wrb->frag_len,
627 DMA_TO_DEVICE);
Sathya Perla7101e112010-03-22 20:41:12 +0000628 else
Ivan Vecera2b7bceb2011-02-02 08:05:12 +0000629 dma_unmap_page(dev, dma, wrb->frag_len, DMA_TO_DEVICE);
Sathya Perla7101e112010-03-22 20:41:12 +0000630 }
631}
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700632
/* Populate @txq with WRBs for @skb and DMA-map its data.
 *
 * Writes one header WRB followed by one fragment WRB per mapped piece of
 * the skb (linear part first, then each paged frag), plus an optional
 * dummy WRB when @dummy_wrb is set.
 *
 * Returns the number of payload bytes mapped, or 0 on a DMA mapping
 * failure (in which case all mappings made so far are undone and the
 * queue head is restored to its position on entry).
 */
static int make_tx_wrbs(struct be_adapter *adapter, struct be_queue_info *txq,
		struct sk_buff *skb, u32 wrb_cnt, bool dummy_wrb)
{
	dma_addr_t busaddr;
	int i, copied = 0;
	struct device *dev = &adapter->pdev->dev;
	struct sk_buff *first_skb = skb;
	struct be_eth_wrb *wrb;
	struct be_eth_hdr_wrb *hdr;
	bool map_single = false;
	u16 map_head;

	hdr = queue_head_node(txq);
	queue_head_inc(txq);
	map_head = txq->head;	/* first data-WRB slot; error path rewinds here */

	/* Map the linear (head) portion of the skb, if any */
	if (skb->len > skb->data_len) {
		int len = skb_headlen(skb);
		busaddr = dma_map_single(dev, skb->data, len, DMA_TO_DEVICE);
		if (dma_mapping_error(dev, busaddr))
			goto dma_err;
		map_single = true;
		wrb = queue_head_node(txq);
		wrb_fill(wrb, busaddr, len);
		be_dws_cpu_to_le(wrb, sizeof(*wrb));
		queue_head_inc(txq);
		copied += len;
	}

	/* One WRB per paged fragment */
	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		const struct skb_frag_struct *frag =
			&skb_shinfo(skb)->frags[i];
		busaddr = skb_frag_dma_map(dev, frag, 0,
					skb_frag_size(frag), DMA_TO_DEVICE);
		if (dma_mapping_error(dev, busaddr))
			goto dma_err;
		wrb = queue_head_node(txq);
		wrb_fill(wrb, busaddr, skb_frag_size(frag));
		be_dws_cpu_to_le(wrb, sizeof(*wrb));
		queue_head_inc(txq);
		copied += skb_frag_size(frag);
	}

	/* Pad with a zero-length WRB when the caller needs an extra slot */
	if (dummy_wrb) {
		wrb = queue_head_node(txq);
		wrb_fill(wrb, 0, 0);
		be_dws_cpu_to_le(wrb, sizeof(*wrb));
		queue_head_inc(txq);
	}

	wrb_fill_hdr(adapter, hdr, first_skb, wrb_cnt, copied);
	be_dws_cpu_to_le(hdr, sizeof(*hdr));

	return copied;
dma_err:
	/* Unwind: rewind the queue head and unmap every WRB filled so far.
	 * Only the first mapping can be a single mapping, so clear
	 * map_single after the first iteration.
	 */
	txq->head = map_head;
	while (copied) {
		wrb = queue_head_node(txq);
		unmap_tx_frag(dev, wrb, map_single);
		map_single = false;
		copied -= wrb->frag_len;
		queue_head_inc(txq);
	}
	return 0;
}
698
/* ndo_start_xmit handler: map the skb into TX WRBs and ring the doorbell.
 * Always returns NETDEV_TX_OK; on mapping failure the skb is dropped.
 */
static netdev_tx_t be_xmit(struct sk_buff *skb,
			struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_tx_obj *txo = &adapter->tx_obj[skb_get_queue_mapping(skb)];
	struct be_queue_info *txq = &txo->q;
	u32 wrb_cnt = 0, copied = 0;
	u32 start = txq->head;	/* remembered for rollback / sent_skb slot */
	bool dummy_wrb, stopped = false;

	/* For vlan tagged pkts, BE
	 * 1) calculates checksum even when CSO is not requested
	 * 2) calculates checksum wrongly for padded pkt less than
	 * 60 bytes long.
	 * As a workaround disable TX vlan offloading in such cases.
	 */
	if (unlikely(vlan_tx_tag_present(skb) &&
		     (skb->ip_summed != CHECKSUM_PARTIAL || skb->len <= 60))) {
		skb = skb_share_check(skb, GFP_ATOMIC);
		if (unlikely(!skb))
			goto tx_drop;

		/* Insert the tag into the frame itself instead of offloading */
		skb = __vlan_put_tag(skb, be_get_tx_vlan_tag(adapter, skb));
		if (unlikely(!skb))
			goto tx_drop;

		skb->vlan_tci = 0;
	}

	wrb_cnt = wrb_cnt_for_skb(adapter, skb, &dummy_wrb);

	copied = make_tx_wrbs(adapter, txq, skb, wrb_cnt, dummy_wrb);
	if (copied) {
		/* record the sent skb in the sent_skb table */
		BUG_ON(txo->sent_skb_list[start]);
		txo->sent_skb_list[start] = skb;

		/* Ensure txq has space for the next skb; Else stop the queue
		 * *BEFORE* ringing the tx doorbell, so that we serialize the
		 * tx compls of the current transmit which'll wake up the queue
		 */
		atomic_add(wrb_cnt, &txq->used);
		if ((BE_MAX_TX_FRAG_COUNT + atomic_read(&txq->used)) >=
								txq->len) {
			netif_stop_subqueue(netdev, skb_get_queue_mapping(skb));
			stopped = true;
		}

		be_txq_notify(adapter, txq->id, wrb_cnt);

		be_tx_stats_update(txo, wrb_cnt, copied,
				skb_shinfo(skb)->gso_segs, stopped);
	} else {
		/* Mapping failed: rewind the queue head and drop the skb */
		txq->head = start;
		dev_kfree_skb_any(skb);
	}
tx_drop:
	return NETDEV_TX_OK;
}
758
759static int be_change_mtu(struct net_device *netdev, int new_mtu)
760{
761 struct be_adapter *adapter = netdev_priv(netdev);
762 if (new_mtu < BE_MIN_MTU ||
Ajit Khaparde34a89b82010-02-09 01:32:43 +0000763 new_mtu > (BE_MAX_JUMBO_FRAME_SIZE -
764 (ETH_HLEN + ETH_FCS_LEN))) {
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700765 dev_info(&adapter->pdev->dev,
766 "MTU must be between %d and %d bytes\n",
Ajit Khaparde34a89b82010-02-09 01:32:43 +0000767 BE_MIN_MTU,
768 (BE_MAX_JUMBO_FRAME_SIZE - (ETH_HLEN + ETH_FCS_LEN)));
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700769 return -EINVAL;
770 }
771 dev_info(&adapter->pdev->dev, "MTU changed from %d to %d bytes\n",
772 netdev->mtu, new_mtu);
773 netdev->mtu = new_mtu;
774 return 0;
775}
776
/*
 * A max of 64 (BE_NUM_VLANS_SUPPORTED) vlans can be configured in BE.
 * If the user configures more, place BE in vlan promiscuous mode.
 */
static int be_vid_config(struct be_adapter *adapter, bool vf, u32 vf_num)
{
	struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf_num];
	u16 vtag[BE_NUM_VLANS_SUPPORTED];
	u16 ntags = 0, i;
	int status = 0;

	/* For a VF, program only its single transparent vlan tag */
	if (vf) {
		vtag[0] = cpu_to_le16(vf_cfg->vlan_tag);
		status = be_cmd_vlan_config(adapter, vf_cfg->if_handle, vtag,
					    1, 1, 0);
	}

	/* No need to further configure vids if in promiscuous mode */
	if (adapter->promiscuous)
		return 0;

	if (adapter->vlans_added <= adapter->max_vlans) {
		/* Construct VLAN Table to give to HW */
		for (i = 0; i < VLAN_N_VID; i++) {
			if (adapter->vlan_tag[i]) {
				vtag[ntags] = cpu_to_le16(i);
				ntags++;
			}
		}
		status = be_cmd_vlan_config(adapter, adapter->if_handle,
					vtag, ntags, 1, 0);
	} else {
		/* Too many vlans configured: fall back to vlan promiscuous */
		status = be_cmd_vlan_config(adapter, adapter->if_handle,
					NULL, 0, 1, 1);
	}

	return status;
}
815
Jiri Pirko8e586132011-12-08 19:52:37 -0500816static int be_vlan_add_vid(struct net_device *netdev, u16 vid)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700817{
818 struct be_adapter *adapter = netdev_priv(netdev);
Ajit Khaparde80817cb2011-12-30 12:15:12 +0000819 int status = 0;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700820
Ajit Khaparde80817cb2011-12-30 12:15:12 +0000821 if (!be_physfn(adapter)) {
822 status = -EINVAL;
823 goto ret;
824 }
Sarveshwar Bandiba343c72010-03-31 02:56:12 +0000825
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700826 adapter->vlan_tag[vid] = 1;
Ajit Khaparde82903e42010-02-09 01:34:57 +0000827 if (adapter->vlans_added <= (adapter->max_vlans + 1))
Ajit Khaparde80817cb2011-12-30 12:15:12 +0000828 status = be_vid_config(adapter, false, 0);
Jiri Pirko8e586132011-12-08 19:52:37 -0500829
Ajit Khaparde80817cb2011-12-30 12:15:12 +0000830 if (!status)
831 adapter->vlans_added++;
832 else
833 adapter->vlan_tag[vid] = 0;
834ret:
835 return status;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700836}
837
Jiri Pirko8e586132011-12-08 19:52:37 -0500838static int be_vlan_rem_vid(struct net_device *netdev, u16 vid)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700839{
840 struct be_adapter *adapter = netdev_priv(netdev);
Ajit Khaparde80817cb2011-12-30 12:15:12 +0000841 int status = 0;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700842
Ajit Khaparde80817cb2011-12-30 12:15:12 +0000843 if (!be_physfn(adapter)) {
844 status = -EINVAL;
845 goto ret;
846 }
Sarveshwar Bandiba343c72010-03-31 02:56:12 +0000847
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700848 adapter->vlan_tag[vid] = 0;
Ajit Khaparde82903e42010-02-09 01:34:57 +0000849 if (adapter->vlans_added <= adapter->max_vlans)
Ajit Khaparde80817cb2011-12-30 12:15:12 +0000850 status = be_vid_config(adapter, false, 0);
Jiri Pirko8e586132011-12-08 19:52:37 -0500851
Ajit Khaparde80817cb2011-12-30 12:15:12 +0000852 if (!status)
853 adapter->vlans_added--;
854 else
855 adapter->vlan_tag[vid] = 1;
856ret:
857 return status;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700858}
859
Sathya Perlaa54769f2011-10-24 02:45:00 +0000860static void be_set_rx_mode(struct net_device *netdev)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700861{
862 struct be_adapter *adapter = netdev_priv(netdev);
863
864 if (netdev->flags & IFF_PROMISC) {
Sathya Perla5b8821b2011-08-02 19:57:44 +0000865 be_cmd_rx_filter(adapter, IFF_PROMISC, ON);
Sathya Perla24307ee2009-06-18 00:09:25 +0000866 adapter->promiscuous = true;
867 goto done;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700868 }
Sathya Perla24307ee2009-06-18 00:09:25 +0000869
Lucas De Marchi25985ed2011-03-30 22:57:33 -0300870 /* BE was previously in promiscuous mode; disable it */
Sathya Perla24307ee2009-06-18 00:09:25 +0000871 if (adapter->promiscuous) {
872 adapter->promiscuous = false;
Sathya Perla5b8821b2011-08-02 19:57:44 +0000873 be_cmd_rx_filter(adapter, IFF_PROMISC, OFF);
Sathya Perlac0e64ef2011-08-02 19:57:43 +0000874
875 if (adapter->vlans_added)
876 be_vid_config(adapter, false, 0);
Sathya Perla24307ee2009-06-18 00:09:25 +0000877 }
878
Sathya Perlae7b909a2009-11-22 22:01:10 +0000879 /* Enable multicast promisc if num configured exceeds what we support */
Jiri Pirko4cd24ea2010-02-08 04:30:35 +0000880 if (netdev->flags & IFF_ALLMULTI ||
Sathya Perla5b8821b2011-08-02 19:57:44 +0000881 netdev_mc_count(netdev) > BE_MAX_MC) {
882 be_cmd_rx_filter(adapter, IFF_ALLMULTI, ON);
Sathya Perla24307ee2009-06-18 00:09:25 +0000883 goto done;
884 }
885
Sathya Perla5b8821b2011-08-02 19:57:44 +0000886 be_cmd_rx_filter(adapter, IFF_MULTICAST, ON);
Sathya Perla24307ee2009-06-18 00:09:25 +0000887done:
888 return;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700889}
890
/* ndo_set_vf_mac handler: program @mac as the MAC address of VF @vf.
 * Returns -EPERM when SR-IOV is disabled, -EINVAL for a bad MAC or VF
 * index, otherwise the firmware command status.
 */
static int be_set_vf_mac(struct net_device *netdev, int vf, u8 *mac)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf];
	int status;

	if (!sriov_enabled(adapter))
		return -EPERM;

	if (!is_valid_ether_addr(mac) || vf >= adapter->num_vfs)
		return -EINVAL;

	if (lancer_chip(adapter)) {
		status = be_cmd_set_mac_list(adapter, mac, 1, vf + 1);
	} else {
		/* Replace the VF's pmac entry: delete old, add new.
		 * NOTE(review): the status of be_cmd_pmac_del() is
		 * overwritten by be_cmd_pmac_add(), so a failed delete is
		 * silently ignored here — confirm this is intentional.
		 */
		status = be_cmd_pmac_del(adapter, vf_cfg->if_handle,
					 vf_cfg->pmac_id, vf + 1);

		status = be_cmd_pmac_add(adapter, mac, vf_cfg->if_handle,
					 &vf_cfg->pmac_id, vf + 1);
	}

	if (status)
		dev_err(&adapter->pdev->dev, "MAC %pM set on VF %d Failed\n",
				mac, vf);
	else
		/* Cache the new MAC only on success */
		memcpy(vf_cfg->mac_addr, mac, ETH_ALEN);

	return status;
}
921
Ajit Khaparde64600ea2010-07-23 01:50:34 +0000922static int be_get_vf_config(struct net_device *netdev, int vf,
923 struct ifla_vf_info *vi)
924{
925 struct be_adapter *adapter = netdev_priv(netdev);
Sathya Perla11ac75e2011-12-13 00:58:50 +0000926 struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf];
Ajit Khaparde64600ea2010-07-23 01:50:34 +0000927
Sathya Perla11ac75e2011-12-13 00:58:50 +0000928 if (!sriov_enabled(adapter))
Ajit Khaparde64600ea2010-07-23 01:50:34 +0000929 return -EPERM;
930
Sathya Perla11ac75e2011-12-13 00:58:50 +0000931 if (vf >= adapter->num_vfs)
Ajit Khaparde64600ea2010-07-23 01:50:34 +0000932 return -EINVAL;
933
934 vi->vf = vf;
Sathya Perla11ac75e2011-12-13 00:58:50 +0000935 vi->tx_rate = vf_cfg->tx_rate;
936 vi->vlan = vf_cfg->vlan_tag;
Ajit Khaparde64600ea2010-07-23 01:50:34 +0000937 vi->qos = 0;
Sathya Perla11ac75e2011-12-13 00:58:50 +0000938 memcpy(&vi->mac, vf_cfg->mac_addr, ETH_ALEN);
Ajit Khaparde64600ea2010-07-23 01:50:34 +0000939
940 return 0;
941}
942
Ajit Khaparde1da87b72010-07-23 01:51:22 +0000943static int be_set_vf_vlan(struct net_device *netdev,
944 int vf, u16 vlan, u8 qos)
945{
946 struct be_adapter *adapter = netdev_priv(netdev);
947 int status = 0;
948
Sathya Perla11ac75e2011-12-13 00:58:50 +0000949 if (!sriov_enabled(adapter))
Ajit Khaparde1da87b72010-07-23 01:51:22 +0000950 return -EPERM;
951
Sathya Perla11ac75e2011-12-13 00:58:50 +0000952 if (vf >= adapter->num_vfs || vlan > 4095)
Ajit Khaparde1da87b72010-07-23 01:51:22 +0000953 return -EINVAL;
954
955 if (vlan) {
Sathya Perla11ac75e2011-12-13 00:58:50 +0000956 adapter->vf_cfg[vf].vlan_tag = vlan;
Ajit Khaparde1da87b72010-07-23 01:51:22 +0000957 adapter->vlans_added++;
958 } else {
Sathya Perla11ac75e2011-12-13 00:58:50 +0000959 adapter->vf_cfg[vf].vlan_tag = 0;
Ajit Khaparde1da87b72010-07-23 01:51:22 +0000960 adapter->vlans_added--;
961 }
962
963 status = be_vid_config(adapter, true, vf);
964
965 if (status)
966 dev_info(&adapter->pdev->dev,
967 "VLAN %d config on VF %d failed\n", vlan, vf);
968 return status;
969}
970
Ajit Khapardee1d18732010-07-23 01:52:13 +0000971static int be_set_vf_tx_rate(struct net_device *netdev,
972 int vf, int rate)
973{
974 struct be_adapter *adapter = netdev_priv(netdev);
975 int status = 0;
976
Sathya Perla11ac75e2011-12-13 00:58:50 +0000977 if (!sriov_enabled(adapter))
Ajit Khapardee1d18732010-07-23 01:52:13 +0000978 return -EPERM;
979
Ajit Khaparde94f434c2011-12-30 12:15:30 +0000980 if (vf >= adapter->num_vfs)
Ajit Khapardee1d18732010-07-23 01:52:13 +0000981 return -EINVAL;
982
Ajit Khaparde94f434c2011-12-30 12:15:30 +0000983 if (rate < 100 || rate > 10000) {
984 dev_err(&adapter->pdev->dev,
985 "tx rate must be between 100 and 10000 Mbps\n");
986 return -EINVAL;
987 }
Ajit Khapardee1d18732010-07-23 01:52:13 +0000988
Ajit Khaparde856c4012011-02-11 13:32:32 +0000989 status = be_cmd_set_qos(adapter, rate / 10, vf + 1);
Ajit Khapardee1d18732010-07-23 01:52:13 +0000990
991 if (status)
Ajit Khaparde94f434c2011-12-30 12:15:30 +0000992 dev_err(&adapter->pdev->dev,
Ajit Khapardee1d18732010-07-23 01:52:13 +0000993 "tx rate %d on VF %d failed\n", rate, vf);
Ajit Khaparde94f434c2011-12-30 12:15:30 +0000994 else
995 adapter->vf_cfg[vf].tx_rate = rate;
Ajit Khapardee1d18732010-07-23 01:52:13 +0000996 return status;
997}
998
/* Adaptive interrupt coalescing: recompute and program the EQ delay for
 * @eqo based on the RX packet rate observed over the last second.
 * When AIC is disabled the statically configured delay is applied.
 */
static void be_eqd_update(struct be_adapter *adapter, struct be_eq_obj *eqo)
{
	/* NOTE(review): rx_obj[eqo->idx] is read here before the
	 * eqo->idx >= num_rx_qs bounds check below — confirm the rx_obj
	 * array is always sized to cover every EQ index.
	 */
	struct be_rx_stats *stats = rx_stats(&adapter->rx_obj[eqo->idx]);
	ulong now = jiffies;
	ulong delta = now - stats->rx_jiffies;
	u64 pkts;
	unsigned int start, eqd;

	if (!eqo->enable_aic) {
		/* AIC off: just apply the configured static delay */
		eqd = eqo->eqd;
		goto modify_eqd;
	}

	/* EQs beyond the RX rings have no RX stats to adapt from */
	if (eqo->idx >= adapter->num_rx_qs)
		return;

	stats = rx_stats(&adapter->rx_obj[eqo->idx]);

	/* Wrapped around */
	if (time_before(now, stats->rx_jiffies)) {
		stats->rx_jiffies = now;
		return;
	}

	/* Update once a second */
	if (delta < HZ)
		return;

	/* Read rx_pkts consistently against the writer side */
	do {
		start = u64_stats_fetch_begin_bh(&stats->sync);
		pkts = stats->rx_pkts;
	} while (u64_stats_fetch_retry_bh(&stats->sync, start));

	stats->rx_pps = (unsigned long)(pkts - stats->rx_pkts_prev) / (delta / HZ);
	stats->rx_pkts_prev = pkts;
	stats->rx_jiffies = now;
	/* Derive the delay from pkts/sec, clamped to [min_eqd, max_eqd] */
	eqd = (stats->rx_pps / 110000) << 3;
	eqd = min(eqd, eqo->max_eqd);
	eqd = max(eqd, eqo->min_eqd);
	if (eqd < 10)
		eqd = 0;

modify_eqd:
	/* Issue the FW command only when the delay actually changes */
	if (eqd != eqo->cur_eqd) {
		be_cmd_modify_eqd(adapter, eqo->q.id, eqd);
		eqo->cur_eqd = eqd;
	}
}
1047
Sathya Perla3abcded2010-10-03 22:12:27 -07001048static void be_rx_stats_update(struct be_rx_obj *rxo,
Sathya Perla2e588f82011-03-11 02:49:26 +00001049 struct be_rx_compl_info *rxcp)
Sathya Perla4097f662009-03-24 16:40:13 -07001050{
Sathya Perlaac124ff2011-07-25 19:10:14 +00001051 struct be_rx_stats *stats = rx_stats(rxo);
Sathya Perla4097f662009-03-24 16:40:13 -07001052
Sathya Perlaab1594e2011-07-25 19:10:15 +00001053 u64_stats_update_begin(&stats->sync);
Sathya Perla3abcded2010-10-03 22:12:27 -07001054 stats->rx_compl++;
Sathya Perla2e588f82011-03-11 02:49:26 +00001055 stats->rx_bytes += rxcp->pkt_size;
Sathya Perla3abcded2010-10-03 22:12:27 -07001056 stats->rx_pkts++;
Sathya Perla2e588f82011-03-11 02:49:26 +00001057 if (rxcp->pkt_type == BE_MULTICAST_PACKET)
Sathya Perla3abcded2010-10-03 22:12:27 -07001058 stats->rx_mcast_pkts++;
Sathya Perla2e588f82011-03-11 02:49:26 +00001059 if (rxcp->err)
Sathya Perlaac124ff2011-07-25 19:10:14 +00001060 stats->rx_compl_err++;
Sathya Perlaab1594e2011-07-25 19:10:15 +00001061 u64_stats_update_end(&stats->sync);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001062}
1063
Sathya Perla2e588f82011-03-11 02:49:26 +00001064static inline bool csum_passed(struct be_rx_compl_info *rxcp)
Ajit Khaparde728a9972009-04-13 15:41:22 -07001065{
Padmanabh Ratnakar19fad862011-03-07 03:08:16 +00001066 /* L4 checksum is not reliable for non TCP/UDP packets.
1067 * Also ignore ipcksm for ipv6 pkts */
Sathya Perla2e588f82011-03-11 02:49:26 +00001068 return (rxcp->tcpf || rxcp->udpf) && rxcp->l4_csum &&
1069 (rxcp->ip_csum || rxcp->ipv6);
Ajit Khaparde728a9972009-04-13 15:41:22 -07001070}
1071
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001072static struct be_rx_page_info *get_rx_page_info(struct be_rx_obj *rxo,
1073 u16 frag_idx)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001074{
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001075 struct be_adapter *adapter = rxo->adapter;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001076 struct be_rx_page_info *rx_page_info;
Sathya Perla3abcded2010-10-03 22:12:27 -07001077 struct be_queue_info *rxq = &rxo->q;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001078
Sathya Perla3abcded2010-10-03 22:12:27 -07001079 rx_page_info = &rxo->page_info_tbl[frag_idx];
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001080 BUG_ON(!rx_page_info->page);
1081
Ajit Khaparde205859a2010-02-09 01:34:21 +00001082 if (rx_page_info->last_page_user) {
Ivan Vecera2b7bceb2011-02-02 08:05:12 +00001083 dma_unmap_page(&adapter->pdev->dev,
1084 dma_unmap_addr(rx_page_info, bus),
1085 adapter->big_page_size, DMA_FROM_DEVICE);
Ajit Khaparde205859a2010-02-09 01:34:21 +00001086 rx_page_info->last_page_user = false;
1087 }
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001088
1089 atomic_dec(&rxq->used);
1090 return rx_page_info;
1091}
1092
1093/* Throwaway the data in the Rx completion */
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001094static void be_rx_compl_discard(struct be_rx_obj *rxo,
1095 struct be_rx_compl_info *rxcp)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001096{
Sathya Perla3abcded2010-10-03 22:12:27 -07001097 struct be_queue_info *rxq = &rxo->q;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001098 struct be_rx_page_info *page_info;
Sathya Perla2e588f82011-03-11 02:49:26 +00001099 u16 i, num_rcvd = rxcp->num_rcvd;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001100
Padmanabh Ratnakare80d9da2011-03-07 03:07:58 +00001101 for (i = 0; i < num_rcvd; i++) {
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001102 page_info = get_rx_page_info(rxo, rxcp->rxq_idx);
Padmanabh Ratnakare80d9da2011-03-07 03:07:58 +00001103 put_page(page_info->page);
1104 memset(page_info, 0, sizeof(*page_info));
Sathya Perla2e588f82011-03-11 02:49:26 +00001105 index_inc(&rxcp->rxq_idx, rxq->len);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001106 }
1107}
1108
/*
 * skb_fill_rx_data forms a complete skb for an ether frame
 * indicated by rxcp.
 *
 * The first BE_HDR_LEN bytes are copied into the skb's linear area;
 * the remainder of each received fragment is attached as page frags,
 * coalescing consecutive fragments that share a physical page.
 */
static void skb_fill_rx_data(struct be_rx_obj *rxo, struct sk_buff *skb,
			struct be_rx_compl_info *rxcp)
{
	struct be_queue_info *rxq = &rxo->q;
	struct be_rx_page_info *page_info;
	u16 i, j;
	u16 hdr_len, curr_frag_len, remaining;
	u8 *start;

	page_info = get_rx_page_info(rxo, rxcp->rxq_idx);
	start = page_address(page_info->page) + page_info->page_offset;
	prefetch(start);

	/* Copy data in the first descriptor of this completion */
	curr_frag_len = min(rxcp->pkt_size, rx_frag_size);

	/* Copy the header portion into skb_data */
	hdr_len = min(BE_HDR_LEN, curr_frag_len);
	memcpy(skb->data, start, hdr_len);
	skb->len = curr_frag_len;
	if (curr_frag_len <= BE_HDR_LEN) { /* tiny packet */
		/* Complete packet has now been moved to data */
		put_page(page_info->page);
		skb->data_len = 0;
		skb->tail += curr_frag_len;
	} else {
		/* Attach the rest of the first fragment as frag[0] */
		skb_shinfo(skb)->nr_frags = 1;
		skb_frag_set_page(skb, 0, page_info->page);
		skb_shinfo(skb)->frags[0].page_offset =
					page_info->page_offset + hdr_len;
		skb_frag_size_set(&skb_shinfo(skb)->frags[0], curr_frag_len - hdr_len);
		skb->data_len = curr_frag_len - hdr_len;
		skb->truesize += rx_frag_size;
		skb->tail += hdr_len;
	}
	page_info->page = NULL;	/* page ownership moved to the skb (or freed) */

	if (rxcp->pkt_size <= rx_frag_size) {
		BUG_ON(rxcp->num_rcvd != 1);
		return;
	}

	/* More frags present for this completion */
	index_inc(&rxcp->rxq_idx, rxq->len);
	remaining = rxcp->pkt_size - curr_frag_len;
	for (i = 1, j = 0; i < rxcp->num_rcvd; i++) {
		page_info = get_rx_page_info(rxo, rxcp->rxq_idx);
		curr_frag_len = min(remaining, rx_frag_size);

		/* Coalesce all frags from the same physical page in one slot */
		if (page_info->page_offset == 0) {
			/* Fresh page */
			j++;
			skb_frag_set_page(skb, j, page_info->page);
			skb_shinfo(skb)->frags[j].page_offset =
							page_info->page_offset;
			skb_frag_size_set(&skb_shinfo(skb)->frags[j], 0);
			skb_shinfo(skb)->nr_frags++;
		} else {
			/* Same page as the previous frag: drop the extra ref */
			put_page(page_info->page);
		}

		skb_frag_size_add(&skb_shinfo(skb)->frags[j], curr_frag_len);
		skb->len += curr_frag_len;
		skb->data_len += curr_frag_len;
		skb->truesize += rx_frag_size;
		remaining -= curr_frag_len;
		index_inc(&rxcp->rxq_idx, rxq->len);
		page_info->page = NULL;
	}
	BUG_ON(j > MAX_SKB_FRAGS);
}
1185
Ajit Khaparde5be93b92009-07-21 12:36:19 -07001186/* Process the RX completion indicated by rxcp when GRO is disabled */
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001187static void be_rx_compl_process(struct be_rx_obj *rxo,
1188 struct be_rx_compl_info *rxcp)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001189{
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001190 struct be_adapter *adapter = rxo->adapter;
Michał Mirosław6332c8d2011-04-07 02:43:48 +00001191 struct net_device *netdev = adapter->netdev;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001192 struct sk_buff *skb;
Sathya Perla89420422010-02-17 01:35:26 +00001193
Eric Dumazetbb349bb2012-01-25 03:56:30 +00001194 skb = netdev_alloc_skb_ip_align(netdev, BE_RX_SKB_ALLOC_SIZE);
Sathya Perlaa058a632010-02-17 01:34:22 +00001195 if (unlikely(!skb)) {
Sathya Perlaac124ff2011-07-25 19:10:14 +00001196 rx_stats(rxo)->rx_drops_no_skbs++;
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001197 be_rx_compl_discard(rxo, rxcp);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001198 return;
1199 }
1200
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001201 skb_fill_rx_data(rxo, skb, rxcp);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001202
Michał Mirosław6332c8d2011-04-07 02:43:48 +00001203 if (likely((netdev->features & NETIF_F_RXCSUM) && csum_passed(rxcp)))
Ajit Khaparde728a9972009-04-13 15:41:22 -07001204 skb->ip_summed = CHECKSUM_UNNECESSARY;
Somnath Koturc6ce2f42010-10-25 01:11:58 +00001205 else
1206 skb_checksum_none_assert(skb);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001207
Michał Mirosław6332c8d2011-04-07 02:43:48 +00001208 skb->protocol = eth_type_trans(skb, netdev);
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001209 if (netdev->features & NETIF_F_RXHASH)
Ajit Khaparde4b972912011-04-06 18:07:43 +00001210 skb->rxhash = rxcp->rss_hash;
1211
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001212
Jiri Pirko343e43c2011-08-25 02:50:51 +00001213 if (rxcp->vlanf)
Ajit Khaparde4c5102f2011-07-12 22:10:01 -07001214 __vlan_hwaccel_put_tag(skb, rxcp->vlan_tag);
1215
1216 netif_receive_skb(skb);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001217}
1218
/* Process the RX completion indicated by rxcp when GRO is enabled:
 * attach the received fragments to a napi-provided skb and pass it to
 * napi_gro_frags().  Consecutive fragments sharing a physical page are
 * coalesced into a single skb frag slot.
 */
void be_rx_compl_process_gro(struct be_rx_obj *rxo, struct napi_struct *napi,
			     struct be_rx_compl_info *rxcp)
{
	struct be_adapter *adapter = rxo->adapter;
	struct be_rx_page_info *page_info;
	struct sk_buff *skb = NULL;
	struct be_queue_info *rxq = &rxo->q;
	u16 remaining, curr_frag_len;
	u16 i, j;

	skb = napi_get_frags(napi);
	if (!skb) {
		/* No skb available: drop this completion's buffers */
		be_rx_compl_discard(rxo, rxcp);
		return;
	}

	remaining = rxcp->pkt_size;
	for (i = 0, j = -1; i < rxcp->num_rcvd; i++) {
		page_info = get_rx_page_info(rxo, rxcp->rxq_idx);

		curr_frag_len = min(remaining, rx_frag_size);

		/* Coalesce all frags from the same physical page in one slot */
		if (i == 0 || page_info->page_offset == 0) {
			/* First frag or Fresh page */
			j++;
			skb_frag_set_page(skb, j, page_info->page);
			skb_shinfo(skb)->frags[j].page_offset =
							page_info->page_offset;
			skb_frag_size_set(&skb_shinfo(skb)->frags[j], 0);
		} else {
			/* Same page as previous frag: drop the extra ref */
			put_page(page_info->page);
		}
		skb_frag_size_add(&skb_shinfo(skb)->frags[j], curr_frag_len);
		skb->truesize += rx_frag_size;
		remaining -= curr_frag_len;
		index_inc(&rxcp->rxq_idx, rxq->len);
		memset(page_info, 0, sizeof(*page_info));
	}
	BUG_ON(j > MAX_SKB_FRAGS);

	skb_shinfo(skb)->nr_frags = j + 1;
	skb->len = rxcp->pkt_size;
	skb->data_len = rxcp->pkt_size;
	/* GRO path is only taken for frames whose checksum passed in HW */
	skb->ip_summed = CHECKSUM_UNNECESSARY;
	if (adapter->netdev->features & NETIF_F_RXHASH)
		skb->rxhash = rxcp->rss_hash;

	if (rxcp->vlanf)
		__vlan_hwaccel_put_tag(skb, rxcp->vlan_tag);

	napi_gro_frags(napi);
}
1273
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001274static void be_parse_rx_compl_v1(struct be_eth_rx_compl *compl,
1275 struct be_rx_compl_info *rxcp)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001276{
Sathya Perla2e588f82011-03-11 02:49:26 +00001277 rxcp->pkt_size =
1278 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, pktsize, compl);
1279 rxcp->vlanf = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, vtp, compl);
1280 rxcp->err = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, err, compl);
1281 rxcp->tcpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, tcpf, compl);
Padmanabh Ratnakar9ecb42f2011-03-15 14:57:09 -07001282 rxcp->udpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, udpf, compl);
Sathya Perla2e588f82011-03-11 02:49:26 +00001283 rxcp->ip_csum =
1284 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, ipcksm, compl);
1285 rxcp->l4_csum =
1286 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, l4_cksm, compl);
1287 rxcp->ipv6 =
1288 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, ip_version, compl);
1289 rxcp->rxq_idx =
1290 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, fragndx, compl);
1291 rxcp->num_rcvd =
1292 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, numfrags, compl);
1293 rxcp->pkt_type =
1294 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, cast_enc, compl);
Ajit Khaparde4b972912011-04-06 18:07:43 +00001295 rxcp->rss_hash =
1296 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, rsshash, rxcp);
Sathya Perla15d72182011-03-21 20:49:26 +00001297 if (rxcp->vlanf) {
1298 rxcp->vtm = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, vtm,
David S. Miller3c709f82011-05-11 14:26:15 -04001299 compl);
1300 rxcp->vlan_tag = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, vlan_tag,
1301 compl);
Sathya Perla15d72182011-03-21 20:49:26 +00001302 }
Sathya Perla12004ae2011-08-02 19:57:46 +00001303 rxcp->port = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, port, compl);
Sathya Perla2e588f82011-03-11 02:49:26 +00001304}
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001305
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001306static void be_parse_rx_compl_v0(struct be_eth_rx_compl *compl,
1307 struct be_rx_compl_info *rxcp)
Sathya Perla2e588f82011-03-11 02:49:26 +00001308{
1309 rxcp->pkt_size =
1310 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, pktsize, compl);
1311 rxcp->vlanf = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, vtp, compl);
1312 rxcp->err = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, err, compl);
1313 rxcp->tcpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, tcpf, compl);
Padmanabh Ratnakar9ecb42f2011-03-15 14:57:09 -07001314 rxcp->udpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, udpf, compl);
Sathya Perla2e588f82011-03-11 02:49:26 +00001315 rxcp->ip_csum =
1316 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, ipcksm, compl);
1317 rxcp->l4_csum =
1318 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, l4_cksm, compl);
1319 rxcp->ipv6 =
1320 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, ip_version, compl);
1321 rxcp->rxq_idx =
1322 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, fragndx, compl);
1323 rxcp->num_rcvd =
1324 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, numfrags, compl);
1325 rxcp->pkt_type =
1326 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, cast_enc, compl);
Ajit Khaparde4b972912011-04-06 18:07:43 +00001327 rxcp->rss_hash =
1328 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, rsshash, rxcp);
Sathya Perla15d72182011-03-21 20:49:26 +00001329 if (rxcp->vlanf) {
1330 rxcp->vtm = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, vtm,
David S. Miller3c709f82011-05-11 14:26:15 -04001331 compl);
1332 rxcp->vlan_tag = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, vlan_tag,
1333 compl);
Sathya Perla15d72182011-03-21 20:49:26 +00001334 }
Sathya Perla12004ae2011-08-02 19:57:46 +00001335 rxcp->port = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, port, compl);
Sathya Perla2e588f82011-03-11 02:49:26 +00001336}
1337
/* Fetch the next valid RX completion from rxo->cq, convert it to CPU
 * endianness and parse it into the per-rxo scratch area (rxo->rxcp).
 * Returns NULL when the entry at the CQ tail has not been written by
 * hardware yet.  The returned pointer is only valid until the next call.
 */
static struct be_rx_compl_info *be_rx_compl_get(struct be_rx_obj *rxo)
{
	struct be_eth_rx_compl *compl = queue_tail_node(&rxo->cq);
	struct be_rx_compl_info *rxcp = &rxo->rxcp;
	struct be_adapter *adapter = rxo->adapter;

	/* For checking the valid bit it is Ok to use either definition as the
	 * valid bit is at the same position in both v0 and v1 Rx compl */
	if (compl->dw[offsetof(struct amap_eth_rx_compl_v1, valid) / 32] == 0)
		return NULL;

	/* Read barrier: don't let loads of the completion body be reordered
	 * before the valid-bit load above.
	 */
	rmb();
	be_dws_le_to_cpu(compl, sizeof(*compl));

	/* BE3-native uses the v1 completion layout, everything else v0 */
	if (adapter->be3_native)
		be_parse_rx_compl_v1(compl, rxcp);
	else
		be_parse_rx_compl_v0(compl, rxcp);

	if (rxcp->vlanf) {
		/* vlanf could be wrongly set in some cards.
		 * ignore if vtm is not set */
		if ((adapter->function_mode & FLEX10_MODE) && !rxcp->vtm)
			rxcp->vlanf = 0;

		/* Lancer reports the tag in CPU order already */
		if (!lancer_chip(adapter))
			rxcp->vlan_tag = swab16(rxcp->vlan_tag);

		/* Hide the port-vid tag unless the vlan is configured */
		if (adapter->pvid == (rxcp->vlan_tag & VLAN_VID_MASK) &&
		    !adapter->vlan_tag[rxcp->vlan_tag])
			rxcp->vlanf = 0;
	}

	/* As the compl has been parsed, reset it; we wont touch it again */
	compl->dw[offsetof(struct amap_eth_rx_compl_v1, valid) / 32] = 0;

	queue_tail_inc(&rxo->cq);
	return rxcp;
}
1377
Eric Dumazet1829b082011-03-01 05:48:12 +00001378static inline struct page *be_alloc_pages(u32 size, gfp_t gfp)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001379{
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001380 u32 order = get_order(size);
Eric Dumazet1829b082011-03-01 05:48:12 +00001381
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001382 if (order > 0)
Eric Dumazet1829b082011-03-01 05:48:12 +00001383 gfp |= __GFP_COMP;
1384 return alloc_pages(gfp, order);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001385}
1386
1387/*
1388 * Allocate a page, split it to fragments of size rx_frag_size and post as
1389 * receive buffers to BE
1390 */
Eric Dumazet1829b082011-03-01 05:48:12 +00001391static void be_post_rx_frags(struct be_rx_obj *rxo, gfp_t gfp)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001392{
Sathya Perla3abcded2010-10-03 22:12:27 -07001393 struct be_adapter *adapter = rxo->adapter;
Sathya Perla26d92f92010-01-21 22:52:08 -08001394 struct be_rx_page_info *page_info = NULL, *prev_page_info = NULL;
Sathya Perla3abcded2010-10-03 22:12:27 -07001395 struct be_queue_info *rxq = &rxo->q;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001396 struct page *pagep = NULL;
1397 struct be_eth_rx_d *rxd;
1398 u64 page_dmaaddr = 0, frag_dmaaddr;
1399 u32 posted, page_offset = 0;
1400
Sathya Perla3abcded2010-10-03 22:12:27 -07001401 page_info = &rxo->page_info_tbl[rxq->head];
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001402 for (posted = 0; posted < MAX_RX_POST && !page_info->page; posted++) {
1403 if (!pagep) {
Eric Dumazet1829b082011-03-01 05:48:12 +00001404 pagep = be_alloc_pages(adapter->big_page_size, gfp);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001405 if (unlikely(!pagep)) {
Sathya Perlaac124ff2011-07-25 19:10:14 +00001406 rx_stats(rxo)->rx_post_fail++;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001407 break;
1408 }
Ivan Vecera2b7bceb2011-02-02 08:05:12 +00001409 page_dmaaddr = dma_map_page(&adapter->pdev->dev, pagep,
1410 0, adapter->big_page_size,
1411 DMA_FROM_DEVICE);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001412 page_info->page_offset = 0;
1413 } else {
1414 get_page(pagep);
1415 page_info->page_offset = page_offset + rx_frag_size;
1416 }
1417 page_offset = page_info->page_offset;
1418 page_info->page = pagep;
FUJITA Tomonorifac6da52010-04-01 16:53:22 +00001419 dma_unmap_addr_set(page_info, bus, page_dmaaddr);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001420 frag_dmaaddr = page_dmaaddr + page_info->page_offset;
1421
1422 rxd = queue_head_node(rxq);
1423 rxd->fragpa_lo = cpu_to_le32(frag_dmaaddr & 0xFFFFFFFF);
1424 rxd->fragpa_hi = cpu_to_le32(upper_32_bits(frag_dmaaddr));
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001425
1426 /* Any space left in the current big page for another frag? */
1427 if ((page_offset + rx_frag_size + rx_frag_size) >
1428 adapter->big_page_size) {
1429 pagep = NULL;
1430 page_info->last_page_user = true;
1431 }
Sathya Perla26d92f92010-01-21 22:52:08 -08001432
1433 prev_page_info = page_info;
1434 queue_head_inc(rxq);
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001435 page_info = &rxo->page_info_tbl[rxq->head];
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001436 }
1437 if (pagep)
Sathya Perla26d92f92010-01-21 22:52:08 -08001438 prev_page_info->last_page_user = true;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001439
1440 if (posted) {
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001441 atomic_add(posted, &rxq->used);
Sathya Perla8788fdc2009-07-27 22:52:03 +00001442 be_rxq_notify(adapter, rxq->id, posted);
Sathya Perlaea1dae12009-03-19 23:56:20 -07001443 } else if (atomic_read(&rxq->used) == 0) {
1444 /* Let be_worker replenish when memory is available */
Sathya Perla3abcded2010-10-03 22:12:27 -07001445 rxo->rx_post_starved = true;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001446 }
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001447}
1448
/* Fetch the next valid TX completion from @tx_cq, or NULL if none.
 * Clears the valid bit so the entry is not reprocessed after wrap-around.
 */
static struct be_eth_tx_compl *be_tx_compl_get(struct be_queue_info *tx_cq)
{
	struct be_eth_tx_compl *txcp = queue_tail_node(tx_cq);

	if (txcp->dw[offsetof(struct amap_eth_tx_compl, valid) / 32] == 0)
		return NULL;

	/* Read barrier: the completion body must not be read before the
	 * valid-bit check above.
	 */
	rmb();
	be_dws_le_to_cpu(txcp, sizeof(*txcp));

	/* Reset the valid bit; we won't look at this entry again */
	txcp->dw[offsetof(struct amap_eth_tx_compl, valid) / 32] = 0;

	queue_tail_inc(tx_cq);
	return txcp;
}
1464
/* Unmap and free one transmitted skb whose completion arrived.
 * Walks the TX ring from the current tail up to @last_index (the wrb
 * index reported in the completion), unmapping each fragment wrb.
 * Returns the number of wrbs consumed (including the header wrb) so the
 * caller can decrement txq->used.
 */
static u16 be_tx_compl_process(struct be_adapter *adapter,
		struct be_tx_obj *txo, u16 last_index)
{
	struct be_queue_info *txq = &txo->q;
	struct be_eth_wrb *wrb;
	struct sk_buff **sent_skbs = txo->sent_skb_list;
	struct sk_buff *sent_skb;
	u16 cur_index, num_wrbs = 1; /* account for hdr wrb */
	bool unmap_skb_hdr = true;

	/* The skb was stashed at the tail slot when it was posted */
	sent_skb = sent_skbs[txq->tail];
	BUG_ON(!sent_skb);
	sent_skbs[txq->tail] = NULL;

	/* skip header wrb */
	queue_tail_inc(txq);

	do {
		cur_index = txq->tail;
		wrb = queue_tail_node(txq);
		/* The first data wrb also carries the linear (header) part
		 * of the skb, if any; unmap it exactly once.
		 */
		unmap_tx_frag(&adapter->pdev->dev, wrb,
			      (unmap_skb_hdr && skb_headlen(sent_skb)));
		unmap_skb_hdr = false;

		num_wrbs++;
		queue_tail_inc(txq);
	} while (cur_index != last_index);

	kfree_skb(sent_skb);
	return num_wrbs;
}
1496
/* Return the number of events in the event queue.
 * Consumes (zeroes) each valid entry as it goes so the slot can be
 * reused when the ring wraps; stops at the first unwritten entry.
 */
static inline int events_get(struct be_eq_obj *eqo)
{
	struct be_eq_entry *eqe;
	int num = 0;

	do {
		eqe = queue_tail_node(&eqo->q);
		if (eqe->evt == 0)
			break;

		/* Read barrier between the evt-word check and clearing it */
		rmb();
		eqe->evt = 0;
		num++;
		queue_tail_inc(&eqo->q);
	} while (true);

	return num;
}
1516
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001517static int event_handle(struct be_eq_obj *eqo)
Sathya Perla859b1e42009-08-10 03:43:51 +00001518{
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001519 bool rearm = false;
1520 int num = events_get(eqo);
Sathya Perla859b1e42009-08-10 03:43:51 +00001521
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001522 /* Deal with any spurious interrupts that come without events */
1523 if (!num)
1524 rearm = true;
Sathya Perla859b1e42009-08-10 03:43:51 +00001525
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001526 be_eq_notify(eqo->adapter, eqo->q.id, rearm, true, num);
Sathya Perla859b1e42009-08-10 03:43:51 +00001527 if (num)
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001528 napi_schedule(&eqo->napi);
1529
1530 return num;
Sathya Perla859b1e42009-08-10 03:43:51 +00001531}
1532
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001533/* Leaves the EQ is disarmed state */
1534static void be_eq_clean(struct be_eq_obj *eqo)
1535{
1536 int num = events_get(eqo);
1537
1538 be_eq_notify(eqo->adapter, eqo->q.id, false, true, num);
1539}
1540
/* Flush an RX object during teardown: discard all pending completions,
 * then release every posted-but-unused receive buffer and reset the ring
 * indices to zero.
 */
static void be_rx_cq_clean(struct be_rx_obj *rxo)
{
	struct be_rx_page_info *page_info;
	struct be_queue_info *rxq = &rxo->q;
	struct be_queue_info *rx_cq = &rxo->cq;
	struct be_rx_compl_info *rxcp;
	u16 tail;

	/* First cleanup pending rx completions */
	while ((rxcp = be_rx_compl_get(rxo)) != NULL) {
		be_rx_compl_discard(rxo, rxcp);
		be_cq_notify(rxo->adapter, rx_cq->id, false, 1);
	}

	/* Then free posted rx buffer that were not used */
	/* Oldest posted buffer sits 'used' slots behind the head */
	tail = (rxq->head + rxq->len - atomic_read(&rxq->used)) % rxq->len;
	for (; atomic_read(&rxq->used) > 0; index_inc(&tail, rxq->len)) {
		page_info = get_rx_page_info(rxo, tail);
		put_page(page_info->page);
		memset(page_info, 0, sizeof(*page_info));
	}
	BUG_ON(atomic_read(&rxq->used));
	rxq->tail = rxq->head = 0;
}
1565
/* Drain a TX object during teardown: poll for completions for up to
 * ~200ms, then forcibly unmap and free any posted skbs whose completions
 * never arrived so no DMA mappings or skbs leak.
 */
static void be_tx_compl_clean(struct be_adapter *adapter,
		struct be_tx_obj *txo)
{
	struct be_queue_info *tx_cq = &txo->cq;
	struct be_queue_info *txq = &txo->q;
	struct be_eth_tx_compl *txcp;
	u16 end_idx, cmpl = 0, timeo = 0, num_wrbs = 0;
	struct sk_buff **sent_skbs = txo->sent_skb_list;
	struct sk_buff *sent_skb;
	bool dummy_wrb;

	/* Wait for a max of 200ms for all the tx-completions to arrive. */
	do {
		while ((txcp = be_tx_compl_get(tx_cq))) {
			end_idx = AMAP_GET_BITS(struct amap_eth_tx_compl,
					wrb_index, txcp);
			num_wrbs += be_tx_compl_process(adapter, txo, end_idx);
			cmpl++;
		}
		if (cmpl) {
			/* Ack the batch and release the consumed wrbs */
			be_cq_notify(adapter, tx_cq->id, false, cmpl);
			atomic_sub(num_wrbs, &txq->used);
			cmpl = 0;
			num_wrbs = 0;
		}

		if (atomic_read(&txq->used) == 0 || ++timeo > 200)
			break;

		mdelay(1);
	} while (true);

	if (atomic_read(&txq->used))
		dev_err(&adapter->pdev->dev, "%d pending tx-completions\n",
			atomic_read(&txq->used));

	/* free posted tx for which compls will never arrive */
	while (atomic_read(&txq->used)) {
		sent_skb = sent_skbs[txq->tail];
		end_idx = txq->tail;
		/* Recompute the skb's last wrb index from its frag count */
		index_adv(&end_idx,
			wrb_cnt_for_skb(adapter, sent_skb, &dummy_wrb) - 1,
			txq->len);
		num_wrbs = be_tx_compl_process(adapter, txo, end_idx);
		atomic_sub(num_wrbs, &txq->used);
	}
}
1613
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001614static void be_evt_queues_destroy(struct be_adapter *adapter)
1615{
1616 struct be_eq_obj *eqo;
1617 int i;
1618
1619 for_all_evt_queues(adapter, eqo, i) {
1620 be_eq_clean(eqo);
1621 if (eqo->q.created)
1622 be_cmd_q_destroy(adapter, &eqo->q, QTYPE_EQ);
1623 be_queue_free(adapter, &eqo->q);
1624 }
1625}
1626
1627static int be_evt_queues_create(struct be_adapter *adapter)
1628{
1629 struct be_queue_info *eq;
1630 struct be_eq_obj *eqo;
1631 int i, rc;
1632
1633 adapter->num_evt_qs = num_irqs(adapter);
1634
1635 for_all_evt_queues(adapter, eqo, i) {
1636 eqo->adapter = adapter;
1637 eqo->tx_budget = BE_TX_BUDGET;
1638 eqo->idx = i;
1639 eqo->max_eqd = BE_MAX_EQD;
1640 eqo->enable_aic = true;
1641
1642 eq = &eqo->q;
1643 rc = be_queue_alloc(adapter, eq, EVNT_Q_LEN,
1644 sizeof(struct be_eq_entry));
1645 if (rc)
1646 return rc;
1647
1648 rc = be_cmd_eq_create(adapter, eq, eqo->cur_eqd);
1649 if (rc)
1650 return rc;
1651 }
1652 return rc;
1653}
1654
Sathya Perla5fb379e2009-06-18 00:02:59 +00001655static void be_mcc_queues_destroy(struct be_adapter *adapter)
1656{
1657 struct be_queue_info *q;
Sathya Perla5fb379e2009-06-18 00:02:59 +00001658
Sathya Perla8788fdc2009-07-27 22:52:03 +00001659 q = &adapter->mcc_obj.q;
Sathya Perla5fb379e2009-06-18 00:02:59 +00001660 if (q->created)
Sathya Perla8788fdc2009-07-27 22:52:03 +00001661 be_cmd_q_destroy(adapter, q, QTYPE_MCCQ);
Sathya Perla5fb379e2009-06-18 00:02:59 +00001662 be_queue_free(adapter, q);
1663
Sathya Perla8788fdc2009-07-27 22:52:03 +00001664 q = &adapter->mcc_obj.cq;
Sathya Perla5fb379e2009-06-18 00:02:59 +00001665 if (q->created)
Sathya Perla8788fdc2009-07-27 22:52:03 +00001666 be_cmd_q_destroy(adapter, q, QTYPE_CQ);
Sathya Perla5fb379e2009-06-18 00:02:59 +00001667 be_queue_free(adapter, q);
1668}
1669
/* Must be called only after TX qs are created as MCC shares TX EQ.
 * Creates the MCC completion queue and WRB queue, unwinding in reverse
 * order via the goto chain on any failure.  Returns 0 or -1.
 */
static int be_mcc_queues_create(struct be_adapter *adapter)
{
	struct be_queue_info *q, *cq;

	cq = &adapter->mcc_obj.cq;
	if (be_queue_alloc(adapter, cq, MCC_CQ_LEN,
			sizeof(struct be_mcc_compl)))
		goto err;

	/* Use the default EQ for MCC completions */
	if (be_cmd_cq_create(adapter, cq, &mcc_eqo(adapter)->q, true, 0))
		goto mcc_cq_free;

	q = &adapter->mcc_obj.q;
	if (be_queue_alloc(adapter, q, MCC_Q_LEN, sizeof(struct be_mcc_wrb)))
		goto mcc_cq_destroy;

	if (be_cmd_mccq_create(adapter, q, cq))
		goto mcc_q_free;

	return 0;

	/* Unwind: each label undoes the step just before the failure */
mcc_q_free:
	be_queue_free(adapter, q);
mcc_cq_destroy:
	be_cmd_q_destroy(adapter, cq, QTYPE_CQ);
mcc_cq_free:
	be_queue_free(adapter, cq);
err:
	return -1;
}
1702
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001703static void be_tx_queues_destroy(struct be_adapter *adapter)
1704{
1705 struct be_queue_info *q;
Sathya Perla3c8def92011-06-12 20:01:58 +00001706 struct be_tx_obj *txo;
1707 u8 i;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001708
Sathya Perla3c8def92011-06-12 20:01:58 +00001709 for_all_tx_queues(adapter, txo, i) {
1710 q = &txo->q;
1711 if (q->created)
1712 be_cmd_q_destroy(adapter, q, QTYPE_TXQ);
1713 be_queue_free(adapter, q);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001714
Sathya Perla3c8def92011-06-12 20:01:58 +00001715 q = &txo->cq;
1716 if (q->created)
1717 be_cmd_q_destroy(adapter, q, QTYPE_CQ);
1718 be_queue_free(adapter, q);
1719 }
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001720}
1721
Sathya Perladafc0fe2011-10-24 02:45:02 +00001722static int be_num_txqs_want(struct be_adapter *adapter)
1723{
Sathya Perla11ac75e2011-12-13 00:58:50 +00001724 if (sriov_enabled(adapter) || be_is_mc(adapter) ||
Sathya Perladafc0fe2011-10-24 02:45:02 +00001725 lancer_chip(adapter) || !be_physfn(adapter) ||
1726 adapter->generation == BE_GEN2)
1727 return 1;
1728 else
1729 return MAX_TX_QS;
1730}
1731
/* Decide the TX queue count, publish it to the net stack, and create a
 * completion queue for each TX object.  Returns 0 or the first failing
 * status; partially created CQs are left for the caller to destroy.
 */
static int be_tx_cqs_create(struct be_adapter *adapter)
{
	struct be_queue_info *cq, *eq;
	int status;
	struct be_tx_obj *txo;
	u8 i;

	adapter->num_tx_qs = be_num_txqs_want(adapter);
	if (adapter->num_tx_qs != MAX_TX_QS) {
		/* netif_set_real_num_tx_queues() requires rtnl here */
		rtnl_lock();
		netif_set_real_num_tx_queues(adapter->netdev,
			adapter->num_tx_qs);
		rtnl_unlock();
	}

	for_all_tx_queues(adapter, txo, i) {
		cq = &txo->cq;
		status = be_queue_alloc(adapter, cq, TX_CQ_LEN,
					sizeof(struct be_eth_tx_compl));
		if (status)
			return status;

		/* If num_evt_qs is less than num_tx_qs, then more than
		 * one txq share an eq
		 */
		eq = &adapter->eq_obj[i % adapter->num_evt_qs].q;
		status = be_cmd_cq_create(adapter, cq, eq, false, 3);
		if (status)
			return status;
	}
	return 0;
}
1764
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001765static int be_tx_qs_create(struct be_adapter *adapter)
1766{
1767 struct be_tx_obj *txo;
1768 int i, status;
1769
1770 for_all_tx_queues(adapter, txo, i) {
1771 status = be_queue_alloc(adapter, &txo->q, TX_Q_LEN,
1772 sizeof(struct be_eth_wrb));
1773 if (status)
1774 return status;
1775
1776 status = be_cmd_txq_create(adapter, &txo->q, &txo->cq);
1777 if (status)
1778 return status;
1779 }
1780
1781 return 0;
1782}
1783
1784static void be_rx_cqs_destroy(struct be_adapter *adapter)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001785{
1786 struct be_queue_info *q;
Sathya Perla3abcded2010-10-03 22:12:27 -07001787 struct be_rx_obj *rxo;
1788 int i;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001789
Sathya Perla3abcded2010-10-03 22:12:27 -07001790 for_all_rx_queues(adapter, rxo, i) {
Sathya Perla3abcded2010-10-03 22:12:27 -07001791 q = &rxo->cq;
1792 if (q->created)
1793 be_cmd_q_destroy(adapter, q, QTYPE_CQ);
1794 be_queue_free(adapter, q);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001795 }
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001796}
1797
/* Decide the RX ring count and create a completion queue per RX object.
 * Returns 0 or the first failing status; partially created CQs are left
 * for the caller to destroy.
 */
static int be_rx_cqs_create(struct be_adapter *adapter)
{
	struct be_queue_info *eq, *cq;
	struct be_rx_obj *rxo;
	int rc, i;

	/* We'll create as many RSS rings as there are irqs.
	 * But when there's only one irq there's no use creating RSS rings
	 */
	/* The +1 accounts for the default (non-RSS) ring */
	adapter->num_rx_qs = (num_irqs(adapter) > 1) ?
				num_irqs(adapter) + 1 : 1;

	adapter->big_page_size = (1 << get_order(rx_frag_size)) * PAGE_SIZE;
	for_all_rx_queues(adapter, rxo, i) {
		rxo->adapter = adapter;
		cq = &rxo->cq;
		rc = be_queue_alloc(adapter, cq, RX_CQ_LEN,
				sizeof(struct be_eth_rx_compl));
		if (rc)
			return rc;

		/* RX CQs may share EQs when there are more CQs than EQs */
		eq = &adapter->eq_obj[i % adapter->num_evt_qs].q;
		rc = be_cmd_cq_create(adapter, cq, eq, false, 3);
		if (rc)
			return rc;
	}

	if (adapter->num_rx_qs != MAX_RX_QS)
		dev_info(&adapter->pdev->dev,
			"Created only %d receive queues", adapter->num_rx_qs);

	return 0;
}
1831
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001832static irqreturn_t be_intx(int irq, void *dev)
1833{
1834 struct be_adapter *adapter = dev;
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001835 int num_evts;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001836
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001837 /* With INTx only one EQ is used */
1838 num_evts = event_handle(&adapter->eq_obj[0]);
1839 if (num_evts)
1840 return IRQ_HANDLED;
1841 else
1842 return IRQ_NONE;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001843}
1844
/* MSI-x interrupt handler: one vector per EQ; event_handle() defers the
 * real work to NAPI.
 */
static irqreturn_t be_msix(int irq, void *dev)
{
	struct be_eq_obj *eqo = dev;

	event_handle(eqo);
	return IRQ_HANDLED;
}
1852
Sathya Perla2e588f82011-03-11 02:49:26 +00001853static inline bool do_gro(struct be_rx_compl_info *rxcp)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001854{
Sathya Perla2e588f82011-03-11 02:49:26 +00001855 return (rxcp->tcpf && !rxcp->err) ? true : false;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001856}
1857
/* NAPI RX poll worker: consume up to @budget completions from this RX
 * object, handing each packet to the GRO or regular receive path.
 * Re-arms the CQ for the work done and replenishes the RX ring when it
 * falls below the refill watermark.  Returns completions processed.
 */
static int be_process_rx(struct be_rx_obj *rxo, struct napi_struct *napi,
			int budget)
{
	struct be_adapter *adapter = rxo->adapter;
	struct be_queue_info *rx_cq = &rxo->cq;
	struct be_rx_compl_info *rxcp;
	u32 work_done;

	for (work_done = 0; work_done < budget; work_done++) {
		rxcp = be_rx_compl_get(rxo);
		if (!rxcp)
			break;

		/* Is it a flush compl that has no data */
		if (unlikely(rxcp->num_rcvd == 0))
			goto loop_continue;

		/* Discard compl with partial DMA Lancer B0 */
		if (unlikely(!rxcp->pkt_size)) {
			be_rx_compl_discard(rxo, rxcp);
			goto loop_continue;
		}

		/* On BE drop pkts that arrive due to imperfect filtering in
		 * promiscuous mode on some skews
		 */
		if (unlikely(rxcp->port != adapter->port_num &&
				!lancer_chip(adapter))) {
			be_rx_compl_discard(rxo, rxcp);
			goto loop_continue;
		}

		if (do_gro(rxcp))
			be_rx_compl_process_gro(rxo, napi, rxcp);
		else
			be_rx_compl_process(rxo, rxcp);
loop_continue:
		be_rx_stats_update(rxo, rxcp);
	}

	if (work_done) {
		be_cq_notify(adapter, rx_cq->id, true, work_done);

		/* Top up the RX ring before it runs dry */
		if (atomic_read(&rxo->q.used) < RX_FRAGS_REFILL_WM)
			be_post_rx_frags(rxo, GFP_ATOMIC);
	}

	return work_done;
}
1907
/* NAPI TX poll worker: reap up to @budget TX completions for this TX
 * object, release the consumed wrbs, and wake the netdev subqueue @idx
 * if it had been stopped for lack of ring space.  Returns true when all
 * available work fit within the budget (i.e. TX is "done").
 */
static bool be_process_tx(struct be_adapter *adapter, struct be_tx_obj *txo,
			int budget, int idx)
{
	struct be_eth_tx_compl *txcp;
	int num_wrbs = 0, work_done;

	for (work_done = 0; work_done < budget; work_done++) {
		txcp = be_tx_compl_get(&txo->cq);
		if (!txcp)
			break;
		num_wrbs += be_tx_compl_process(adapter, txo,
				AMAP_GET_BITS(struct amap_eth_tx_compl,
					wrb_index, txcp));
	}

	if (work_done) {
		be_cq_notify(adapter, txo->cq.id, true, work_done);
		atomic_sub(num_wrbs, &txo->q.used);

		/* As Tx wrbs have been freed up, wake up netdev queue
		 * if it was stopped due to lack of tx wrbs. */
		if (__netif_subqueue_stopped(adapter->netdev, idx) &&
			atomic_read(&txo->q.used) < txo->q.len / 2) {
			netif_wake_subqueue(adapter->netdev, idx);
		}

		/* u64 stats are updated under their seqcount-style sync */
		u64_stats_update_begin(&tx_stats(txo)->sync_compl);
		tx_stats(txo)->tx_compl += work_done;
		u64_stats_update_end(&tx_stats(txo)->sync_compl);
	}
	return (work_done < budget); /* Done */
}
Sathya Perla3c8def92011-06-12 20:01:58 +00001940
/* NAPI poll handler for one event queue: services all TX and RX queues
 * mapped to this EQ, and the MCC queue if this is the MCC EQ.
 * Returns the amount of work done; < @budget means polling may stop.
 */
int be_poll(struct napi_struct *napi, int budget)
{
	struct be_eq_obj *eqo = container_of(napi, struct be_eq_obj, napi);
	struct be_adapter *adapter = eqo->adapter;
	int max_work = 0, work, i;
	bool tx_done;

	/* Process all TXQs serviced by this EQ */
	for (i = eqo->idx; i < adapter->num_tx_qs; i += adapter->num_evt_qs) {
		tx_done = be_process_tx(adapter, &adapter->tx_obj[i],
					eqo->tx_budget, i);
		/* TX budget exhausted: force a re-poll by claiming full work */
		if (!tx_done)
			max_work = budget;
	}

	/* This loop will iterate twice for EQ0 in which
	 * completions of the last RXQ (default one) are also processed
	 * For other EQs the loop iterates only once
	 */
	for (i = eqo->idx; i < adapter->num_rx_qs; i += adapter->num_evt_qs) {
		work = be_process_rx(&adapter->rx_obj[i], napi, budget);
		max_work = max(work, max_work);
	}

	if (is_mcc_eqo(eqo))
		be_process_mcc(adapter);

	if (max_work < budget) {
		/* All done: exit polling and re-arm the EQ */
		napi_complete(napi);
		be_eq_notify(adapter, eqo->q.id, true, false, 0);
	} else {
		/* As we'll continue in polling mode, count and clear events */
		be_eq_notify(adapter, eqo->q.id, false, false, events_get(eqo));
	}
	return max_work;
}
1977
/* Detect an unrecoverable hardware error (UE) and dump diagnostic info.
 * On Lancer chips the SLIPORT status/error registers are read via the
 * doorbell BAR; on BE2/BE3 the UE status words are read from PCI config
 * space and filtered through their mask registers. On error the adapter
 * is marked ue_detected/eeh_err so no further cmds are fired at the HW.
 * No-op if an error was already detected.
 */
void be_detect_dump_ue(struct be_adapter *adapter)
{
	u32 ue_lo = 0, ue_hi = 0, ue_lo_mask = 0, ue_hi_mask = 0;
	u32 sliport_status = 0, sliport_err1 = 0, sliport_err2 = 0;
	u32 i;

	if (adapter->eeh_err || adapter->ue_detected)
		return;

	if (lancer_chip(adapter)) {
		sliport_status = ioread32(adapter->db + SLIPORT_STATUS_OFFSET);
		if (sliport_status & SLIPORT_STATUS_ERR_MASK) {
			sliport_err1 = ioread32(adapter->db +
					SLIPORT_ERROR1_OFFSET);
			sliport_err2 = ioread32(adapter->db +
					SLIPORT_ERROR2_OFFSET);
		}
	} else {
		pci_read_config_dword(adapter->pdev,
				PCICFG_UE_STATUS_LOW, &ue_lo);
		pci_read_config_dword(adapter->pdev,
				PCICFG_UE_STATUS_HIGH, &ue_hi);
		pci_read_config_dword(adapter->pdev,
				PCICFG_UE_STATUS_LOW_MASK, &ue_lo_mask);
		pci_read_config_dword(adapter->pdev,
				PCICFG_UE_STATUS_HI_MASK, &ue_hi_mask);

		/* Masked bits are not real errors; clear them */
		ue_lo = (ue_lo & (~ue_lo_mask));
		ue_hi = (ue_hi & (~ue_hi_mask));
	}

	if (ue_lo || ue_hi ||
		sliport_status & SLIPORT_STATUS_ERR_MASK) {
		adapter->ue_detected = true;
		adapter->eeh_err = true;
		dev_err(&adapter->pdev->dev,
			"Unrecoverable error in the card\n");
	}

	/* Log the description of every UE bit that is set */
	if (ue_lo) {
		for (i = 0; ue_lo; ue_lo >>= 1, i++) {
			if (ue_lo & 1)
				dev_err(&adapter->pdev->dev,
				"UE: %s bit set\n", ue_status_low_desc[i]);
		}
	}
	if (ue_hi) {
		for (i = 0; ue_hi; ue_hi >>= 1, i++) {
			if (ue_hi & 1)
				dev_err(&adapter->pdev->dev,
				"UE: %s bit set\n", ue_status_hi_desc[i]);
		}
	}

	if (sliport_status & SLIPORT_STATUS_ERR_MASK) {
		dev_err(&adapter->pdev->dev,
			"sliport status 0x%x\n", sliport_status);
		dev_err(&adapter->pdev->dev,
			"sliport error1 0x%x\n", sliport_err1);
		dev_err(&adapter->pdev->dev,
			"sliport error2 0x%x\n", sliport_err2);
	}
}
2041
Sathya Perla8d56ff12009-11-22 22:02:26 +00002042static void be_msix_disable(struct be_adapter *adapter)
2043{
Sathya Perlaac6a0c42011-03-21 20:49:25 +00002044 if (msix_enabled(adapter)) {
Sathya Perla8d56ff12009-11-22 22:02:26 +00002045 pci_disable_msix(adapter->pdev);
Sathya Perlaac6a0c42011-03-21 20:49:25 +00002046 adapter->num_msix_vec = 0;
Sathya Perla3abcded2010-10-03 22:12:27 -07002047 }
2048}
2049
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002050static uint be_num_rss_want(struct be_adapter *adapter)
2051{
2052 if ((adapter->function_caps & BE_FUNCTION_CAPS_RSS) &&
2053 adapter->num_vfs == 0 && be_physfn(adapter) &&
2054 !be_is_mc(adapter))
2055 return (adapter->be3_native) ? BE3_MAX_RSS_QS : BE2_MAX_RSS_QS;
2056 else
2057 return 0;
2058}
2059
/* Try to enable MSI-X with one vector per desired RSS queue (capped by
 * the number of online CPUs). pci_enable_msix() returns 0 on success or,
 * when it cannot grant the request, the number of vectors that ARE
 * available — in which case we retry once with that smaller count.
 * On total failure num_msix_vec stays 0 and the driver falls back to INTx.
 */
static void be_msix_enable(struct be_adapter *adapter)
{
#define BE_MIN_MSIX_VECTORS 1
	int i, status, num_vec;

	/* If RSS queues are not used, need a vec for default RX Q */
	num_vec = min(be_num_rss_want(adapter), num_online_cpus());
	num_vec = max(num_vec, BE_MIN_MSIX_VECTORS);

	for (i = 0; i < num_vec; i++)
		adapter->msix_entries[i].entry = i;

	status = pci_enable_msix(adapter->pdev, adapter->msix_entries, num_vec);
	if (status == 0) {
		goto done;
	} else if (status >= BE_MIN_MSIX_VECTORS) {
		/* Fewer vectors available than asked for: retry with that */
		num_vec = status;
		if (pci_enable_msix(adapter->pdev, adapter->msix_entries,
				num_vec) == 0)
			goto done;
	}
	return;
done:
	adapter->num_msix_vec = num_vec;
	return;
}
2086
/* Enable SR-IOV if the num_vfs module parameter was set on a PF.
 * The requested VF count is clamped to what the device reports via its
 * SR-IOV extended capability, and per-VF config state is allocated.
 * Returns 0 on success (including "nothing to do"), -ENOMEM on
 * allocation failure.
 */
static int be_sriov_enable(struct be_adapter *adapter)
{
	be_check_sriov_fn_type(adapter);

#ifdef CONFIG_PCI_IOV
	if (be_physfn(adapter) && num_vfs) {
		int status, pos;
		u16 dev_vfs;

		pos = pci_find_ext_capability(adapter->pdev,
						PCI_EXT_CAP_ID_SRIOV);
		/* Fix: pos was used unchecked; 0 means the device has no
		 * SR-IOV capability and the config read below would hit a
		 * bogus offset. Bail out with VFs disabled instead.
		 */
		if (!pos)
			return 0;

		pci_read_config_word(adapter->pdev,
					pos + PCI_SRIOV_TOTAL_VF, &dev_vfs);

		adapter->num_vfs = min_t(u16, num_vfs, dev_vfs);
		if (adapter->num_vfs != num_vfs)
			dev_info(&adapter->pdev->dev,
				"Device supports %d VFs and not %d\n",
				adapter->num_vfs, num_vfs);

		status = pci_enable_sriov(adapter->pdev, adapter->num_vfs);
		if (status)
			adapter->num_vfs = 0;

		if (adapter->num_vfs) {
			/* Fix: size the array by the clamped/enabled VF count,
			 * not the raw module parameter.
			 */
			adapter->vf_cfg = kcalloc(adapter->num_vfs,
						sizeof(struct be_vf_cfg),
						GFP_KERNEL);
			if (!adapter->vf_cfg)
				return -ENOMEM;
		}
	}
#endif
	return 0;
}
2122
/* Tear down SR-IOV: disable the VFs and free the per-VF config array. */
static void be_sriov_disable(struct be_adapter *adapter)
{
#ifdef CONFIG_PCI_IOV
	if (!sriov_enabled(adapter))
		return;

	pci_disable_sriov(adapter->pdev);
	kfree(adapter->vf_cfg);
	adapter->num_vfs = 0;
#endif
}
2133
/* Return the MSI-X vector assigned to the given event queue (the EQ's
 * index doubles as its msix_entries slot). */
static inline int be_msix_vec_get(struct be_adapter *adapter,
				struct be_eq_obj *eqo)
{
	return adapter->msix_entries[eqo->idx].vector;
}
2139
/* Request one MSI-X IRQ per event queue, naming each "<netdev>-q<i>".
 * On failure, frees the IRQs already requested (walking the EQ array
 * backwards from the last successful one), disables MSI-X and returns
 * the error so the caller can fall back to INTx.
 */
static int be_msix_register(struct be_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	struct be_eq_obj *eqo;
	int status, i, vec;

	for_all_evt_queues(adapter, eqo, i) {
		sprintf(eqo->desc, "%s-q%d", netdev->name, i);
		vec = be_msix_vec_get(adapter, eqo);
		status = request_irq(vec, be_msix, 0, eqo->desc, eqo);
		if (status)
			goto err_msix;
	}

	return 0;
err_msix:
	/* Unwind: free only the vectors successfully requested so far */
	for (i--, eqo = &adapter->eq_obj[i]; i >= 0; i--, eqo--)
		free_irq(be_msix_vec_get(adapter, eqo), eqo);
	dev_warn(&adapter->pdev->dev, "MSIX Request IRQ failed - err %d\n",
		status);
	be_msix_disable(adapter);
	return status;
}
2163
2164static int be_irq_register(struct be_adapter *adapter)
2165{
2166 struct net_device *netdev = adapter->netdev;
2167 int status;
2168
Sathya Perlaac6a0c42011-03-21 20:49:25 +00002169 if (msix_enabled(adapter)) {
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002170 status = be_msix_register(adapter);
2171 if (status == 0)
2172 goto done;
Sarveshwar Bandiba343c72010-03-31 02:56:12 +00002173 /* INTx is not supported for VF */
2174 if (!be_physfn(adapter))
2175 return status;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002176 }
2177
2178 /* INTx */
2179 netdev->irq = adapter->pdev->irq;
2180 status = request_irq(netdev->irq, be_intx, IRQF_SHARED, netdev->name,
2181 adapter);
2182 if (status) {
2183 dev_err(&adapter->pdev->dev,
2184 "INTx request IRQ failed - err %d\n", status);
2185 return status;
2186 }
2187done:
2188 adapter->isr_registered = true;
2189 return 0;
2190}
2191
2192static void be_irq_unregister(struct be_adapter *adapter)
2193{
2194 struct net_device *netdev = adapter->netdev;
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002195 struct be_eq_obj *eqo;
Sathya Perla3abcded2010-10-03 22:12:27 -07002196 int i;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002197
2198 if (!adapter->isr_registered)
2199 return;
2200
2201 /* INTx */
Sathya Perlaac6a0c42011-03-21 20:49:25 +00002202 if (!msix_enabled(adapter)) {
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002203 free_irq(netdev->irq, adapter);
2204 goto done;
2205 }
2206
2207 /* MSIx */
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002208 for_all_evt_queues(adapter, eqo, i)
2209 free_irq(be_msix_vec_get(adapter, eqo), eqo);
Sathya Perla3abcded2010-10-03 22:12:27 -07002210
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002211done:
2212 adapter->isr_registered = false;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002213}
2214
/* Destroy every RX queue: tell the FW to destroy the ring, give in-flight
 * DMA and the flush completion time to land, drain the completion queue,
 * then free the queue memory.
 */
static void be_rx_qs_destroy(struct be_adapter *adapter)
{
	struct be_queue_info *q;
	struct be_rx_obj *rxo;
	int i;

	for_all_rx_queues(adapter, rxo, i) {
		q = &rxo->q;
		if (q->created) {
			be_cmd_rxq_destroy(adapter, q);
			/* After the rxq is invalidated, wait for a grace time
			 * of 1ms for all dma to end and the flush compl to
			 * arrive
			 */
			mdelay(1);
			be_rx_cq_clean(rxo);
		}
		be_queue_free(adapter, q);
	}
}
2235
/* ndo_stop handler: quiesce the interface. Disables async MCC events and
 * (on non-Lancer) the global interrupt, stops NAPI and syncs each EQ's
 * IRQ, releases the IRQs, drains pending TX completions and finally
 * destroys the RX queues.
 */
static int be_close(struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_tx_obj *txo;
	struct be_eq_obj *eqo;
	int i;

	be_async_mcc_disable(adapter);

	if (!lancer_chip(adapter))
		be_intr_set(adapter, false);

	for_all_evt_queues(adapter, eqo, i) {
		napi_disable(&eqo->napi);
		/* Make sure no poll is running on this EQ's vector anymore */
		if (msix_enabled(adapter))
			synchronize_irq(be_msix_vec_get(adapter, eqo));
		else
			synchronize_irq(netdev->irq);
		be_eq_clean(eqo);
	}

	be_irq_unregister(adapter);

	/* Wait for all pending tx completions to arrive so that
	 * all tx skbs are freed.
	 */
	for_all_tx_queues(adapter, txo, i)
		be_tx_compl_clean(adapter, txo);

	be_rx_qs_destroy(adapter);
	return 0;
}
2268
/* Allocate and create all RX queues, program the 128-entry RSS indirection
 * table (when multiple RX queues exist) and post the initial receive
 * buffers. Returns 0 or the first error from queue alloc/creation.
 */
static int be_rx_qs_create(struct be_adapter *adapter)
{
	struct be_rx_obj *rxo;
	int rc, i, j;
	u8 rsstable[128];	/* RSS indirection table is 128 entries */

	for_all_rx_queues(adapter, rxo, i) {
		rc = be_queue_alloc(adapter, &rxo->q, RX_Q_LEN,
				sizeof(struct be_eth_rx_d));
		if (rc)
			return rc;
	}

	/* The FW would like the default RXQ to be created first */
	rxo = default_rxo(adapter);
	rc = be_cmd_rxq_create(adapter, &rxo->q, rxo->cq.id, rx_frag_size,
			adapter->if_handle, false, &rxo->rss_id);
	if (rc)
		return rc;

	for_all_rss_queues(adapter, rxo, i) {
		rc = be_cmd_rxq_create(adapter, &rxo->q, rxo->cq.id,
				rx_frag_size, adapter->if_handle,
				true, &rxo->rss_id);
		if (rc)
			return rc;
	}

	if (be_multi_rxq(adapter)) {
		/* Fill the indirection table with the RSS queue ids,
		 * round-robin across the (num_rx_qs - 1) RSS queues */
		for (j = 0; j < 128; j += adapter->num_rx_qs - 1) {
			for_all_rss_queues(adapter, rxo, i) {
				if ((j + i) >= 128)
					break;
				rsstable[j + i] = rxo->rss_id;
			}
		}
		rc = be_cmd_rss_config(adapter, rsstable, 128);
		if (rc)
			return rc;
	}

	/* First time posting */
	for_all_rx_queues(adapter, rxo, i)
		be_post_rx_frags(rxo, GFP_KERNEL);
	return 0;
}
2315
/* ndo_open handler: bring the interface up. Creates the RX queues,
 * registers IRQs, enables interrupts, arms all CQs/EQs, enables NAPI and
 * async MCC events, then reports the current link state.
 * On failure everything is torn down via be_close() and -EIO is returned.
 */
static int be_open(struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_eq_obj *eqo;
	struct be_rx_obj *rxo;
	struct be_tx_obj *txo;
	u8 link_status;
	int status, i;

	status = be_rx_qs_create(adapter);
	if (status)
		goto err;

	be_irq_register(adapter);

	if (!lancer_chip(adapter))
		be_intr_set(adapter, true);

	/* Arm all RX and TX completion queues */
	for_all_rx_queues(adapter, rxo, i)
		be_cq_notify(adapter, rxo->cq.id, true, 0);

	for_all_tx_queues(adapter, txo, i)
		be_cq_notify(adapter, txo->cq.id, true, 0);

	be_async_mcc_enable(adapter);

	for_all_evt_queues(adapter, eqo, i) {
		napi_enable(&eqo->napi);
		be_eq_notify(adapter, eqo->q.id, true, false, 0);
	}

	/* Query and report the initial link state */
	status = be_cmd_link_status_query(adapter, NULL, NULL,
					&link_status, 0);
	if (!status)
		be_link_status_update(adapter, link_status);

	return 0;
err:
	be_close(adapter->netdev);
	return -EIO;
}
2357
Ajit Khaparde71d8d1b2009-12-03 06:16:59 +00002358static int be_setup_wol(struct be_adapter *adapter, bool enable)
2359{
2360 struct be_dma_mem cmd;
2361 int status = 0;
2362 u8 mac[ETH_ALEN];
2363
2364 memset(mac, 0, ETH_ALEN);
2365
2366 cmd.size = sizeof(struct be_cmd_req_acpi_wol_magic_config);
Ivan Vecera2b7bceb2011-02-02 08:05:12 +00002367 cmd.va = dma_alloc_coherent(&adapter->pdev->dev, cmd.size, &cmd.dma,
2368 GFP_KERNEL);
Ajit Khaparde71d8d1b2009-12-03 06:16:59 +00002369 if (cmd.va == NULL)
2370 return -1;
2371 memset(cmd.va, 0, cmd.size);
2372
2373 if (enable) {
2374 status = pci_write_config_dword(adapter->pdev,
2375 PCICFG_PM_CONTROL_OFFSET, PCICFG_PM_CONTROL_MASK);
2376 if (status) {
2377 dev_err(&adapter->pdev->dev,
Frans Pop2381a552010-03-24 07:57:36 +00002378 "Could not enable Wake-on-lan\n");
Ivan Vecera2b7bceb2011-02-02 08:05:12 +00002379 dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va,
2380 cmd.dma);
Ajit Khaparde71d8d1b2009-12-03 06:16:59 +00002381 return status;
2382 }
2383 status = be_cmd_enable_magic_wol(adapter,
2384 adapter->netdev->dev_addr, &cmd);
2385 pci_enable_wake(adapter->pdev, PCI_D3hot, 1);
2386 pci_enable_wake(adapter->pdev, PCI_D3cold, 1);
2387 } else {
2388 status = be_cmd_enable_magic_wol(adapter, mac, &cmd);
2389 pci_enable_wake(adapter->pdev, PCI_D3hot, 0);
2390 pci_enable_wake(adapter->pdev, PCI_D3cold, 0);
2391 }
2392
Ivan Vecera2b7bceb2011-02-02 08:05:12 +00002393 dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va, cmd.dma);
Ajit Khaparde71d8d1b2009-12-03 06:16:59 +00002394 return status;
2395}
2396
/*
 * Generate a seed MAC address from the PF MAC Address using jhash.
 * MAC Address for VFs are assigned incrementally starting from the seed.
 * These addresses are programmed in the ASIC by the PF and the VF driver
 * queries for the MAC address during its probe.
 * Returns the status of the last FW command (errors for earlier VFs are
 * logged but do not stop the loop).
 */
static inline int be_vf_eth_addr_config(struct be_adapter *adapter)
{
	u32 vf;
	int status = 0;
	u8 mac[ETH_ALEN];
	struct be_vf_cfg *vf_cfg;

	be_vf_eth_addr_generate(adapter, mac);

	for_all_vfs(adapter, vf_cfg, vf) {
		/* Lancer uses the mac_list cmd; BEx programs a pmac entry */
		if (lancer_chip(adapter)) {
			status = be_cmd_set_mac_list(adapter, mac, 1, vf + 1);
		} else {
			status = be_cmd_pmac_add(adapter, mac,
					vf_cfg->if_handle,
					&vf_cfg->pmac_id, vf + 1);
		}

		if (status)
			dev_err(&adapter->pdev->dev,
			"Mac address assignment failed for VF %d\n", vf);
		else
			memcpy(vf_cfg->mac_addr, mac, ETH_ALEN);

		/* Next VF gets the next consecutive address */
		mac[5] += 1;
	}
	return status;
}
2431
/* Undo be_vf_setup(): for every VF remove its programmed MAC (mac_list on
 * Lancer, pmac entry on BEx) and destroy its interface handle.
 */
static void be_vf_clear(struct be_adapter *adapter)
{
	struct be_vf_cfg *vf_cfg;
	u32 vf;

	for_all_vfs(adapter, vf_cfg, vf) {
		if (lancer_chip(adapter))
			be_cmd_set_mac_list(adapter, NULL, 0, vf + 1);
		else
			be_cmd_pmac_del(adapter, vf_cfg->if_handle,
					vf_cfg->pmac_id, vf + 1);

		be_cmd_if_destroy(adapter, vf_cfg->if_handle, vf + 1);
	}
}
2447
/* Undo be_setup(): clear the VFs, destroy the PF interface and all
 * MCC/RX-CQ/TX/event queues, tell the FW we are done issuing cmds, and
 * release the MSI-X vectors. Always returns 0.
 */
static int be_clear(struct be_adapter *adapter)
{
	if (sriov_enabled(adapter))
		be_vf_clear(adapter);

	be_cmd_if_destroy(adapter, adapter->if_handle, 0);

	be_mcc_queues_destroy(adapter);
	be_rx_cqs_destroy(adapter);
	be_tx_queues_destroy(adapter);
	be_evt_queues_destroy(adapter);

	/* tell fw we're done with firing cmds */
	be_cmd_fw_clean(adapter);

	be_msix_disable(adapter);
	return 0;
}
2466
Sathya Perla30128032011-11-10 19:17:57 +00002467static void be_vf_setup_init(struct be_adapter *adapter)
2468{
Sathya Perla11ac75e2011-12-13 00:58:50 +00002469 struct be_vf_cfg *vf_cfg;
Sathya Perla30128032011-11-10 19:17:57 +00002470 int vf;
2471
Sathya Perla11ac75e2011-12-13 00:58:50 +00002472 for_all_vfs(adapter, vf_cfg, vf) {
2473 vf_cfg->if_handle = -1;
2474 vf_cfg->pmac_id = -1;
Sathya Perla30128032011-11-10 19:17:57 +00002475 }
2476}
2477
/* Provision all enabled VFs: create an interface handle per VF, program
 * MAC addresses, then query link speed to derive each VF's tx_rate.
 * Returns 0 or the first FW command error (caller unwinds via be_clear).
 */
static int be_vf_setup(struct be_adapter *adapter)
{
	struct be_vf_cfg *vf_cfg;
	u32 cap_flags, en_flags, vf;
	u16 lnk_speed;
	int status;

	be_vf_setup_init(adapter);

	cap_flags = en_flags = BE_IF_FLAGS_UNTAGGED | BE_IF_FLAGS_BROADCAST |
				BE_IF_FLAGS_MULTICAST;
	for_all_vfs(adapter, vf_cfg, vf) {
		status = be_cmd_if_create(adapter, cap_flags, en_flags, NULL,
					&vf_cfg->if_handle, NULL, vf + 1);
		if (status)
			goto err;
	}

	status = be_vf_eth_addr_config(adapter);
	if (status)
		goto err;

	for_all_vfs(adapter, vf_cfg, vf) {
		status = be_cmd_link_status_query(adapter, NULL, &lnk_speed,
						NULL, vf + 1);
		if (status)
			goto err;
		/* lnk_speed is in units of 10 Mbps; tx_rate is in Mbps */
		vf_cfg->tx_rate = lnk_speed * 10;
	}
	return 0;
err:
	return status;
}
2511
Sathya Perla30128032011-11-10 19:17:57 +00002512static void be_setup_init(struct be_adapter *adapter)
2513{
2514 adapter->vlan_prio_bmap = 0xff;
2515 adapter->link_speed = -1;
2516 adapter->if_handle = -1;
2517 adapter->be3_native = false;
2518 adapter->promiscuous = false;
2519 adapter->eq_next_idx = 0;
2520}
2521
/* Obtain a usable MAC for a Lancer VF from the FW's mac_list. If the FW
 * reports an active pmac_id, validate it with a mac_addr query and adopt
 * it; otherwise program @mac as a new pmac entry on the PF interface.
 * On success @mac holds the address and adapter->pmac_id is set.
 */
static int be_add_mac_from_list(struct be_adapter *adapter, u8 *mac)
{
	u32 pmac_id;
	int status;
	bool pmac_id_active;

	status = be_cmd_get_mac_from_list(adapter, 0, &pmac_id_active,
					&pmac_id, mac);
	if (status != 0)
		goto do_none;

	if (pmac_id_active) {
		status = be_cmd_mac_addr_query(adapter, mac,
				MAC_ADDRESS_TYPE_NETWORK,
				false, adapter->if_handle, pmac_id);

		if (!status)
			adapter->pmac_id = pmac_id;
	} else {
		status = be_cmd_pmac_add(adapter, mac,
				adapter->if_handle, &adapter->pmac_id, 0);
	}
do_none:
	return status;
}
2547
Sathya Perla5fb379e2009-06-18 00:02:59 +00002548static int be_setup(struct be_adapter *adapter)
2549{
Sathya Perla5fb379e2009-06-18 00:02:59 +00002550 struct net_device *netdev = adapter->netdev;
Sathya Perlaf9449ab2011-10-24 02:45:01 +00002551 u32 cap_flags, en_flags;
Sathya Perlaa54769f2011-10-24 02:45:00 +00002552 u32 tx_fc, rx_fc;
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002553 int status;
Sarveshwar Bandiba343c72010-03-31 02:56:12 +00002554 u8 mac[ETH_ALEN];
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002555
Sathya Perla30128032011-11-10 19:17:57 +00002556 be_setup_init(adapter);
Sathya Perlaf9449ab2011-10-24 02:45:01 +00002557
Sathya Perla2dc1deb2011-07-19 19:52:33 +00002558 be_cmd_req_native_mode(adapter);
2559
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002560 be_msix_enable(adapter);
2561
2562 status = be_evt_queues_create(adapter);
2563 if (status)
Sathya Perlaa54769f2011-10-24 02:45:00 +00002564 goto err;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002565
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002566 status = be_tx_cqs_create(adapter);
2567 if (status)
2568 goto err;
2569
2570 status = be_rx_cqs_create(adapter);
2571 if (status)
Sathya Perlaa54769f2011-10-24 02:45:00 +00002572 goto err;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002573
Sathya Perla5fb379e2009-06-18 00:02:59 +00002574 status = be_mcc_queues_create(adapter);
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002575 if (status)
Sathya Perlaa54769f2011-10-24 02:45:00 +00002576 goto err;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002577
Sathya Perlaf9449ab2011-10-24 02:45:01 +00002578 memset(mac, 0, ETH_ALEN);
2579 status = be_cmd_mac_addr_query(adapter, mac, MAC_ADDRESS_TYPE_NETWORK,
Padmanabh Ratnakar590c3912011-11-25 05:47:26 +00002580 true /*permanent */, 0, 0);
Sathya Perlaf9449ab2011-10-24 02:45:01 +00002581 if (status)
2582 return status;
2583 memcpy(adapter->netdev->dev_addr, mac, ETH_ALEN);
2584 memcpy(adapter->netdev->perm_addr, mac, ETH_ALEN);
2585
2586 en_flags = BE_IF_FLAGS_UNTAGGED | BE_IF_FLAGS_BROADCAST |
2587 BE_IF_FLAGS_MULTICAST | BE_IF_FLAGS_PASS_L3L4_ERRORS;
2588 cap_flags = en_flags | BE_IF_FLAGS_MCAST_PROMISCUOUS |
Padmanabh Ratnakar5d5adb92011-11-16 02:03:32 +00002589 BE_IF_FLAGS_VLAN_PROMISCUOUS | BE_IF_FLAGS_PROMISCUOUS;
2590
Sathya Perlaf9449ab2011-10-24 02:45:01 +00002591 if (adapter->function_caps & BE_FUNCTION_CAPS_RSS) {
2592 cap_flags |= BE_IF_FLAGS_RSS;
2593 en_flags |= BE_IF_FLAGS_RSS;
2594 }
2595 status = be_cmd_if_create(adapter, cap_flags, en_flags,
2596 netdev->dev_addr, &adapter->if_handle,
2597 &adapter->pmac_id, 0);
2598 if (status != 0)
2599 goto err;
2600
Padmanabh Ratnakar590c3912011-11-25 05:47:26 +00002601 /* The VF's permanent mac queried from card is incorrect.
2602 * For BEx: Query the mac configued by the PF using if_handle
2603 * For Lancer: Get and use mac_list to obtain mac address.
2604 */
2605 if (!be_physfn(adapter)) {
2606 if (lancer_chip(adapter))
Padmanabh Ratnakare5e1ee82012-02-03 09:50:17 +00002607 status = be_add_mac_from_list(adapter, mac);
Padmanabh Ratnakar590c3912011-11-25 05:47:26 +00002608 else
2609 status = be_cmd_mac_addr_query(adapter, mac,
2610 MAC_ADDRESS_TYPE_NETWORK, false,
2611 adapter->if_handle, 0);
Sathya Perlaf9449ab2011-10-24 02:45:01 +00002612 if (!status) {
2613 memcpy(adapter->netdev->dev_addr, mac, ETH_ALEN);
2614 memcpy(adapter->netdev->perm_addr, mac, ETH_ALEN);
2615 }
2616 }
Ajit Khaparde0dffc832009-11-29 17:57:46 +00002617
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002618 status = be_tx_qs_create(adapter);
2619 if (status)
2620 goto err;
2621
Sathya Perla04b71172011-09-27 13:30:27 -04002622 be_cmd_get_fw_ver(adapter, adapter->fw_ver, NULL);
Somnath Kotur5a56eb12011-09-30 07:24:28 +00002623
Sathya Perlaa54769f2011-10-24 02:45:00 +00002624 status = be_vid_config(adapter, false, 0);
2625 if (status)
2626 goto err;
2627
2628 be_set_rx_mode(adapter->netdev);
2629
2630 status = be_cmd_get_flow_control(adapter, &tx_fc, &rx_fc);
Padmanabh Ratnakar590c3912011-11-25 05:47:26 +00002631 /* For Lancer: It is legal for this cmd to fail on VF */
2632 if (status && (be_physfn(adapter) || !lancer_chip(adapter)))
Sathya Perlaa54769f2011-10-24 02:45:00 +00002633 goto err;
Padmanabh Ratnakar590c3912011-11-25 05:47:26 +00002634
Sathya Perlaa54769f2011-10-24 02:45:00 +00002635 if (rx_fc != adapter->rx_fc || tx_fc != adapter->tx_fc) {
2636 status = be_cmd_set_flow_control(adapter, adapter->tx_fc,
2637 adapter->rx_fc);
Padmanabh Ratnakar590c3912011-11-25 05:47:26 +00002638 /* For Lancer: It is legal for this cmd to fail on VF */
2639 if (status && (be_physfn(adapter) || !lancer_chip(adapter)))
Sathya Perlaa54769f2011-10-24 02:45:00 +00002640 goto err;
2641 }
2642
Somnath Kotur5a56eb12011-09-30 07:24:28 +00002643 pcie_set_readrq(adapter->pdev, 4096);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002644
Sathya Perla11ac75e2011-12-13 00:58:50 +00002645 if (sriov_enabled(adapter)) {
Sathya Perlaf9449ab2011-10-24 02:45:01 +00002646 status = be_vf_setup(adapter);
2647 if (status)
2648 goto err;
2649 }
2650
2651 return 0;
Sathya Perlaa54769f2011-10-24 02:45:00 +00002652err:
2653 be_clear(adapter);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002654 return status;
2655}
2656
#ifdef CONFIG_NET_POLL_CONTROLLER
/* Netpoll hook: service every event queue so completions are reaped even
 * when normal interrupt delivery is unavailable (e.g. netconsole).
 * Fix: dropped the redundant trailing "return;" in a void function.
 */
static void be_netpoll(struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_eq_obj *eqo;
	int i;

	for_all_evt_queues(adapter, eqo, i)
		event_handle(eqo);
}
#endif
2670
Ajit Khaparde84517482009-09-04 03:12:16 +00002671#define FW_FILE_HDR_SIGN "ServerEngines Corp. "
Sarveshwar Bandifa9a6fe2009-11-20 14:23:47 -08002672static bool be_flash_redboot(struct be_adapter *adapter,
Ajit Khaparde3f0d4562010-02-09 01:30:35 +00002673 const u8 *p, u32 img_start, int image_size,
2674 int hdr_size)
Sarveshwar Bandifa9a6fe2009-11-20 14:23:47 -08002675{
2676 u32 crc_offset;
2677 u8 flashed_crc[4];
2678 int status;
Ajit Khaparde3f0d4562010-02-09 01:30:35 +00002679
2680 crc_offset = hdr_size + img_start + image_size - 4;
2681
Sarveshwar Bandifa9a6fe2009-11-20 14:23:47 -08002682 p += crc_offset;
Ajit Khaparde3f0d4562010-02-09 01:30:35 +00002683
2684 status = be_cmd_get_flash_crc(adapter, flashed_crc,
Ajit Khapardef510fc62010-03-31 01:47:45 +00002685 (image_size - 4));
Sarveshwar Bandifa9a6fe2009-11-20 14:23:47 -08002686 if (status) {
2687 dev_err(&adapter->pdev->dev,
2688 "could not get crc from flash, not flashing redboot\n");
2689 return false;
2690 }
2691
2692 /*update redboot only if crc does not match*/
2693 if (!memcmp(flashed_crc, p, 4))
2694 return false;
2695 else
2696 return true;
Sarveshwar Bandifa9a6fe2009-11-20 14:23:47 -08002697}
2698
Sathya Perla306f1342011-08-02 19:57:45 +00002699static bool phy_flashing_required(struct be_adapter *adapter)
2700{
2701 int status = 0;
2702 struct be_phy_info phy_info;
2703
2704 status = be_cmd_get_phy_info(adapter, &phy_info);
2705 if (status)
2706 return false;
2707 if ((phy_info.phy_type == TN_8022) &&
2708 (phy_info.interface_type == PHY_TYPE_BASET_10GB)) {
2709 return true;
2710 }
2711 return false;
2712}
2713
/* Write each applicable firmware component of the UFI image @fw to the
 * adapter's flash. @flash_cmd is a pre-allocated DMA buffer used for the
 * flashrom commands; @num_of_images is the count of image headers used to
 * locate component data in gen3 multi-image files (0 for gen2).
 * Returns 0 on success, -1 on an out-of-bounds component or flash failure.
 */
static int be_flash_data(struct be_adapter *adapter,
			const struct firmware *fw,
			struct be_dma_mem *flash_cmd, int num_of_images)

{
	int status = 0, i, filehdr_size = 0;
	u32 total_bytes = 0, flash_op;
	int num_bytes;
	const u8 *p = fw->data;
	struct be_cmd_write_flashrom *req = flash_cmd->va;
	const struct flash_comp *pflashcomp;
	int num_comp;

	/* Per-generation component tables: {flash offset, image type, max size} */
	static const struct flash_comp gen3_flash_types[10] = {
		{ FLASH_iSCSI_PRIMARY_IMAGE_START_g3, IMG_TYPE_ISCSI_ACTIVE,
			FLASH_IMAGE_MAX_SIZE_g3},
		{ FLASH_REDBOOT_START_g3, IMG_TYPE_REDBOOT,
			FLASH_REDBOOT_IMAGE_MAX_SIZE_g3},
		{ FLASH_iSCSI_BIOS_START_g3, IMG_TYPE_BIOS,
			FLASH_BIOS_IMAGE_MAX_SIZE_g3},
		{ FLASH_PXE_BIOS_START_g3, IMG_TYPE_PXE_BIOS,
			FLASH_BIOS_IMAGE_MAX_SIZE_g3},
		{ FLASH_FCoE_BIOS_START_g3, IMG_TYPE_FCOE_BIOS,
			FLASH_BIOS_IMAGE_MAX_SIZE_g3},
		{ FLASH_iSCSI_BACKUP_IMAGE_START_g3, IMG_TYPE_ISCSI_BACKUP,
			FLASH_IMAGE_MAX_SIZE_g3},
		{ FLASH_FCoE_PRIMARY_IMAGE_START_g3, IMG_TYPE_FCOE_FW_ACTIVE,
			FLASH_IMAGE_MAX_SIZE_g3},
		{ FLASH_FCoE_BACKUP_IMAGE_START_g3, IMG_TYPE_FCOE_FW_BACKUP,
			FLASH_IMAGE_MAX_SIZE_g3},
		{ FLASH_NCSI_START_g3, IMG_TYPE_NCSI_FW,
			FLASH_NCSI_IMAGE_MAX_SIZE_g3},
		{ FLASH_PHY_FW_START_g3, IMG_TYPE_PHY_FW,
			FLASH_PHY_FW_IMAGE_MAX_SIZE_g3}
	};
	static const struct flash_comp gen2_flash_types[8] = {
		{ FLASH_iSCSI_PRIMARY_IMAGE_START_g2, IMG_TYPE_ISCSI_ACTIVE,
			FLASH_IMAGE_MAX_SIZE_g2},
		{ FLASH_REDBOOT_START_g2, IMG_TYPE_REDBOOT,
			FLASH_REDBOOT_IMAGE_MAX_SIZE_g2},
		{ FLASH_iSCSI_BIOS_START_g2, IMG_TYPE_BIOS,
			FLASH_BIOS_IMAGE_MAX_SIZE_g2},
		{ FLASH_PXE_BIOS_START_g2, IMG_TYPE_PXE_BIOS,
			FLASH_BIOS_IMAGE_MAX_SIZE_g2},
		{ FLASH_FCoE_BIOS_START_g2, IMG_TYPE_FCOE_BIOS,
			FLASH_BIOS_IMAGE_MAX_SIZE_g2},
		{ FLASH_iSCSI_BACKUP_IMAGE_START_g2, IMG_TYPE_ISCSI_BACKUP,
			FLASH_IMAGE_MAX_SIZE_g2},
		{ FLASH_FCoE_PRIMARY_IMAGE_START_g2, IMG_TYPE_FCOE_FW_ACTIVE,
			FLASH_IMAGE_MAX_SIZE_g2},
		{ FLASH_FCoE_BACKUP_IMAGE_START_g2, IMG_TYPE_FCOE_FW_BACKUP,
			FLASH_IMAGE_MAX_SIZE_g2}
	};

	if (adapter->generation == BE_GEN3) {
		pflashcomp = gen3_flash_types;
		filehdr_size = sizeof(struct flash_file_hdr_g3);
		num_comp = ARRAY_SIZE(gen3_flash_types);
	} else {
		pflashcomp = gen2_flash_types;
		filehdr_size = sizeof(struct flash_file_hdr_g2);
		num_comp = ARRAY_SIZE(gen2_flash_types);
	}
	for (i = 0; i < num_comp; i++) {
		/* Skip the NC-SI image when the running FW version string
		 * compares below "3.102.148.0" (first 11 chars) */
		if ((pflashcomp[i].optype == IMG_TYPE_NCSI_FW) &&
			memcmp(adapter->fw_ver, "3.102.148.0", 11) < 0)
			continue;
		/* Skip PHY FW unless the fitted PHY actually needs it */
		if (pflashcomp[i].optype == IMG_TYPE_PHY_FW) {
			if (!phy_flashing_required(adapter))
				continue;
		}
		/* Skip redboot when its CRC matches what is already in flash */
		if ((pflashcomp[i].optype == IMG_TYPE_REDBOOT) &&
			(!be_flash_redboot(adapter, fw->data,
			pflashcomp[i].offset, pflashcomp[i].size, filehdr_size +
			(num_of_images * sizeof(struct image_hdr)))))
			continue;
		/* Locate this component's data within the UFI file */
		p = fw->data;
		p += filehdr_size + pflashcomp[i].offset
			+ (num_of_images * sizeof(struct image_hdr));
		if (p + pflashcomp[i].size > fw->data + fw->size)
			return -1;	/* component lies beyond end of file */
		total_bytes = pflashcomp[i].size;
		while (total_bytes) {
			/* transfer in chunks of at most 32KB */
			if (total_bytes > 32*1024)
				num_bytes = 32*1024;
			else
				num_bytes = total_bytes;
			total_bytes -= num_bytes;
			/* intermediate chunks use a SAVE op; the final chunk
			 * uses a FLASH op (PHY variants for the PHY image) */
			if (!total_bytes) {
				if (pflashcomp[i].optype == IMG_TYPE_PHY_FW)
					flash_op = FLASHROM_OPER_PHY_FLASH;
				else
					flash_op = FLASHROM_OPER_FLASH;
			} else {
				if (pflashcomp[i].optype == IMG_TYPE_PHY_FW)
					flash_op = FLASHROM_OPER_PHY_SAVE;
				else
					flash_op = FLASHROM_OPER_SAVE;
			}
			memcpy(req->params.data_buf, p, num_bytes);
			p += num_bytes;
			status = be_cmd_write_flashrom(adapter, flash_cmd,
				pflashcomp[i].optype, flash_op, num_bytes);
			if (status) {
				/* FW rejecting the PHY-FW op is tolerated;
				 * move on to the next component */
				if ((status == ILLEGAL_IOCTL_REQ) &&
					(pflashcomp[i].optype ==
						IMG_TYPE_PHY_FW))
					break;
				dev_err(&adapter->pdev->dev,
					"cmd to write to flash rom failed.\n");
				return -1;
			}
		}
	}
	return 0;
}
2830
Ajit Khaparde3f0d4562010-02-09 01:30:35 +00002831static int get_ufigen_type(struct flash_file_hdr_g2 *fhdr)
2832{
2833 if (fhdr == NULL)
2834 return 0;
2835 if (fhdr->build[0] == '3')
2836 return BE_GEN3;
2837 else if (fhdr->build[0] == '2')
2838 return BE_GEN2;
2839 else
2840 return 0;
2841}
2842
/* Download a firmware image to a Lancer adapter: stream the file to the
 * "/prg" object in 32KB chunks via write_object commands, then issue a
 * zero-length write to commit. The image length must be a multiple of 4.
 * Returns 0 on success or a negative errno / command status on failure.
 */
static int lancer_fw_download(struct be_adapter *adapter,
				const struct firmware *fw)
{
#define LANCER_FW_DOWNLOAD_CHUNK      (32 * 1024)
#define LANCER_FW_DOWNLOAD_LOCATION   "/prg"
	struct be_dma_mem flash_cmd;
	const u8 *data_ptr = NULL;
	u8 *dest_image_ptr = NULL;
	size_t image_size = 0;
	u32 chunk_size = 0;
	u32 data_written = 0;
	u32 offset = 0;
	int status = 0;
	u8 add_status = 0;

	if (!IS_ALIGNED(fw->size, sizeof(u32))) {
		dev_err(&adapter->pdev->dev,
			"FW Image not properly aligned. "
			"Length must be 4 byte aligned.\n");
		status = -EINVAL;
		goto lancer_fw_exit;
	}

	/* DMA buffer holds one write_object request plus one data chunk */
	flash_cmd.size = sizeof(struct lancer_cmd_req_write_object)
				+ LANCER_FW_DOWNLOAD_CHUNK;
	flash_cmd.va = dma_alloc_coherent(&adapter->pdev->dev, flash_cmd.size,
						&flash_cmd.dma, GFP_KERNEL);
	if (!flash_cmd.va) {
		status = -ENOMEM;
		dev_err(&adapter->pdev->dev,
			"Memory allocation failure while flashing\n");
		goto lancer_fw_exit;
	}

	/* image data goes directly after the request header in the buffer */
	dest_image_ptr = flash_cmd.va +
				sizeof(struct lancer_cmd_req_write_object);
	image_size = fw->size;
	data_ptr = fw->data;

	while (image_size) {
		chunk_size = min_t(u32, image_size, LANCER_FW_DOWNLOAD_CHUNK);

		/* Copy the image chunk content. */
		memcpy(dest_image_ptr, data_ptr, chunk_size);

		status = lancer_cmd_write_object(adapter, &flash_cmd,
				chunk_size, offset, LANCER_FW_DOWNLOAD_LOCATION,
				&data_written, &add_status);

		if (status)
			break;

		/* advance by what the FW actually accepted, which may be
		 * less than the chunk that was submitted */
		offset += data_written;
		data_ptr += data_written;
		image_size -= data_written;
	}

	if (!status) {
		/* Commit the FW written: a zero-length write finalizes it */
		status = lancer_cmd_write_object(adapter, &flash_cmd,
				0, offset, LANCER_FW_DOWNLOAD_LOCATION,
				&data_written, &add_status);
	}

	dma_free_coherent(&adapter->pdev->dev, flash_cmd.size, flash_cmd.va,
				flash_cmd.dma);
	if (status) {
		dev_err(&adapter->pdev->dev,
			"Firmware load error. "
			"Status code: 0x%x Additional Status: 0x%x\n",
			status, add_status);
		goto lancer_fw_exit;
	}

	dev_info(&adapter->pdev->dev, "Firmware flashed successfully\n");
lancer_fw_exit:
	return status;
}
2921
/* Flash a BE2/BE3 UFI image. Validates that the file's generation matches
 * the adapter generation, then hands the components to be_flash_data():
 * gen3 files carry multiple image headers and only image id 1 is flashed;
 * gen2 files have no image headers (num_of_images = 0).
 * Returns 0 on success, -ENOMEM/-1 on failure.
 */
static int be_fw_download(struct be_adapter *adapter, const struct firmware* fw)
{
	struct flash_file_hdr_g2 *fhdr;
	struct flash_file_hdr_g3 *fhdr3;
	struct image_hdr *img_hdr_ptr = NULL;
	struct be_dma_mem flash_cmd;
	const u8 *p;
	int status = 0, i = 0, num_imgs = 0;

	p = fw->data;
	fhdr = (struct flash_file_hdr_g2 *) p;

	/* DMA buffer holds one flashrom request plus a 32KB data chunk */
	flash_cmd.size = sizeof(struct be_cmd_write_flashrom) + 32*1024;
	flash_cmd.va = dma_alloc_coherent(&adapter->pdev->dev, flash_cmd.size,
					  &flash_cmd.dma, GFP_KERNEL);
	if (!flash_cmd.va) {
		status = -ENOMEM;
		dev_err(&adapter->pdev->dev,
			"Memory allocation failure while flashing\n");
		goto be_fw_exit;
	}

	if ((adapter->generation == BE_GEN3) &&
			(get_ufigen_type(fhdr) == BE_GEN3)) {
		/* gen3 UFI: walk the image headers, flash image id 1 */
		fhdr3 = (struct flash_file_hdr_g3 *) fw->data;
		num_imgs = le32_to_cpu(fhdr3->num_imgs);
		for (i = 0; i < num_imgs; i++) {
			img_hdr_ptr = (struct image_hdr *) (fw->data +
					(sizeof(struct flash_file_hdr_g3) +
					 i * sizeof(struct image_hdr)));
			if (le32_to_cpu(img_hdr_ptr->imageid) == 1)
				status = be_flash_data(adapter, fw, &flash_cmd,
						       num_imgs);
		}
	} else if ((adapter->generation == BE_GEN2) &&
			(get_ufigen_type(fhdr) == BE_GEN2)) {
		status = be_flash_data(adapter, fw, &flash_cmd, 0);
	} else {
		dev_err(&adapter->pdev->dev,
			"UFI and Interface are not compatible for flashing\n");
		status = -1;
	}

	dma_free_coherent(&adapter->pdev->dev, flash_cmd.size, flash_cmd.va,
			  flash_cmd.dma);
	if (status) {
		dev_err(&adapter->pdev->dev, "Firmware load error\n");
		goto be_fw_exit;
	}

	dev_info(&adapter->pdev->dev, "Firmware flashed successfully\n");

be_fw_exit:
	return status;
}
2977
2978int be_load_fw(struct be_adapter *adapter, u8 *fw_file)
2979{
2980 const struct firmware *fw;
2981 int status;
2982
2983 if (!netif_running(adapter->netdev)) {
2984 dev_err(&adapter->pdev->dev,
2985 "Firmware load not allowed (interface is down)\n");
2986 return -1;
2987 }
2988
2989 status = request_firmware(&fw, fw_file, &adapter->pdev->dev);
2990 if (status)
2991 goto fw_exit;
2992
2993 dev_info(&adapter->pdev->dev, "Flashing firmware file %s\n", fw_file);
2994
2995 if (lancer_chip(adapter))
2996 status = lancer_fw_download(adapter, fw);
2997 else
2998 status = be_fw_download(adapter, fw);
2999
Ajit Khaparde84517482009-09-04 03:12:16 +00003000fw_exit:
3001 release_firmware(fw);
3002 return status;
3003}
3004
/* net_device callbacks for benet interfaces. Includes SR-IOV VF
 * management hooks (set_vf_mac/vlan/tx_rate, get_vf_config); the
 * netpoll hook is compiled in only with CONFIG_NET_POLL_CONTROLLER.
 */
static const struct net_device_ops be_netdev_ops = {
	.ndo_open = be_open,
	.ndo_stop = be_close,
	.ndo_start_xmit = be_xmit,
	.ndo_set_rx_mode = be_set_rx_mode,
	.ndo_set_mac_address = be_mac_addr_set,
	.ndo_change_mtu = be_change_mtu,
	.ndo_get_stats64 = be_get_stats64,
	.ndo_validate_addr = eth_validate_addr,
	.ndo_vlan_rx_add_vid = be_vlan_add_vid,
	.ndo_vlan_rx_kill_vid = be_vlan_rem_vid,
	.ndo_set_vf_mac = be_set_vf_mac,
	.ndo_set_vf_vlan = be_set_vf_vlan,
	.ndo_set_vf_tx_rate = be_set_vf_tx_rate,
	.ndo_get_vf_config = be_get_vf_config,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller = be_netpoll,
#endif
};
3024
/* Initialize the net_device before registration: advertise offload
 * feature flags, install the netdev/ethtool ops and attach one NAPI
 * instance per event queue.
 */
static void be_netdev_init(struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_eq_obj *eqo;
	int i;

	/* user-toggleable offloads: SG, TSO, checksum, VLAN tx insertion */
	netdev->hw_features |= NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6 |
		NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_RXCSUM |
		NETIF_F_HW_VLAN_TX;
	if (be_multi_rxq(adapter))
		netdev->hw_features |= NETIF_F_RXHASH;

	/* enable everything above by default, plus VLAN rx strip/filter
	 * (which are not user-toggleable here) */
	netdev->features |= netdev->hw_features |
		NETIF_F_HW_VLAN_RX | NETIF_F_HW_VLAN_FILTER;

	netdev->vlan_features |= NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6 |
		NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;

	netdev->flags |= IFF_MULTICAST;

	netif_set_gso_max_size(netdev, 65535);

	netdev->netdev_ops = &be_netdev_ops;

	SET_ETHTOOL_OPS(netdev, &be_ethtool_ops);

	/* one NAPI context per event queue */
	for_all_evt_queues(adapter, eqo, i)
		netif_napi_add(netdev, &eqo->napi, be_poll, BE_NAPI_WEIGHT);
}
3054
3055static void be_unmap_pci_bars(struct be_adapter *adapter)
3056{
Sathya Perla8788fdc2009-07-27 22:52:03 +00003057 if (adapter->csr)
3058 iounmap(adapter->csr);
3059 if (adapter->db)
3060 iounmap(adapter->db);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003061}
3062
/* ioremap the PCI BARs this driver uses.
 * Lancer: only BAR 0 (doorbells). Others: PF maps the CSR space from
 * BAR 2; the doorbell BAR is 4 for GEN2 and for GEN3 PFs, 0 for GEN3 VFs.
 * Returns 0 on success, -ENOMEM on any mapping failure (with previously
 * mapped BARs released).
 */
static int be_map_pci_bars(struct be_adapter *adapter)
{
	u8 __iomem *addr;
	int db_reg;

	if (lancer_chip(adapter)) {
		addr = ioremap_nocache(pci_resource_start(adapter->pdev, 0),
			pci_resource_len(adapter->pdev, 0));
		if (addr == NULL)
			return -ENOMEM;
		adapter->db = addr;
		return 0;
	}

	/* CSR space is only mapped for the physical function */
	if (be_physfn(adapter)) {
		addr = ioremap_nocache(pci_resource_start(adapter->pdev, 2),
				pci_resource_len(adapter->pdev, 2));
		if (addr == NULL)
			return -ENOMEM;
		adapter->csr = addr;
	}

	/* pick the doorbell BAR for this generation/function */
	if (adapter->generation == BE_GEN2) {
		db_reg = 4;
	} else {
		if (be_physfn(adapter))
			db_reg = 4;
		else
			db_reg = 0;
	}
	addr = ioremap_nocache(pci_resource_start(adapter->pdev, db_reg),
				pci_resource_len(adapter->pdev, db_reg));
	if (addr == NULL)
		goto pci_map_err;
	adapter->db = addr;

	return 0;
pci_map_err:
	be_unmap_pci_bars(adapter);
	return -ENOMEM;
}
3104
3105
3106static void be_ctrl_cleanup(struct be_adapter *adapter)
3107{
Sathya Perla8788fdc2009-07-27 22:52:03 +00003108 struct be_dma_mem *mem = &adapter->mbox_mem_alloced;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003109
3110 be_unmap_pci_bars(adapter);
3111
3112 if (mem->va)
Ivan Vecera2b7bceb2011-02-02 08:05:12 +00003113 dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
3114 mem->dma);
Sathya Perlae7b909a2009-11-22 22:01:10 +00003115
Sathya Perla5b8821b2011-08-02 19:57:44 +00003116 mem = &adapter->rx_filter;
Sathya Perlae7b909a2009-11-22 22:01:10 +00003117 if (mem->va)
Ivan Vecera2b7bceb2011-02-02 08:05:12 +00003118 dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
3119 mem->dma);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003120}
3121
/* One-time controller init: map PCI BARs, allocate the MCC mailbox and
 * rx-filter DMA buffers, and initialize the command locks/completion.
 * Uses goto-based unwinding so each failure releases what was acquired.
 * Returns 0 on success or a negative errno.
 */
static int be_ctrl_init(struct be_adapter *adapter)
{
	struct be_dma_mem *mbox_mem_alloc = &adapter->mbox_mem_alloced;
	struct be_dma_mem *mbox_mem_align = &adapter->mbox_mem;
	struct be_dma_mem *rx_filter = &adapter->rx_filter;
	int status;

	status = be_map_pci_bars(adapter);
	if (status)
		goto done;

	/* over-allocate by 16 bytes so the mailbox can be 16-byte aligned */
	mbox_mem_alloc->size = sizeof(struct be_mcc_mailbox) + 16;
	mbox_mem_alloc->va = dma_alloc_coherent(&adapter->pdev->dev,
						mbox_mem_alloc->size,
						&mbox_mem_alloc->dma,
						GFP_KERNEL);
	if (!mbox_mem_alloc->va) {
		status = -ENOMEM;
		goto unmap_pci_bars;
	}
	/* mbox_mem is the aligned view into the raw allocation above */
	mbox_mem_align->size = sizeof(struct be_mcc_mailbox);
	mbox_mem_align->va = PTR_ALIGN(mbox_mem_alloc->va, 16);
	mbox_mem_align->dma = PTR_ALIGN(mbox_mem_alloc->dma, 16);
	memset(mbox_mem_align->va, 0, sizeof(struct be_mcc_mailbox));

	rx_filter->size = sizeof(struct be_cmd_req_rx_filter);
	rx_filter->va = dma_alloc_coherent(&adapter->pdev->dev, rx_filter->size,
					&rx_filter->dma, GFP_KERNEL);
	if (rx_filter->va == NULL) {
		status = -ENOMEM;
		goto free_mbox;
	}
	memset(rx_filter->va, 0, rx_filter->size);

	mutex_init(&adapter->mbox_lock);
	spin_lock_init(&adapter->mcc_lock);
	spin_lock_init(&adapter->mcc_cq_lock);

	init_completion(&adapter->flash_compl);
	/* snapshot config space for restore after EEH/resets */
	pci_save_state(adapter->pdev);
	return 0;

free_mbox:
	dma_free_coherent(&adapter->pdev->dev, mbox_mem_alloc->size,
			  mbox_mem_alloc->va, mbox_mem_alloc->dma);

unmap_pci_bars:
	be_unmap_pci_bars(adapter);

done:
	return status;
}
3174
3175static void be_stats_cleanup(struct be_adapter *adapter)
3176{
Sathya Perla3abcded2010-10-03 22:12:27 -07003177 struct be_dma_mem *cmd = &adapter->stats_cmd;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003178
3179 if (cmd->va)
Ivan Vecera2b7bceb2011-02-02 08:05:12 +00003180 dma_free_coherent(&adapter->pdev->dev, cmd->size,
3181 cmd->va, cmd->dma);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003182}
3183
3184static int be_stats_init(struct be_adapter *adapter)
3185{
Sathya Perla3abcded2010-10-03 22:12:27 -07003186 struct be_dma_mem *cmd = &adapter->stats_cmd;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003187
Selvin Xavier005d5692011-05-16 07:36:35 +00003188 if (adapter->generation == BE_GEN2) {
Ajit Khaparde89a88ab2011-05-16 07:36:18 +00003189 cmd->size = sizeof(struct be_cmd_req_get_stats_v0);
Selvin Xavier005d5692011-05-16 07:36:35 +00003190 } else {
3191 if (lancer_chip(adapter))
3192 cmd->size = sizeof(struct lancer_cmd_req_pport_stats);
3193 else
3194 cmd->size = sizeof(struct be_cmd_req_get_stats_v1);
3195 }
Ivan Vecera2b7bceb2011-02-02 08:05:12 +00003196 cmd->va = dma_alloc_coherent(&adapter->pdev->dev, cmd->size, &cmd->dma,
3197 GFP_KERNEL);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003198 if (cmd->va == NULL)
3199 return -1;
David S. Millerd291b9a2010-01-28 21:36:21 -08003200 memset(cmd->va, 0, cmd->size);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003201 return 0;
3202}
3203
/* PCI remove handler: tear down in the reverse order of probe.
 * The worker is cancelled first so it cannot run against freed state;
 * the netdev itself is freed last since @adapter lives inside it.
 */
static void __devexit be_remove(struct pci_dev *pdev)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);

	if (!adapter)
		return;

	/* stop the periodic worker before dismantling anything it touches */
	cancel_delayed_work_sync(&adapter->work);

	unregister_netdev(adapter->netdev);

	be_clear(adapter);

	be_stats_cleanup(adapter);

	be_ctrl_cleanup(adapter);

	be_sriov_disable(adapter);

	pci_set_drvdata(pdev, NULL);
	pci_release_regions(pdev);
	pci_disable_device(pdev);

	free_netdev(adapter->netdev);
}
3229
Sathya Perla2243e2e2009-11-22 22:02:03 +00003230static int be_get_config(struct be_adapter *adapter)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003231{
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003232 int status;
Sathya Perla43a04fdc2009-10-14 20:21:17 +00003233
Sathya Perla3abcded2010-10-03 22:12:27 -07003234 status = be_cmd_query_fw_cfg(adapter, &adapter->port_num,
3235 &adapter->function_mode, &adapter->function_caps);
Sathya Perla2243e2e2009-11-22 22:02:03 +00003236 if (status)
3237 return status;
3238
Sathya Perla752961a2011-10-24 02:45:03 +00003239 if (adapter->function_mode & FLEX10_MODE)
Ajit Khaparde82903e42010-02-09 01:34:57 +00003240 adapter->max_vlans = BE_NUM_VLANS_SUPPORTED/4;
3241 else
3242 adapter->max_vlans = BE_NUM_VLANS_SUPPORTED;
3243
Ajit Khaparde9e1453c2011-02-20 11:42:22 +00003244 status = be_cmd_get_cntl_attributes(adapter);
3245 if (status)
3246 return status;
3247
Sathya Perla2243e2e2009-11-22 22:02:03 +00003248 return 0;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003249}
3250
/* Record the adapter generation based on the PCI device id. For
 * OC_DEVICE_ID3/ID4 the SLI_INTF config register is additionally
 * validated (valid signature, interface type 2) and the SLI family is
 * saved. Unknown ids leave generation = 0.
 * Returns 0 on success, -EINVAL on an invalid SLI_INTF register.
 */
static int be_dev_family_check(struct be_adapter *adapter)
{
	struct pci_dev *pdev = adapter->pdev;
	u32 sli_intf = 0, if_type;

	switch (pdev->device) {
	case BE_DEVICE_ID1:
	case OC_DEVICE_ID1:
		adapter->generation = BE_GEN2;
		break;
	case BE_DEVICE_ID2:
	case OC_DEVICE_ID2:
	case OC_DEVICE_ID5:
		adapter->generation = BE_GEN3;
		break;
	case OC_DEVICE_ID3:
	case OC_DEVICE_ID4:
		pci_read_config_dword(pdev, SLI_INTF_REG_OFFSET, &sli_intf);
		if_type = (sli_intf & SLI_INTF_IF_TYPE_MASK) >>
						SLI_INTF_IF_TYPE_SHIFT;

		/* only interface type 2 with a valid signature is supported */
		if (((sli_intf & SLI_INTF_VALID_MASK) != SLI_INTF_VALID) ||
			if_type != 0x02) {
			dev_err(&pdev->dev, "SLI_INTF reg val is not valid\n");
			return -EINVAL;
		}
		adapter->sli_family = ((sli_intf & SLI_INTF_FAMILY_MASK) >>
					 SLI_INTF_FAMILY_SHIFT);
		adapter->generation = BE_GEN3;
		break;
	default:
		adapter->generation = 0;
	}
	return 0;
}
3286
Padmanabh Ratnakar37eed1c2011-03-07 03:08:36 +00003287static int lancer_wait_ready(struct be_adapter *adapter)
3288{
Padmanabh Ratnakard8110f62011-11-25 05:48:23 +00003289#define SLIPORT_READY_TIMEOUT 30
Padmanabh Ratnakar37eed1c2011-03-07 03:08:36 +00003290 u32 sliport_status;
3291 int status = 0, i;
3292
3293 for (i = 0; i < SLIPORT_READY_TIMEOUT; i++) {
3294 sliport_status = ioread32(adapter->db + SLIPORT_STATUS_OFFSET);
3295 if (sliport_status & SLIPORT_STATUS_RDY_MASK)
3296 break;
3297
Padmanabh Ratnakard8110f62011-11-25 05:48:23 +00003298 msleep(1000);
Padmanabh Ratnakar37eed1c2011-03-07 03:08:36 +00003299 }
3300
3301 if (i == SLIPORT_READY_TIMEOUT)
3302 status = -1;
3303
3304 return status;
3305}
3306
/* Wait for the SLIPORT to become ready; if it reports an error together
 * with the reset-needed bit, trigger a port reset (IP bit in the control
 * register) and wait for the port to come back clean.
 * Returns 0 when the port is ready and error-free, -1 otherwise.
 */
static int lancer_test_and_set_rdy_state(struct be_adapter *adapter)
{
	int status;
	u32 sliport_status, err, reset_needed;
	status = lancer_wait_ready(adapter);
	if (!status) {
		sliport_status = ioread32(adapter->db + SLIPORT_STATUS_OFFSET);
		err = sliport_status & SLIPORT_STATUS_ERR_MASK;
		reset_needed = sliport_status & SLIPORT_STATUS_RN_MASK;
		if (err && reset_needed) {
			/* initiate port reset */
			iowrite32(SLI_PORT_CONTROL_IP_MASK,
				  adapter->db + SLIPORT_CONTROL_OFFSET);

			/* check adapter has corrected the error */
			status = lancer_wait_ready(adapter);
			sliport_status = ioread32(adapter->db +
						  SLIPORT_STATUS_OFFSET);
			sliport_status &= (SLIPORT_STATUS_ERR_MASK |
						SLIPORT_STATUS_RN_MASK);
			/* fail if still not ready or error bits persist */
			if (status || sliport_status)
				status = -1;
		} else if (err || reset_needed) {
			/* error without reset-needed (or vice versa) is not
			 * recoverable here */
			status = -1;
		}
	}
	return status;
}
3334
/* Lancer only: check the SLIPORT error bit and, if set, attempt a full
 * function recovery — reset the port, tear down the function state
 * (be_clear) and re-initialize it (be_setup), re-opening the interface
 * if it was running. Skipped while an EEH or UE error is being handled.
 */
static void lancer_test_and_recover_fn_err(struct be_adapter *adapter)
{
	int status;
	u32 sliport_status;

	if (adapter->eeh_err || adapter->ue_detected)
		return;

	sliport_status = ioread32(adapter->db + SLIPORT_STATUS_OFFSET);

	if (sliport_status & SLIPORT_STATUS_ERR_MASK) {
		dev_err(&adapter->pdev->dev,
			"Adapter in error state."
			"Trying to recover.\n");

		status = lancer_test_and_set_rdy_state(adapter);
		if (status)
			goto err;

		/* keep the stack away while the function is rebuilt */
		netif_device_detach(adapter->netdev);

		if (netif_running(adapter->netdev))
			be_close(adapter->netdev);

		be_clear(adapter);

		/* clear the stale FW-timeout indication before re-init */
		adapter->fw_timeout = false;

		status = be_setup(adapter);
		if (status)
			goto err;

		if (netif_running(adapter->netdev)) {
			status = be_open(adapter->netdev);
			if (status)
				goto err;
		}

		netif_device_attach(adapter->netdev);

		dev_err(&adapter->pdev->dev,
			"Adapter error recovery succeeded\n");
	}
	return;
err:
	dev_err(&adapter->pdev->dev,
		"Adapter error recovery failed\n");
}
3383
/* Periodic (1s) housekeeping worker: run Lancer error recovery and UE
 * detection, refresh FW statistics, replenish starved RX rings and
 * update per-EQ delay values, then reschedule itself.
 */
static void be_worker(struct work_struct *work)
{
	struct be_adapter *adapter =
		container_of(work, struct be_adapter, work.work);
	struct be_rx_obj *rxo;
	struct be_eq_obj *eqo;
	int i;

	if (lancer_chip(adapter))
		lancer_test_and_recover_fn_err(adapter);

	be_detect_dump_ue(adapter);

	/* when interrupts are not yet enabled, just reap any pending
	 * mcc completions */
	if (!netif_running(adapter->netdev)) {
		be_process_mcc(adapter);
		goto reschedule;
	}

	/* kick off a stats query unless one is still in flight */
	if (!adapter->stats_cmd_sent) {
		if (lancer_chip(adapter))
			lancer_cmd_get_pport_stats(adapter,
						   &adapter->stats_cmd);
		else
			be_cmd_get_stats(adapter, &adapter->stats_cmd);
	}

	/* repost RX buffers on rings that previously ran dry */
	for_all_rx_queues(adapter, rxo, i) {
		if (rxo->rx_post_starved) {
			rxo->rx_post_starved = false;
			be_post_rx_frags(rxo, GFP_KERNEL);
		}
	}

	for_all_evt_queues(adapter, eqo, i)
		be_eqd_update(adapter, eqo);

reschedule:
	adapter->work_counter++;
	schedule_delayed_work(&adapter->work, msecs_to_jiffies(1000));
}
3426
/* PCI probe: bring up one BE/Lancer function.
 *
 * Sequence: enable/claim the PCI device, allocate the netdev+adapter,
 * detect the chip family, set the DMA mask, enable SR-IOV, map control
 * structures, sync with firmware readiness, reset the function, init
 * stats, create rings/queues (be_setup) and finally register the netdev
 * and kick off the periodic worker.  Teardown on failure is the reverse,
 * via the goto ladder at the bottom.
 */
static int __devinit be_probe(struct pci_dev *pdev,
			const struct pci_device_id *pdev_id)
{
	int status = 0;
	struct be_adapter *adapter;
	struct net_device *netdev;

	status = pci_enable_device(pdev);
	if (status)
		goto do_none;

	status = pci_request_regions(pdev, DRV_NAME);
	if (status)
		goto disable_dev;
	pci_set_master(pdev);

	netdev = alloc_etherdev_mq(sizeof(struct be_adapter), MAX_TX_QS);
	if (netdev == NULL) {
		status = -ENOMEM;
		goto rel_reg;
	}
	adapter = netdev_priv(netdev);
	adapter->pdev = pdev;
	pci_set_drvdata(pdev, adapter);

	/* determines chip generation / lancer-ness; needed before any
	 * register access below */
	status = be_dev_family_check(adapter);
	if (status)
		goto free_netdev;

	adapter->netdev = netdev;
	SET_NETDEV_DEV(netdev, &pdev->dev);

	/* prefer 64-bit DMA; fall back to 32-bit if unsupported */
	status = dma_set_mask(&pdev->dev, DMA_BIT_MASK(64));
	if (!status) {
		netdev->features |= NETIF_F_HIGHDMA;
	} else {
		status = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
		if (status) {
			dev_err(&pdev->dev, "Could not set PCI DMA Mask\n");
			goto free_netdev;
		}
	}

	status = be_sriov_enable(adapter);
	if (status)
		goto free_netdev;

	status = be_ctrl_init(adapter);
	if (status)
		goto disable_sriov;

	/* Lancer: wait for SLI port readiness; if not ready, trigger an
	 * initiate-physical (IP) reset and retry the ready transition */
	if (lancer_chip(adapter)) {
		status = lancer_wait_ready(adapter);
		if (!status) {
			iowrite32(SLI_PORT_CONTROL_IP_MASK,
					adapter->db + SLIPORT_CONTROL_OFFSET);
			status = lancer_test_and_set_rdy_state(adapter);
		}
		if (status) {
			dev_err(&pdev->dev, "Adapter in non recoverable error\n");
			goto ctrl_clean;
		}
	}

	/* sync up with fw's ready state */
	if (be_physfn(adapter)) {
		status = be_cmd_POST(adapter);
		if (status)
			goto ctrl_clean;
	}

	/* tell fw we're ready to fire cmds */
	status = be_cmd_fw_init(adapter);
	if (status)
		goto ctrl_clean;

	status = be_cmd_reset_function(adapter);
	if (status)
		goto ctrl_clean;

	/* The INTR bit may be set in the card when probed by a kdump kernel
	 * after a crash.
	 */
	if (!lancer_chip(adapter))
		be_intr_set(adapter, false);

	status = be_stats_init(adapter);
	if (status)
		goto ctrl_clean;

	status = be_get_config(adapter);
	if (status)
		goto stats_clean;

	INIT_DELAYED_WORK(&adapter->work, be_worker);
	/* flow control defaults to on in both directions */
	adapter->rx_fc = adapter->tx_fc = true;

	status = be_setup(adapter);
	if (status)
		goto msix_disable;

	be_netdev_init(netdev);
	status = register_netdev(netdev);
	if (status != 0)
		goto unsetup;

	dev_info(&pdev->dev, "%s: %s port %d\n", netdev->name, nic_name(pdev),
		adapter->port_num);

	schedule_delayed_work(&adapter->work, msecs_to_jiffies(100));
	return 0;

unsetup:
	be_clear(adapter);
msix_disable:
	be_msix_disable(adapter);
stats_clean:
	be_stats_cleanup(adapter);
ctrl_clean:
	be_ctrl_cleanup(adapter);
disable_sriov:
	be_sriov_disable(adapter);
free_netdev:
	free_netdev(netdev);
	pci_set_drvdata(pdev, NULL);
rel_reg:
	pci_release_regions(pdev);
disable_dev:
	pci_disable_device(pdev);
do_none:
	dev_err(&pdev->dev, "%s initialization failed\n", nic_name(pdev));
	return status;
}
3560
/* PM suspend handler: stop the worker, optionally arm wake-on-lan,
 * detach/close the interface, free rings/queues and power the PCI
 * device down into the requested state.
 */
static int be_suspend(struct pci_dev *pdev, pm_message_t state)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);
	struct net_device *netdev = adapter->netdev;

	/* ensure be_worker() is neither running nor re-armed while
	 * the device is being torn down */
	cancel_delayed_work_sync(&adapter->work);
	if (adapter->wol)
		be_setup_wol(adapter, true);

	netif_device_detach(netdev);
	if (netif_running(netdev)) {
		rtnl_lock();
		be_close(netdev);
		rtnl_unlock();
	}
	be_clear(adapter);

	pci_save_state(pdev);
	pci_disable_device(pdev);
	pci_set_power_state(pdev, pci_choose_state(pdev, state));
	return 0;
}
3583
3584static int be_resume(struct pci_dev *pdev)
3585{
3586 int status = 0;
3587 struct be_adapter *adapter = pci_get_drvdata(pdev);
3588 struct net_device *netdev = adapter->netdev;
3589
3590 netif_device_detach(netdev);
3591
3592 status = pci_enable_device(pdev);
3593 if (status)
3594 return status;
3595
3596 pci_set_power_state(pdev, 0);
3597 pci_restore_state(pdev);
3598
Sathya Perla2243e2e2009-11-22 22:02:03 +00003599 /* tell fw we're ready to fire cmds */
3600 status = be_cmd_fw_init(adapter);
3601 if (status)
3602 return status;
3603
Sarveshwar Bandi9b0365f2009-08-12 21:01:29 +00003604 be_setup(adapter);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003605 if (netif_running(netdev)) {
3606 rtnl_lock();
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003607 be_open(netdev);
3608 rtnl_unlock();
3609 }
3610 netif_device_attach(netdev);
Ajit Khaparde71d8d1b2009-12-03 06:16:59 +00003611
3612 if (adapter->wol)
3613 be_setup_wol(adapter, false);
Ajit Khapardea4ca0552011-02-11 13:38:03 +00003614
3615 schedule_delayed_work(&adapter->work, msecs_to_jiffies(100));
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003616 return 0;
3617}
3618
/*
 * An FLR will stop BE from DMAing any data.
 */
static void be_shutdown(struct pci_dev *pdev)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);

	/* drvdata may be NULL if probe failed before pci_set_drvdata() */
	if (!adapter)
		return;

	/* make sure the worker cannot touch the hw after this point */
	cancel_delayed_work_sync(&adapter->work);

	netif_device_detach(adapter->netdev);

	if (adapter->wol)
		be_setup_wol(adapter, true);

	/* function-level reset quiesces all DMA before power-off/kexec */
	be_cmd_reset_function(adapter);

	pci_disable_device(pdev);
}
3640
/* PCI EEH callback: a fatal PCI channel error was detected.  Flag the
 * adapter so other paths stop touching the hw, detach/close the netdev
 * and free its resources, then tell the EEH core whether a slot reset
 * should be attempted.
 */
static pci_ers_result_t be_eeh_err_detected(struct pci_dev *pdev,
				pci_channel_state_t state)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);
	struct net_device *netdev =  adapter->netdev;

	dev_err(&adapter->pdev->dev, "EEH error detected\n");

	/* checked by be_worker()/lancer recovery to avoid hw access */
	adapter->eeh_err = true;

	netif_device_detach(netdev);

	if (netif_running(netdev)) {
		rtnl_lock();
		be_close(netdev);
		rtnl_unlock();
	}
	be_clear(adapter);

	/* permanent failure: no point requesting a reset */
	if (state == pci_channel_io_perm_failure)
		return PCI_ERS_RESULT_DISCONNECT;

	pci_disable_device(pdev);

	return PCI_ERS_RESULT_NEED_RESET;
}
3667
/* PCI EEH callback: the slot reset has completed.  Re-enable the
 * device, restore its config space and verify via POST that the card
 * and firmware are healthy again.
 */
static pci_ers_result_t be_eeh_reset(struct pci_dev *pdev)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);
	int status;

	dev_info(&adapter->pdev->dev, "EEH reset\n");
	/* clear all error state recorded before/during recovery */
	adapter->eeh_err = false;
	adapter->ue_detected = false;
	adapter->fw_timeout = false;

	status = pci_enable_device(pdev);
	if (status)
		return PCI_ERS_RESULT_DISCONNECT;

	pci_set_master(pdev);
	pci_set_power_state(pdev, 0);
	pci_restore_state(pdev);

	/* Check if card is ok and fw is ready */
	status = be_cmd_POST(adapter);
	if (status)
		return PCI_ERS_RESULT_DISCONNECT;

	return PCI_ERS_RESULT_RECOVERED;
}
3693
/* PCI EEH callback: traffic may flow again.  Re-initialize the firmware
 * command interface, rebuild rings/queues, re-open the interface if it
 * was running, and re-attach the netdev.  Logs (but cannot propagate)
 * failures, as this callback returns void.
 */
static void be_eeh_resume(struct pci_dev *pdev)
{
	int status = 0;
	struct be_adapter *adapter = pci_get_drvdata(pdev);
	struct net_device *netdev = adapter->netdev;

	dev_info(&adapter->pdev->dev, "EEH resume\n");

	pci_save_state(pdev);

	/* tell fw we're ready to fire cmds */
	status = be_cmd_fw_init(adapter);
	if (status)
		goto err;

	status = be_setup(adapter);
	if (status)
		goto err;

	if (netif_running(netdev)) {
		status = be_open(netdev);
		if (status)
			goto err;
	}
	netif_device_attach(netdev);
	return;
err:
	dev_err(&adapter->pdev->dev, "EEH resume failed\n");
}
3723
/* PCI error-recovery (EEH) entry points wired into be_driver below */
static struct pci_error_handlers be_eeh_handlers = {
	.error_detected = be_eeh_err_detected,
	.slot_reset = be_eeh_reset,
	.resume = be_eeh_resume,
};
3729
/* PCI driver descriptor: probe/remove, power management, shutdown and
 * error-recovery callbacks for all device IDs in be_dev_ids */
static struct pci_driver be_driver = {
	.name = DRV_NAME,
	.id_table = be_dev_ids,
	.probe = be_probe,
	.remove = be_remove,
	.suspend = be_suspend,
	.resume = be_resume,
	.shutdown = be_shutdown,
	.err_handler = &be_eeh_handlers
};
3740
3741static int __init be_init_module(void)
3742{
Joe Perches8e95a202009-12-03 07:58:21 +00003743 if (rx_frag_size != 8192 && rx_frag_size != 4096 &&
3744 rx_frag_size != 2048) {
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003745 printk(KERN_WARNING DRV_NAME
3746 " : Module param rx_frag_size must be 2048/4096/8192."
3747 " Using 2048\n");
3748 rx_frag_size = 2048;
3749 }
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003750
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003751 return pci_register_driver(&be_driver);
3752}
3753module_init(be_init_module);
3754
/* Module unload: unregister the PCI driver; the core then invokes
 * be_remove() for every bound device. */
static void __exit be_exit_module(void)
{
	pci_unregister_driver(&be_driver);
}
module_exit(be_exit_module);