blob: 1494f2fc5b95478215ae4bba58e502a88c21ce4c [file] [log] [blame]
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001/*
Ajit Khaparded2145cd2011-03-16 08:20:46 +00002 * Copyright (C) 2005 - 2011 Emulex
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003 * All rights reserved.
4 *
5 * This program is free software; you can redistribute it and/or
6 * modify it under the terms of the GNU General Public License version 2
7 * as published by the Free Software Foundation. The full GNU General
8 * Public License is included in this distribution in the file called COPYING.
9 *
10 * Contact Information:
Ajit Khaparded2145cd2011-03-16 08:20:46 +000011 * linux-drivers@emulex.com
Sathya Perla6b7c5b92009-03-11 23:32:03 -070012 *
Ajit Khaparded2145cd2011-03-16 08:20:46 +000013 * Emulex
14 * 3333 Susan Street
15 * Costa Mesa, CA 92626
Sathya Perla6b7c5b92009-03-11 23:32:03 -070016 */
17
Paul Gortmaker70c71602011-05-22 16:47:17 -040018#include <linux/prefetch.h>
Paul Gortmaker9d9779e2011-07-03 15:21:01 -040019#include <linux/module.h>
Sathya Perla6b7c5b92009-03-11 23:32:03 -070020#include "be.h"
Sathya Perla8788fdc2009-07-27 22:52:03 +000021#include "be_cmds.h"
Stephen Hemminger65f71b82009-03-27 00:25:24 -070022#include <asm/div64.h>
Sathya Perla6b7c5b92009-03-11 23:32:03 -070023
24MODULE_VERSION(DRV_VER);
25MODULE_DEVICE_TABLE(pci, be_dev_ids);
26MODULE_DESCRIPTION(DRV_DESC " " DRV_VER);
27MODULE_AUTHOR("ServerEngines Corporation");
28MODULE_LICENSE("GPL");
29
Sarveshwar Bandiba343c72010-03-31 02:56:12 +000030static unsigned int num_vfs;
Sarveshwar Bandiba343c72010-03-31 02:56:12 +000031module_param(num_vfs, uint, S_IRUGO);
Sarveshwar Bandiba343c72010-03-31 02:56:12 +000032MODULE_PARM_DESC(num_vfs, "Number of PCI VFs to initialize");
Sathya Perla6b7c5b92009-03-11 23:32:03 -070033
Sathya Perla11ac75e2011-12-13 00:58:50 +000034static ushort rx_frag_size = 2048;
35module_param(rx_frag_size, ushort, S_IRUGO);
36MODULE_PARM_DESC(rx_frag_size, "Size of a fragment that holds rcvd data.");
37
Sathya Perla6b7c5b92009-03-11 23:32:03 -070038static DEFINE_PCI_DEVICE_TABLE(be_dev_ids) = {
Ajit Khapardec4ca2372009-05-18 15:38:55 -070039 { PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID1) },
Ajit Khaparde59fd5d82009-10-29 01:11:06 -070040 { PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID2) },
Ajit Khapardec4ca2372009-05-18 15:38:55 -070041 { PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID1) },
42 { PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID2) },
Sathya Perlafe6d2a32010-11-21 23:25:50 +000043 { PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID3)},
Mammatha Edhala12f4d0a2011-05-18 03:26:22 +000044 { PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID4)},
Ajit Khapardeecedb6a2011-12-15 06:31:38 +000045 { PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID5)},
Sathya Perla6b7c5b92009-03-11 23:32:03 -070046 { 0 }
47};
48MODULE_DEVICE_TABLE(pci, be_dev_ids);
/* UE Status Low CSR: names of the functional blocks reported in the
 * low word of the Unrecoverable Error status register, indexed by bit
 * position. Note: trailing spaces in some entries are intentional and
 * preserved as emitted in log messages.
 */
static const char * const ue_status_low_desc[] = {
	"CEV",
	"CTX",
	"DBUF",
	"ERX",
	"Host",
	"MPU",
	"NDMA",
	"PTC ",
	"RDMA ",
	"RXF ",
	"RXIPS ",
	"RXULP0 ",
	"RXULP1 ",
	"RXULP2 ",
	"TIM ",
	"TPOST ",
	"TPRE ",
	"TXIPS ",
	"TXULP0 ",
	"TXULP1 ",
	"UC ",
	"WDMA ",
	"TXULP2 ",
	"HOST1 ",
	"P0_OB_LINK ",
	"P1_OB_LINK ",
	"HOST_GPIO ",
	"MBOX ",
	"AXGMAC0",
	"AXGMAC1",
	"JTAG",
	"MPU_INTPEND"
};
/* UE Status High CSR: names of the functional blocks reported in the
 * high word of the Unrecoverable Error status register, indexed by bit
 * position. "Unknown" entries are reserved/undocumented bits.
 */
static const char * const ue_status_hi_desc[] = {
	"LPCMEMHOST",
	"MGMT_MAC",
	"PCS0ONLINE",
	"MPU_IRAM",
	"PCS1ONLINE",
	"PCTL0",
	"PCTL1",
	"PMEM",
	"RR",
	"TXPB",
	"RXPP",
	"XAUI",
	"TXP",
	"ARM",
	"IPC",
	"HOST2",
	"HOST3",
	"HOST4",
	"HOST5",
	"HOST6",
	"HOST7",
	"HOST8",
	"HOST9",
	"NETC",
	"Unknown",
	"Unknown",
	"Unknown",
	"Unknown",
	"Unknown",
	"Unknown",
	"Unknown",
	"Unknown"
};
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700119
Sathya Perla752961a2011-10-24 02:45:03 +0000120/* Is BE in a multi-channel mode */
121static inline bool be_is_mc(struct be_adapter *adapter) {
122 return (adapter->function_mode & FLEX10_MODE ||
123 adapter->function_mode & VNIC_MODE ||
124 adapter->function_mode & UMC_ENABLED);
125}
126
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700127static void be_queue_free(struct be_adapter *adapter, struct be_queue_info *q)
128{
129 struct be_dma_mem *mem = &q->dma_mem;
Sathya Perla1cfafab2012-02-23 18:50:15 +0000130 if (mem->va) {
Ivan Vecera2b7bceb2011-02-02 08:05:12 +0000131 dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
132 mem->dma);
Sathya Perla1cfafab2012-02-23 18:50:15 +0000133 mem->va = NULL;
134 }
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700135}
136
137static int be_queue_alloc(struct be_adapter *adapter, struct be_queue_info *q,
138 u16 len, u16 entry_size)
139{
140 struct be_dma_mem *mem = &q->dma_mem;
141
142 memset(q, 0, sizeof(*q));
143 q->len = len;
144 q->entry_size = entry_size;
145 mem->size = len * entry_size;
Ivan Vecera2b7bceb2011-02-02 08:05:12 +0000146 mem->va = dma_alloc_coherent(&adapter->pdev->dev, mem->size, &mem->dma,
147 GFP_KERNEL);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700148 if (!mem->va)
Sathya Perla10ef9ab2012-02-09 18:05:27 +0000149 return -ENOMEM;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700150 memset(mem->va, 0, mem->size);
151 return 0;
152}
153
Sathya Perla8788fdc2009-07-27 22:52:03 +0000154static void be_intr_set(struct be_adapter *adapter, bool enable)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700155{
Sathya Perladb3ea782011-08-22 19:41:52 +0000156 u32 reg, enabled;
Sathya Perla5f0b8492009-07-27 22:52:56 +0000157
Padmanabh Ratnakarf67ef7b2012-07-12 03:57:09 +0000158 if (adapter->eeh_error)
Sathya Perlacf588472010-02-14 21:22:01 +0000159 return;
160
Sathya Perladb3ea782011-08-22 19:41:52 +0000161 pci_read_config_dword(adapter->pdev, PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET,
162 &reg);
163 enabled = reg & MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
164
Sathya Perla5f0b8492009-07-27 22:52:56 +0000165 if (!enabled && enable)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700166 reg |= MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
Sathya Perla5f0b8492009-07-27 22:52:56 +0000167 else if (enabled && !enable)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700168 reg &= ~MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
Sathya Perla5f0b8492009-07-27 22:52:56 +0000169 else
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700170 return;
Sathya Perla5f0b8492009-07-27 22:52:56 +0000171
Sathya Perladb3ea782011-08-22 19:41:52 +0000172 pci_write_config_dword(adapter->pdev,
173 PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET, reg);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700174}
175
Sathya Perla8788fdc2009-07-27 22:52:03 +0000176static void be_rxq_notify(struct be_adapter *adapter, u16 qid, u16 posted)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700177{
178 u32 val = 0;
179 val |= qid & DB_RQ_RING_ID_MASK;
180 val |= posted << DB_RQ_NUM_POSTED_SHIFT;
Sathya Perlaf3eb62d2010-06-29 00:11:17 +0000181
182 wmb();
Sathya Perla8788fdc2009-07-27 22:52:03 +0000183 iowrite32(val, adapter->db + DB_RQ_OFFSET);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700184}
185
Sathya Perla8788fdc2009-07-27 22:52:03 +0000186static void be_txq_notify(struct be_adapter *adapter, u16 qid, u16 posted)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700187{
188 u32 val = 0;
189 val |= qid & DB_TXULP_RING_ID_MASK;
190 val |= (posted & DB_TXULP_NUM_POSTED_MASK) << DB_TXULP_NUM_POSTED_SHIFT;
Sathya Perlaf3eb62d2010-06-29 00:11:17 +0000191
192 wmb();
Sathya Perla8788fdc2009-07-27 22:52:03 +0000193 iowrite32(val, adapter->db + DB_TXULP1_OFFSET);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700194}
195
Sathya Perla8788fdc2009-07-27 22:52:03 +0000196static void be_eq_notify(struct be_adapter *adapter, u16 qid,
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700197 bool arm, bool clear_int, u16 num_popped)
198{
199 u32 val = 0;
200 val |= qid & DB_EQ_RING_ID_MASK;
Sathya Perlafe6d2a32010-11-21 23:25:50 +0000201 val |= ((qid & DB_EQ_RING_ID_EXT_MASK) <<
202 DB_EQ_RING_ID_EXT_MASK_SHIFT);
Sathya Perlacf588472010-02-14 21:22:01 +0000203
Padmanabh Ratnakarf67ef7b2012-07-12 03:57:09 +0000204 if (adapter->eeh_error)
Sathya Perlacf588472010-02-14 21:22:01 +0000205 return;
206
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700207 if (arm)
208 val |= 1 << DB_EQ_REARM_SHIFT;
209 if (clear_int)
210 val |= 1 << DB_EQ_CLR_SHIFT;
211 val |= 1 << DB_EQ_EVNT_SHIFT;
212 val |= num_popped << DB_EQ_NUM_POPPED_SHIFT;
Sathya Perla8788fdc2009-07-27 22:52:03 +0000213 iowrite32(val, adapter->db + DB_EQ_OFFSET);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700214}
215
Sathya Perla8788fdc2009-07-27 22:52:03 +0000216void be_cq_notify(struct be_adapter *adapter, u16 qid, bool arm, u16 num_popped)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700217{
218 u32 val = 0;
219 val |= qid & DB_CQ_RING_ID_MASK;
Sathya Perlafe6d2a32010-11-21 23:25:50 +0000220 val |= ((qid & DB_CQ_RING_ID_EXT_MASK) <<
221 DB_CQ_RING_ID_EXT_MASK_SHIFT);
Sathya Perlacf588472010-02-14 21:22:01 +0000222
Padmanabh Ratnakarf67ef7b2012-07-12 03:57:09 +0000223 if (adapter->eeh_error)
Sathya Perlacf588472010-02-14 21:22:01 +0000224 return;
225
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700226 if (arm)
227 val |= 1 << DB_CQ_REARM_SHIFT;
228 val |= num_popped << DB_CQ_NUM_POPPED_SHIFT;
Sathya Perla8788fdc2009-07-27 22:52:03 +0000229 iowrite32(val, adapter->db + DB_CQ_OFFSET);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700230}
231
/* ndo_set_mac_address handler: program a new unicast MAC address.
 *
 * The currently programmed MAC is queried from FW first; if the
 * requested address differs, the new pmac entry is added BEFORE the
 * old one is deleted so the interface never transiently lacks a valid
 * MAC. On any FW command failure the netdev address is left unchanged
 * and the (negative) FW status is returned.
 */
static int be_mac_addr_set(struct net_device *netdev, void *p)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct sockaddr *addr = p;
	int status = 0;
	u8 current_mac[ETH_ALEN];
	/* remember the old pmac id so it can be deleted after the add */
	u32 pmac_id = adapter->pmac_id[0];

	if (!is_valid_ether_addr(addr->sa_data))
		return -EADDRNOTAVAIL;

	status = be_cmd_mac_addr_query(adapter, current_mac,
				MAC_ADDRESS_TYPE_NETWORK, false,
				adapter->if_handle, 0);
	if (status)
		goto err;

	/* only reprogram FW if the address actually changed */
	if (memcmp(addr->sa_data, current_mac, ETH_ALEN)) {
		status = be_cmd_pmac_add(adapter, (u8 *)addr->sa_data,
				adapter->if_handle, &adapter->pmac_id[0], 0);
		if (status)
			goto err;

		/* add succeeded; best-effort delete of the old entry */
		be_cmd_pmac_del(adapter, adapter->if_handle, pmac_id, 0);
	}
	memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
	return 0;
err:
	dev_err(&adapter->pdev->dev, "MAC %pM set Failed\n", addr->sa_data);
	return status;
}
263
/* Copy the v0 (BE2) FW statistics layout into the generation-agnostic
 * drv_stats. The FW buffer is converted LE->CPU in place first; the
 * per-port block for this adapter's port is then flattened field by
 * field. Jabber events are per-port but live in the rxf block, so the
 * port number selects which counter to use.
 */
static void populate_be2_stats(struct be_adapter *adapter)
{
	struct be_hw_stats_v0 *hw_stats = hw_stats_from_cmd(adapter);
	struct be_pmem_stats *pmem_sts = &hw_stats->pmem;
	struct be_rxf_stats_v0 *rxf_stats = &hw_stats->rxf;
	struct be_port_rxf_stats_v0 *port_stats =
		&rxf_stats->port[adapter->port_num];
	struct be_drv_stats *drvs = &adapter->drv_stats;

	be_dws_le_to_cpu(hw_stats, sizeof(*hw_stats));
	drvs->rx_pause_frames = port_stats->rx_pause_frames;
	drvs->rx_crc_errors = port_stats->rx_crc_errors;
	drvs->rx_control_frames = port_stats->rx_control_frames;
	drvs->rx_in_range_errors = port_stats->rx_in_range_errors;
	drvs->rx_frame_too_long = port_stats->rx_frame_too_long;
	drvs->rx_dropped_runt = port_stats->rx_dropped_runt;
	drvs->rx_ip_checksum_errs = port_stats->rx_ip_checksum_errs;
	drvs->rx_tcp_checksum_errs = port_stats->rx_tcp_checksum_errs;
	drvs->rx_udp_checksum_errs = port_stats->rx_udp_checksum_errs;
	drvs->rxpp_fifo_overflow_drop = port_stats->rx_fifo_overflow;
	drvs->rx_dropped_tcp_length = port_stats->rx_dropped_tcp_length;
	drvs->rx_dropped_too_small = port_stats->rx_dropped_too_small;
	drvs->rx_dropped_too_short = port_stats->rx_dropped_too_short;
	drvs->rx_out_range_errors = port_stats->rx_out_range_errors;
	drvs->rx_input_fifo_overflow_drop = port_stats->rx_input_fifo_overflow;
	drvs->rx_dropped_header_too_small =
		port_stats->rx_dropped_header_too_small;
	/* v0 splits address/vlan mismatch drops; the driver reports one sum */
	drvs->rx_address_mismatch_drops =
		port_stats->rx_address_mismatch_drops +
		port_stats->rx_vlan_mismatch_drops;
	drvs->rx_alignment_symbol_errors =
		port_stats->rx_alignment_symbol_errors;

	drvs->tx_pauseframes = port_stats->tx_pauseframes;
	drvs->tx_controlframes = port_stats->tx_controlframes;

	if (adapter->port_num)
		drvs->jabber_events = rxf_stats->port1_jabber_events;
	else
		drvs->jabber_events = rxf_stats->port0_jabber_events;
	drvs->rx_drops_no_pbuf = rxf_stats->rx_drops_no_pbuf;
	drvs->rx_drops_no_erx_descr = rxf_stats->rx_drops_no_erx_descr;
	drvs->forwarded_packets = rxf_stats->forwarded_packets;
	drvs->rx_drops_mtu = rxf_stats->rx_drops_mtu;
	drvs->rx_drops_no_tpre_descr = rxf_stats->rx_drops_no_tpre_descr;
	drvs->rx_drops_too_many_frags = rxf_stats->rx_drops_too_many_frags;
	adapter->drv_stats.eth_red_drops = pmem_sts->eth_red_drops;
}
312
/* Copy the v1 (BE3) FW statistics layout into the generation-agnostic
 * drv_stats. Unlike v0, the v1 per-port block carries its own jabber,
 * pmem-fifo and priority-pause counters, and address-mismatch drops
 * are already a single field.
 */
static void populate_be3_stats(struct be_adapter *adapter)
{
	struct be_hw_stats_v1 *hw_stats = hw_stats_from_cmd(adapter);
	struct be_pmem_stats *pmem_sts = &hw_stats->pmem;
	struct be_rxf_stats_v1 *rxf_stats = &hw_stats->rxf;
	struct be_port_rxf_stats_v1 *port_stats =
		&rxf_stats->port[adapter->port_num];
	struct be_drv_stats *drvs = &adapter->drv_stats;

	be_dws_le_to_cpu(hw_stats, sizeof(*hw_stats));
	drvs->pmem_fifo_overflow_drop = port_stats->pmem_fifo_overflow_drop;
	drvs->rx_priority_pause_frames = port_stats->rx_priority_pause_frames;
	drvs->rx_pause_frames = port_stats->rx_pause_frames;
	drvs->rx_crc_errors = port_stats->rx_crc_errors;
	drvs->rx_control_frames = port_stats->rx_control_frames;
	drvs->rx_in_range_errors = port_stats->rx_in_range_errors;
	drvs->rx_frame_too_long = port_stats->rx_frame_too_long;
	drvs->rx_dropped_runt = port_stats->rx_dropped_runt;
	drvs->rx_ip_checksum_errs = port_stats->rx_ip_checksum_errs;
	drvs->rx_tcp_checksum_errs = port_stats->rx_tcp_checksum_errs;
	drvs->rx_udp_checksum_errs = port_stats->rx_udp_checksum_errs;
	drvs->rx_dropped_tcp_length = port_stats->rx_dropped_tcp_length;
	drvs->rx_dropped_too_small = port_stats->rx_dropped_too_small;
	drvs->rx_dropped_too_short = port_stats->rx_dropped_too_short;
	drvs->rx_out_range_errors = port_stats->rx_out_range_errors;
	drvs->rx_dropped_header_too_small =
		port_stats->rx_dropped_header_too_small;
	drvs->rx_input_fifo_overflow_drop =
		port_stats->rx_input_fifo_overflow_drop;
	drvs->rx_address_mismatch_drops = port_stats->rx_address_mismatch_drops;
	drvs->rx_alignment_symbol_errors =
		port_stats->rx_alignment_symbol_errors;
	drvs->rxpp_fifo_overflow_drop = port_stats->rxpp_fifo_overflow_drop;
	drvs->tx_pauseframes = port_stats->tx_pauseframes;
	drvs->tx_controlframes = port_stats->tx_controlframes;
	drvs->jabber_events = port_stats->jabber_events;
	drvs->rx_drops_no_pbuf = rxf_stats->rx_drops_no_pbuf;
	drvs->rx_drops_no_erx_descr = rxf_stats->rx_drops_no_erx_descr;
	drvs->forwarded_packets = rxf_stats->forwarded_packets;
	drvs->rx_drops_mtu = rxf_stats->rx_drops_mtu;
	drvs->rx_drops_no_tpre_descr = rxf_stats->rx_drops_no_tpre_descr;
	drvs->rx_drops_too_many_frags = rxf_stats->rx_drops_too_many_frags;
	adapter->drv_stats.eth_red_drops = pmem_sts->eth_red_drops;
}
357
/* Copy the Lancer physical-port statistics layout into the
 * generation-agnostic drv_stats. Lancer keeps 64-bit counters; only
 * the low 32 bits ("_lo") of most of them are reported here.
 */
static void populate_lancer_stats(struct be_adapter *adapter)
{

	struct be_drv_stats *drvs = &adapter->drv_stats;
	struct lancer_pport_stats *pport_stats =
					pport_stats_from_cmd(adapter);

	be_dws_le_to_cpu(pport_stats, sizeof(*pport_stats));
	drvs->rx_pause_frames = pport_stats->rx_pause_frames_lo;
	drvs->rx_crc_errors = pport_stats->rx_crc_errors_lo;
	drvs->rx_control_frames = pport_stats->rx_control_frames_lo;
	drvs->rx_in_range_errors = pport_stats->rx_in_range_errors;
	drvs->rx_frame_too_long = pport_stats->rx_frames_too_long_lo;
	drvs->rx_dropped_runt = pport_stats->rx_dropped_runt;
	drvs->rx_ip_checksum_errs = pport_stats->rx_ip_checksum_errors;
	drvs->rx_tcp_checksum_errs = pport_stats->rx_tcp_checksum_errors;
	drvs->rx_udp_checksum_errs = pport_stats->rx_udp_checksum_errors;
	drvs->rx_dropped_tcp_length =
				pport_stats->rx_dropped_invalid_tcp_length;
	drvs->rx_dropped_too_small = pport_stats->rx_dropped_too_small;
	drvs->rx_dropped_too_short = pport_stats->rx_dropped_too_short;
	drvs->rx_out_range_errors = pport_stats->rx_out_of_range_errors;
	drvs->rx_dropped_header_too_small =
				pport_stats->rx_dropped_header_too_small;
	/* the same HW fifo-overflow counter feeds both drv fields */
	drvs->rx_input_fifo_overflow_drop = pport_stats->rx_fifo_overflow;
	/* address and vlan mismatch drops are summed, as in the v0 layout */
	drvs->rx_address_mismatch_drops =
					pport_stats->rx_address_mismatch_drops +
					pport_stats->rx_vlan_mismatch_drops;
	drvs->rx_alignment_symbol_errors = pport_stats->rx_symbol_errors_lo;
	drvs->rxpp_fifo_overflow_drop = pport_stats->rx_fifo_overflow;
	drvs->tx_pauseframes = pport_stats->tx_pause_frames_lo;
	drvs->tx_controlframes = pport_stats->tx_control_frames_lo;
	drvs->jabber_events = pport_stats->rx_jabbers;
	drvs->forwarded_packets = pport_stats->num_forwards_lo;
	drvs->rx_drops_mtu = pport_stats->rx_drops_mtu_lo;
	drvs->rx_drops_too_many_frags =
				pport_stats->rx_drops_too_many_frags_lo;
}
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000396
Sathya Perla09c1c682011-08-22 19:41:53 +0000397static void accumulate_16bit_val(u32 *acc, u16 val)
398{
399#define lo(x) (x & 0xFFFF)
400#define hi(x) (x & 0xFFFF0000)
401 bool wrapped = val < lo(*acc);
402 u32 newacc = hi(*acc) + val;
403
404 if (wrapped)
405 newacc += 65536;
406 ACCESS_ONCE(*acc) = newacc;
407}
408
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000409void be_parse_stats(struct be_adapter *adapter)
410{
Sathya Perlaac124ff2011-07-25 19:10:14 +0000411 struct be_erx_stats_v1 *erx = be_erx_stats_from_cmd(adapter);
412 struct be_rx_obj *rxo;
413 int i;
414
Selvin Xavier005d5692011-05-16 07:36:35 +0000415 if (adapter->generation == BE_GEN3) {
416 if (lancer_chip(adapter))
417 populate_lancer_stats(adapter);
418 else
419 populate_be3_stats(adapter);
420 } else {
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000421 populate_be2_stats(adapter);
Selvin Xavier005d5692011-05-16 07:36:35 +0000422 }
Sathya Perlaac124ff2011-07-25 19:10:14 +0000423
Padmanabh Ratnakard51ebd32012-04-25 01:46:52 +0000424 if (lancer_chip(adapter))
425 goto done;
426
Sathya Perlaac124ff2011-07-25 19:10:14 +0000427 /* as erx_v1 is longer than v0, ok to use v1 defn for v0 access */
Sathya Perla09c1c682011-08-22 19:41:53 +0000428 for_all_rx_queues(adapter, rxo, i) {
429 /* below erx HW counter can actually wrap around after
430 * 65535. Driver accumulates a 32-bit value
431 */
432 accumulate_16bit_val(&rx_stats(rxo)->rx_drops_no_frags,
433 (u16)erx->rx_drops_no_fragments[rxo->q.id]);
434 }
Padmanabh Ratnakard51ebd32012-04-25 01:46:52 +0000435done:
436 return;
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000437}
438
/* ndo_get_stats64 handler: aggregate per-queue packet/byte counters
 * and the FW-derived drv_stats into the rtnl_link_stats64 structure.
 *
 * Per-queue counters are read under the u64_stats seqcount retry
 * loop so a 32-bit reader never observes a torn 64-bit value; the
 * loop order (begin, read, retry) must not be changed.
 */
static struct rtnl_link_stats64 *be_get_stats64(struct net_device *netdev,
					struct rtnl_link_stats64 *stats)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_drv_stats *drvs = &adapter->drv_stats;
	struct be_rx_obj *rxo;
	struct be_tx_obj *txo;
	u64 pkts, bytes;
	unsigned int start;
	int i;

	for_all_rx_queues(adapter, rxo, i) {
		const struct be_rx_stats *rx_stats = rx_stats(rxo);
		do {
			start = u64_stats_fetch_begin_bh(&rx_stats->sync);
			pkts = rx_stats(rxo)->rx_pkts;
			bytes = rx_stats(rxo)->rx_bytes;
		} while (u64_stats_fetch_retry_bh(&rx_stats->sync, start));
		stats->rx_packets += pkts;
		stats->rx_bytes += bytes;
		stats->multicast += rx_stats(rxo)->rx_mcast_pkts;
		stats->rx_dropped += rx_stats(rxo)->rx_drops_no_skbs +
					rx_stats(rxo)->rx_drops_no_frags;
	}

	for_all_tx_queues(adapter, txo, i) {
		const struct be_tx_stats *tx_stats = tx_stats(txo);
		do {
			start = u64_stats_fetch_begin_bh(&tx_stats->sync);
			pkts = tx_stats(txo)->tx_pkts;
			bytes = tx_stats(txo)->tx_bytes;
		} while (u64_stats_fetch_retry_bh(&tx_stats->sync, start));
		stats->tx_packets += pkts;
		stats->tx_bytes += bytes;
	}

	/* bad pkts received */
	stats->rx_errors = drvs->rx_crc_errors +
		drvs->rx_alignment_symbol_errors +
		drvs->rx_in_range_errors +
		drvs->rx_out_range_errors +
		drvs->rx_frame_too_long +
		drvs->rx_dropped_too_small +
		drvs->rx_dropped_too_short +
		drvs->rx_dropped_header_too_small +
		drvs->rx_dropped_tcp_length +
		drvs->rx_dropped_runt;

	/* detailed rx errors */
	stats->rx_length_errors = drvs->rx_in_range_errors +
		drvs->rx_out_range_errors +
		drvs->rx_frame_too_long;

	stats->rx_crc_errors = drvs->rx_crc_errors;

	/* frame alignment errors */
	stats->rx_frame_errors = drvs->rx_alignment_symbol_errors;

	/* receiver fifo overrun */
	/* drops_no_pbuf is no per i/f, it's per BE card */
	stats->rx_fifo_errors = drvs->rxpp_fifo_overflow_drop +
				drvs->rx_input_fifo_overflow_drop +
				drvs->rx_drops_no_pbuf;
	return stats;
}
504
Ajit Khapardeb236916a2011-12-30 12:15:40 +0000505void be_link_status_update(struct be_adapter *adapter, u8 link_status)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700506{
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700507 struct net_device *netdev = adapter->netdev;
508
Ajit Khapardeb236916a2011-12-30 12:15:40 +0000509 if (!(adapter->flags & BE_FLAGS_LINK_STATUS_INIT)) {
Sathya Perlaea172a02011-08-02 19:57:42 +0000510 netif_carrier_off(netdev);
Ajit Khapardeb236916a2011-12-30 12:15:40 +0000511 adapter->flags |= BE_FLAGS_LINK_STATUS_INIT;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700512 }
Ajit Khapardeb236916a2011-12-30 12:15:40 +0000513
514 if ((link_status & LINK_STATUS_MASK) == LINK_UP)
515 netif_carrier_on(netdev);
516 else
517 netif_carrier_off(netdev);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700518}
519
Sathya Perla3c8def92011-06-12 20:01:58 +0000520static void be_tx_stats_update(struct be_tx_obj *txo,
Ajit Khaparde91992e42010-02-19 13:57:12 +0000521 u32 wrb_cnt, u32 copied, u32 gso_segs, bool stopped)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700522{
Sathya Perla3c8def92011-06-12 20:01:58 +0000523 struct be_tx_stats *stats = tx_stats(txo);
524
Sathya Perlaab1594e2011-07-25 19:10:15 +0000525 u64_stats_update_begin(&stats->sync);
Sathya Perlaac124ff2011-07-25 19:10:14 +0000526 stats->tx_reqs++;
527 stats->tx_wrbs += wrb_cnt;
528 stats->tx_bytes += copied;
529 stats->tx_pkts += (gso_segs ? gso_segs : 1);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700530 if (stopped)
Sathya Perlaac124ff2011-07-25 19:10:14 +0000531 stats->tx_stops++;
Sathya Perlaab1594e2011-07-25 19:10:15 +0000532 u64_stats_update_end(&stats->sync);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700533}
534
535/* Determine number of WRB entries needed to xmit data in an skb */
Sathya Perlafe6d2a32010-11-21 23:25:50 +0000536static u32 wrb_cnt_for_skb(struct be_adapter *adapter, struct sk_buff *skb,
537 bool *dummy)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700538{
David S. Millerebc8d2a2009-06-09 01:01:31 -0700539 int cnt = (skb->len > skb->data_len);
540
541 cnt += skb_shinfo(skb)->nr_frags;
542
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700543 /* to account for hdr wrb */
544 cnt++;
Sathya Perlafe6d2a32010-11-21 23:25:50 +0000545 if (lancer_chip(adapter) || !(cnt & 1)) {
546 *dummy = false;
547 } else {
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700548 /* add a dummy to make it an even num */
549 cnt++;
550 *dummy = true;
Sathya Perlafe6d2a32010-11-21 23:25:50 +0000551 }
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700552 BUG_ON(cnt > BE_MAX_TX_FRAG_COUNT);
553 return cnt;
554}
555
556static inline void wrb_fill(struct be_eth_wrb *wrb, u64 addr, int len)
557{
558 wrb->frag_pa_hi = upper_32_bits(addr);
559 wrb->frag_pa_lo = addr & 0xFFFFFFFF;
560 wrb->frag_len = len & ETH_WRB_FRAG_LEN_MASK;
Somnath Kotur89b1f492012-06-24 19:40:55 +0000561 wrb->rsvd0 = 0;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700562}
563
Ajit Khaparde1ded1322011-12-09 13:53:17 +0000564static inline u16 be_get_tx_vlan_tag(struct be_adapter *adapter,
565 struct sk_buff *skb)
566{
567 u8 vlan_prio;
568 u16 vlan_tag;
569
570 vlan_tag = vlan_tx_tag_get(skb);
571 vlan_prio = (vlan_tag & VLAN_PRIO_MASK) >> VLAN_PRIO_SHIFT;
572 /* If vlan priority provided by OS is NOT in available bmap */
573 if (!(adapter->vlan_prio_bmap & (1 << vlan_prio)))
574 vlan_tag = (vlan_tag & ~VLAN_PRIO_MASK) |
575 adapter->recommended_prio;
576
577 return vlan_tag;
578}
579
Somnath Kotur93040ae2012-06-26 22:32:10 +0000580static int be_vlan_tag_chk(struct be_adapter *adapter, struct sk_buff *skb)
581{
582 return vlan_tx_tag_present(skb) || adapter->pvid;
583}
584
/* Fill the TX header WRB describing the whole request: checksum/LSO
 * offload flags, VLAN insertion, total WRB count and total length.
 * All fields are programmed through AMAP_SET_BITS into the HW-defined
 * amap_eth_hdr_wrb bit layout; the header is zeroed first so unset
 * fields stay 0.
 */
static void wrb_fill_hdr(struct be_adapter *adapter, struct be_eth_hdr_wrb *hdr,
		struct sk_buff *skb, u32 wrb_cnt, u32 len)
{
	u16 vlan_tag;

	memset(hdr, 0, sizeof(*hdr));

	AMAP_SET_BITS(struct amap_eth_hdr_wrb, crc, hdr, 1);

	if (skb_is_gso(skb)) {
		AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso, hdr, 1);
		AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso_mss,
			hdr, skb_shinfo(skb)->gso_size);
		/* Lancer does not need/use the lso6 flag for IPv6 GSO */
		if (skb_is_gso_v6(skb) && !lancer_chip(adapter))
			AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso6, hdr, 1);
		/* Lancer A0 additionally wants explicit csum flags with LSO */
		if (lancer_chip(adapter) && adapter->sli_family ==
							LANCER_A0_SLI_FAMILY) {
			AMAP_SET_BITS(struct amap_eth_hdr_wrb, ipcs, hdr, 1);
			if (is_tcp_pkt(skb))
				AMAP_SET_BITS(struct amap_eth_hdr_wrb,
								tcpcs, hdr, 1);
			else if (is_udp_pkt(skb))
				AMAP_SET_BITS(struct amap_eth_hdr_wrb,
								udpcs, hdr, 1);
		}
	} else if (skb->ip_summed == CHECKSUM_PARTIAL) {
		/* non-GSO HW checksum offload: flag TCP or UDP csum */
		if (is_tcp_pkt(skb))
			AMAP_SET_BITS(struct amap_eth_hdr_wrb, tcpcs, hdr, 1);
		else if (is_udp_pkt(skb))
			AMAP_SET_BITS(struct amap_eth_hdr_wrb, udpcs, hdr, 1);
	}

	if (vlan_tx_tag_present(skb)) {
		AMAP_SET_BITS(struct amap_eth_hdr_wrb, vlan, hdr, 1);
		vlan_tag = be_get_tx_vlan_tag(adapter, skb);
		AMAP_SET_BITS(struct amap_eth_hdr_wrb, vlan_tag, hdr, vlan_tag);
	}

	AMAP_SET_BITS(struct amap_eth_hdr_wrb, event, hdr, 1);
	AMAP_SET_BITS(struct amap_eth_hdr_wrb, complete, hdr, 1);
	AMAP_SET_BITS(struct amap_eth_hdr_wrb, num_wrb, hdr, wrb_cnt);
	AMAP_SET_BITS(struct amap_eth_hdr_wrb, len, hdr, len);
}
628
Ivan Vecera2b7bceb2011-02-02 08:05:12 +0000629static void unmap_tx_frag(struct device *dev, struct be_eth_wrb *wrb,
Sathya Perla7101e112010-03-22 20:41:12 +0000630 bool unmap_single)
631{
632 dma_addr_t dma;
633
634 be_dws_le_to_cpu(wrb, sizeof(*wrb));
635
636 dma = (u64)wrb->frag_pa_hi << 32 | (u64)wrb->frag_pa_lo;
FUJITA Tomonorib681ee72010-04-04 21:40:18 +0000637 if (wrb->frag_len) {
Sathya Perla7101e112010-03-22 20:41:12 +0000638 if (unmap_single)
Ivan Vecera2b7bceb2011-02-02 08:05:12 +0000639 dma_unmap_single(dev, dma, wrb->frag_len,
640 DMA_TO_DEVICE);
Sathya Perla7101e112010-03-22 20:41:12 +0000641 else
Ivan Vecera2b7bceb2011-02-02 08:05:12 +0000642 dma_unmap_page(dev, dma, wrb->frag_len, DMA_TO_DEVICE);
Sathya Perla7101e112010-03-22 20:41:12 +0000643 }
644}
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700645
/* DMA-map the skb's linear part and page frags and post one tx wrb per
 * mapping into @txq. The first queue slot is reserved for the header wrb,
 * which is filled last once the total mapped byte count is known.
 * Returns the number of data bytes mapped, or 0 on a DMA mapping error
 * (after unwinding all mappings done so far).
 */
static int make_tx_wrbs(struct be_adapter *adapter, struct be_queue_info *txq,
		struct sk_buff *skb, u32 wrb_cnt, bool dummy_wrb)
{
	dma_addr_t busaddr;
	int i, copied = 0;
	struct device *dev = &adapter->pdev->dev;
	struct sk_buff *first_skb = skb;
	struct be_eth_wrb *wrb;
	struct be_eth_hdr_wrb *hdr;
	bool map_single = false;
	u16 map_head;

	/* Reserve the first slot for the header wrb; remember the head
	 * position right after it as the rollback point for dma_err */
	hdr = queue_head_node(txq);
	queue_head_inc(txq);
	map_head = txq->head;

	/* Linear (headlen) portion of the skb, if any */
	if (skb->len > skb->data_len) {
		int len = skb_headlen(skb);
		busaddr = dma_map_single(dev, skb->data, len, DMA_TO_DEVICE);
		if (dma_mapping_error(dev, busaddr))
			goto dma_err;
		map_single = true;	/* first wrb was dma_map_single'd */
		wrb = queue_head_node(txq);
		wrb_fill(wrb, busaddr, len);
		be_dws_cpu_to_le(wrb, sizeof(*wrb));
		queue_head_inc(txq);
		copied += len;
	}

	/* One wrb per paged frag */
	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		const struct skb_frag_struct *frag =
			&skb_shinfo(skb)->frags[i];
		busaddr = skb_frag_dma_map(dev, frag, 0,
				skb_frag_size(frag), DMA_TO_DEVICE);
		if (dma_mapping_error(dev, busaddr))
			goto dma_err;
		wrb = queue_head_node(txq);
		wrb_fill(wrb, busaddr, skb_frag_size(frag));
		be_dws_cpu_to_le(wrb, sizeof(*wrb));
		queue_head_inc(txq);
		copied += skb_frag_size(frag);
	}

	/* Caller (via wrb_cnt_for_skb) may request a zero-length padding
	 * wrb — presumably a HW wrb-count requirement; see wrb_cnt_for_skb */
	if (dummy_wrb) {
		wrb = queue_head_node(txq);
		wrb_fill(wrb, 0, 0);
		be_dws_cpu_to_le(wrb, sizeof(*wrb));
		queue_head_inc(txq);
	}

	/* Now that 'copied' is final, fill in the reserved header wrb */
	wrb_fill_hdr(adapter, hdr, first_skb, wrb_cnt, copied);
	be_dws_cpu_to_le(hdr, sizeof(*hdr));

	return copied;
dma_err:
	/* Unwind: rewind head to just past the hdr wrb and unmap every wrb
	 * posted so far; only the first one (if any) used dma_map_single */
	txq->head = map_head;
	while (copied) {
		wrb = queue_head_node(txq);
		unmap_tx_frag(dev, wrb, map_single);
		map_single = false;
		copied -= wrb->frag_len;
		queue_head_inc(txq);
	}
	return 0;
}
711
Somnath Kotur93040ae2012-06-26 22:32:10 +0000712static struct sk_buff *be_insert_vlan_in_pkt(struct be_adapter *adapter,
713 struct sk_buff *skb)
714{
715 u16 vlan_tag = 0;
716
717 skb = skb_share_check(skb, GFP_ATOMIC);
718 if (unlikely(!skb))
719 return skb;
720
721 if (vlan_tx_tag_present(skb)) {
722 vlan_tag = be_get_tx_vlan_tag(adapter, skb);
723 __vlan_put_tag(skb, vlan_tag);
724 skb->vlan_tci = 0;
725 }
726
727 return skb;
728}
729
Stephen Hemminger613573252009-08-31 19:50:58 +0000730static netdev_tx_t be_xmit(struct sk_buff *skb,
Sathya Perlab31c50a2009-09-17 10:30:13 -0700731 struct net_device *netdev)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700732{
733 struct be_adapter *adapter = netdev_priv(netdev);
Sathya Perla3c8def92011-06-12 20:01:58 +0000734 struct be_tx_obj *txo = &adapter->tx_obj[skb_get_queue_mapping(skb)];
735 struct be_queue_info *txq = &txo->q;
Somnath Kotur93040ae2012-06-26 22:32:10 +0000736 struct iphdr *ip = NULL;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700737 u32 wrb_cnt = 0, copied = 0;
Somnath Kotur93040ae2012-06-26 22:32:10 +0000738 u32 start = txq->head, eth_hdr_len;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700739 bool dummy_wrb, stopped = false;
740
Somnath Kotur93040ae2012-06-26 22:32:10 +0000741 eth_hdr_len = ntohs(skb->protocol) == ETH_P_8021Q ?
742 VLAN_ETH_HLEN : ETH_HLEN;
743
744 /* HW has a bug which considers padding bytes as legal
745 * and modifies the IPv4 hdr's 'tot_len' field
Ajit Khaparde1ded1322011-12-09 13:53:17 +0000746 */
Somnath Kotur93040ae2012-06-26 22:32:10 +0000747 if (skb->len <= 60 && be_vlan_tag_chk(adapter, skb) &&
748 is_ipv4_pkt(skb)) {
749 ip = (struct iphdr *)ip_hdr(skb);
750 pskb_trim(skb, eth_hdr_len + ntohs(ip->tot_len));
751 }
752
753 /* HW has a bug wherein it will calculate CSUM for VLAN
754 * pkts even though it is disabled.
755 * Manually insert VLAN in pkt.
756 */
757 if (skb->ip_summed != CHECKSUM_PARTIAL &&
758 be_vlan_tag_chk(adapter, skb)) {
759 skb = be_insert_vlan_in_pkt(adapter, skb);
Ajit Khaparde1ded1322011-12-09 13:53:17 +0000760 if (unlikely(!skb))
761 goto tx_drop;
Ajit Khaparde1ded1322011-12-09 13:53:17 +0000762 }
763
Sathya Perlafe6d2a32010-11-21 23:25:50 +0000764 wrb_cnt = wrb_cnt_for_skb(adapter, skb, &dummy_wrb);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700765
Sathya Perla3c8def92011-06-12 20:01:58 +0000766 copied = make_tx_wrbs(adapter, txq, skb, wrb_cnt, dummy_wrb);
Ajit Khapardec190e3c2009-09-04 03:12:29 +0000767 if (copied) {
Eric Dumazetcd8f76c2012-06-07 22:59:59 +0000768 int gso_segs = skb_shinfo(skb)->gso_segs;
769
Ajit Khapardec190e3c2009-09-04 03:12:29 +0000770 /* record the sent skb in the sent_skb table */
Sathya Perla3c8def92011-06-12 20:01:58 +0000771 BUG_ON(txo->sent_skb_list[start]);
772 txo->sent_skb_list[start] = skb;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700773
Ajit Khapardec190e3c2009-09-04 03:12:29 +0000774 /* Ensure txq has space for the next skb; Else stop the queue
775 * *BEFORE* ringing the tx doorbell, so that we serialze the
776 * tx compls of the current transmit which'll wake up the queue
777 */
Sathya Perla7101e112010-03-22 20:41:12 +0000778 atomic_add(wrb_cnt, &txq->used);
Ajit Khapardec190e3c2009-09-04 03:12:29 +0000779 if ((BE_MAX_TX_FRAG_COUNT + atomic_read(&txq->used)) >=
780 txq->len) {
Sathya Perla3c8def92011-06-12 20:01:58 +0000781 netif_stop_subqueue(netdev, skb_get_queue_mapping(skb));
Ajit Khapardec190e3c2009-09-04 03:12:29 +0000782 stopped = true;
783 }
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700784
Ajit Khapardec190e3c2009-09-04 03:12:29 +0000785 be_txq_notify(adapter, txq->id, wrb_cnt);
786
Eric Dumazetcd8f76c2012-06-07 22:59:59 +0000787 be_tx_stats_update(txo, wrb_cnt, copied, gso_segs, stopped);
Ajit Khapardec190e3c2009-09-04 03:12:29 +0000788 } else {
789 txq->head = start;
790 dev_kfree_skb_any(skb);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700791 }
Ajit Khaparde1ded1322011-12-09 13:53:17 +0000792tx_drop:
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700793 return NETDEV_TX_OK;
794}
795
796static int be_change_mtu(struct net_device *netdev, int new_mtu)
797{
798 struct be_adapter *adapter = netdev_priv(netdev);
799 if (new_mtu < BE_MIN_MTU ||
Ajit Khaparde34a89b82010-02-09 01:32:43 +0000800 new_mtu > (BE_MAX_JUMBO_FRAME_SIZE -
801 (ETH_HLEN + ETH_FCS_LEN))) {
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700802 dev_info(&adapter->pdev->dev,
803 "MTU must be between %d and %d bytes\n",
Ajit Khaparde34a89b82010-02-09 01:32:43 +0000804 BE_MIN_MTU,
805 (BE_MAX_JUMBO_FRAME_SIZE - (ETH_HLEN + ETH_FCS_LEN)));
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700806 return -EINVAL;
807 }
808 dev_info(&adapter->pdev->dev, "MTU changed from %d to %d bytes\n",
809 netdev->mtu, new_mtu);
810 netdev->mtu = new_mtu;
811 return 0;
812}
813
814/*
Ajit Khaparde82903e42010-02-09 01:34:57 +0000815 * A max of 64 (BE_NUM_VLANS_SUPPORTED) vlans can be configured in BE.
816 * If the user configures more, place BE in vlan promiscuous mode.
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700817 */
Sathya Perla10329df2012-06-05 19:37:18 +0000818static int be_vid_config(struct be_adapter *adapter)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700819{
Sathya Perla10329df2012-06-05 19:37:18 +0000820 u16 vids[BE_NUM_VLANS_SUPPORTED];
821 u16 num = 0, i;
Ajit Khaparde82903e42010-02-09 01:34:57 +0000822 int status = 0;
Ajit Khaparde1da87b72010-07-23 01:51:22 +0000823
Sathya Perlac0e64ef2011-08-02 19:57:43 +0000824 /* No need to further configure vids if in promiscuous mode */
825 if (adapter->promiscuous)
826 return 0;
827
Padmanabh Ratnakar0fc16eb2012-04-25 01:46:06 +0000828 if (adapter->vlans_added > adapter->max_vlans)
829 goto set_vlan_promisc;
830
831 /* Construct VLAN Table to give to HW */
832 for (i = 0; i < VLAN_N_VID; i++)
833 if (adapter->vlan_tag[i])
Sathya Perla10329df2012-06-05 19:37:18 +0000834 vids[num++] = cpu_to_le16(i);
Padmanabh Ratnakar0fc16eb2012-04-25 01:46:06 +0000835
836 status = be_cmd_vlan_config(adapter, adapter->if_handle,
Sathya Perla10329df2012-06-05 19:37:18 +0000837 vids, num, 1, 0);
Padmanabh Ratnakar0fc16eb2012-04-25 01:46:06 +0000838
839 /* Set to VLAN promisc mode as setting VLAN filter failed */
840 if (status) {
841 dev_info(&adapter->pdev->dev, "Exhausted VLAN HW filters.\n");
842 dev_info(&adapter->pdev->dev, "Disabling HW VLAN filtering.\n");
843 goto set_vlan_promisc;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700844 }
Ajit Khaparde1da87b72010-07-23 01:51:22 +0000845
Sathya Perlab31c50a2009-09-17 10:30:13 -0700846 return status;
Padmanabh Ratnakar0fc16eb2012-04-25 01:46:06 +0000847
848set_vlan_promisc:
849 status = be_cmd_vlan_config(adapter, adapter->if_handle,
850 NULL, 0, 1, 1);
851 return status;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700852}
853
Jiri Pirko8e586132011-12-08 19:52:37 -0500854static int be_vlan_add_vid(struct net_device *netdev, u16 vid)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700855{
856 struct be_adapter *adapter = netdev_priv(netdev);
Ajit Khaparde80817cb2011-12-30 12:15:12 +0000857 int status = 0;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700858
Ajit Khaparde80817cb2011-12-30 12:15:12 +0000859 if (!be_physfn(adapter)) {
860 status = -EINVAL;
861 goto ret;
862 }
Sarveshwar Bandiba343c72010-03-31 02:56:12 +0000863
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700864 adapter->vlan_tag[vid] = 1;
Ajit Khaparde82903e42010-02-09 01:34:57 +0000865 if (adapter->vlans_added <= (adapter->max_vlans + 1))
Sathya Perla10329df2012-06-05 19:37:18 +0000866 status = be_vid_config(adapter);
Jiri Pirko8e586132011-12-08 19:52:37 -0500867
Ajit Khaparde80817cb2011-12-30 12:15:12 +0000868 if (!status)
869 adapter->vlans_added++;
870 else
871 adapter->vlan_tag[vid] = 0;
872ret:
873 return status;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700874}
875
Jiri Pirko8e586132011-12-08 19:52:37 -0500876static int be_vlan_rem_vid(struct net_device *netdev, u16 vid)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700877{
878 struct be_adapter *adapter = netdev_priv(netdev);
Ajit Khaparde80817cb2011-12-30 12:15:12 +0000879 int status = 0;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700880
Ajit Khaparde80817cb2011-12-30 12:15:12 +0000881 if (!be_physfn(adapter)) {
882 status = -EINVAL;
883 goto ret;
884 }
Sarveshwar Bandiba343c72010-03-31 02:56:12 +0000885
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700886 adapter->vlan_tag[vid] = 0;
Ajit Khaparde82903e42010-02-09 01:34:57 +0000887 if (adapter->vlans_added <= adapter->max_vlans)
Sathya Perla10329df2012-06-05 19:37:18 +0000888 status = be_vid_config(adapter);
Jiri Pirko8e586132011-12-08 19:52:37 -0500889
Ajit Khaparde80817cb2011-12-30 12:15:12 +0000890 if (!status)
891 adapter->vlans_added--;
892 else
893 adapter->vlan_tag[vid] = 1;
894ret:
895 return status;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700896}
897
/* ndo_set_rx_mode handler: program the HW promiscuous / multicast /
 * unicast filters to match the netdev's current flags and address lists.
 */
static void be_set_rx_mode(struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	int status;

	if (netdev->flags & IFF_PROMISC) {
		be_cmd_rx_filter(adapter, IFF_PROMISC, ON);
		adapter->promiscuous = true;
		goto done;
	}

	/* BE was previously in promiscuous mode; disable it */
	if (adapter->promiscuous) {
		adapter->promiscuous = false;
		be_cmd_rx_filter(adapter, IFF_PROMISC, OFF);

		/* Re-program VLAN filters skipped while promiscuous
		 * (be_vid_config is a no-op in promiscuous mode) */
		if (adapter->vlans_added)
			be_vid_config(adapter);
	}

	/* Enable multicast promisc if num configured exceeds what we support */
	if (netdev->flags & IFF_ALLMULTI ||
			netdev_mc_count(netdev) > BE_MAX_MC) {
		be_cmd_rx_filter(adapter, IFF_ALLMULTI, ON);
		goto done;
	}

	/* Re-sync the HW unicast MAC filters when the netdev's list
	 * no longer matches the count we programmed last time */
	if (netdev_uc_count(netdev) != adapter->uc_macs) {
		struct netdev_hw_addr *ha;
		int i = 1; /* First slot is claimed by the Primary MAC */

		/* Delete all previously-programmed secondary MACs */
		for (; adapter->uc_macs > 0; adapter->uc_macs--, i++) {
			be_cmd_pmac_del(adapter, adapter->if_handle,
					adapter->pmac_id[i], 0);
		}

		/* More unicast addrs than pmac slots: go fully promiscuous */
		if (netdev_uc_count(netdev) > adapter->max_pmac_cnt) {
			be_cmd_rx_filter(adapter, IFF_PROMISC, ON);
			adapter->promiscuous = true;
			goto done;
		}

		netdev_for_each_uc_addr(ha, adapter->netdev) {
			adapter->uc_macs++; /* First slot is for Primary MAC */
			be_cmd_pmac_add(adapter, (u8 *)ha->addr,
					adapter->if_handle,
					&adapter->pmac_id[adapter->uc_macs], 0);
		}
	}

	status = be_cmd_rx_filter(adapter, IFF_MULTICAST, ON);

	/* Set to MCAST promisc mode if setting MULTICAST address fails */
	if (status) {
		dev_info(&adapter->pdev->dev, "Exhausted multicast HW filters.\n");
		dev_info(&adapter->pdev->dev, "Disabling HW multicast filtering.\n");
		be_cmd_rx_filter(adapter, IFF_ALLMULTI, ON);
	}
done:
	return;
}
959
Sarveshwar Bandiba343c72010-03-31 02:56:12 +0000960static int be_set_vf_mac(struct net_device *netdev, int vf, u8 *mac)
961{
962 struct be_adapter *adapter = netdev_priv(netdev);
Sathya Perla11ac75e2011-12-13 00:58:50 +0000963 struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf];
Sarveshwar Bandiba343c72010-03-31 02:56:12 +0000964 int status;
965
Sathya Perla11ac75e2011-12-13 00:58:50 +0000966 if (!sriov_enabled(adapter))
Sarveshwar Bandiba343c72010-03-31 02:56:12 +0000967 return -EPERM;
968
Sathya Perla11ac75e2011-12-13 00:58:50 +0000969 if (!is_valid_ether_addr(mac) || vf >= adapter->num_vfs)
Sarveshwar Bandiba343c72010-03-31 02:56:12 +0000970 return -EINVAL;
971
Padmanabh Ratnakar590c3912011-11-25 05:47:26 +0000972 if (lancer_chip(adapter)) {
973 status = be_cmd_set_mac_list(adapter, mac, 1, vf + 1);
974 } else {
Sathya Perla11ac75e2011-12-13 00:58:50 +0000975 status = be_cmd_pmac_del(adapter, vf_cfg->if_handle,
976 vf_cfg->pmac_id, vf + 1);
Sarveshwar Bandiba343c72010-03-31 02:56:12 +0000977
Sathya Perla11ac75e2011-12-13 00:58:50 +0000978 status = be_cmd_pmac_add(adapter, mac, vf_cfg->if_handle,
979 &vf_cfg->pmac_id, vf + 1);
Padmanabh Ratnakar590c3912011-11-25 05:47:26 +0000980 }
981
Ajit Khaparde64600ea2010-07-23 01:50:34 +0000982 if (status)
Sarveshwar Bandiba343c72010-03-31 02:56:12 +0000983 dev_err(&adapter->pdev->dev, "MAC %pM set on VF %d Failed\n",
984 mac, vf);
Ajit Khaparde64600ea2010-07-23 01:50:34 +0000985 else
Sathya Perla11ac75e2011-12-13 00:58:50 +0000986 memcpy(vf_cfg->mac_addr, mac, ETH_ALEN);
Ajit Khaparde64600ea2010-07-23 01:50:34 +0000987
Sarveshwar Bandiba343c72010-03-31 02:56:12 +0000988 return status;
989}
990
Ajit Khaparde64600ea2010-07-23 01:50:34 +0000991static int be_get_vf_config(struct net_device *netdev, int vf,
992 struct ifla_vf_info *vi)
993{
994 struct be_adapter *adapter = netdev_priv(netdev);
Sathya Perla11ac75e2011-12-13 00:58:50 +0000995 struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf];
Ajit Khaparde64600ea2010-07-23 01:50:34 +0000996
Sathya Perla11ac75e2011-12-13 00:58:50 +0000997 if (!sriov_enabled(adapter))
Ajit Khaparde64600ea2010-07-23 01:50:34 +0000998 return -EPERM;
999
Sathya Perla11ac75e2011-12-13 00:58:50 +00001000 if (vf >= adapter->num_vfs)
Ajit Khaparde64600ea2010-07-23 01:50:34 +00001001 return -EINVAL;
1002
1003 vi->vf = vf;
Sathya Perla11ac75e2011-12-13 00:58:50 +00001004 vi->tx_rate = vf_cfg->tx_rate;
1005 vi->vlan = vf_cfg->vlan_tag;
Ajit Khaparde64600ea2010-07-23 01:50:34 +00001006 vi->qos = 0;
Sathya Perla11ac75e2011-12-13 00:58:50 +00001007 memcpy(&vi->mac, vf_cfg->mac_addr, ETH_ALEN);
Ajit Khaparde64600ea2010-07-23 01:50:34 +00001008
1009 return 0;
1010}
1011
Ajit Khaparde1da87b72010-07-23 01:51:22 +00001012static int be_set_vf_vlan(struct net_device *netdev,
1013 int vf, u16 vlan, u8 qos)
1014{
1015 struct be_adapter *adapter = netdev_priv(netdev);
1016 int status = 0;
1017
Sathya Perla11ac75e2011-12-13 00:58:50 +00001018 if (!sriov_enabled(adapter))
Ajit Khaparde1da87b72010-07-23 01:51:22 +00001019 return -EPERM;
1020
Sathya Perla11ac75e2011-12-13 00:58:50 +00001021 if (vf >= adapter->num_vfs || vlan > 4095)
Ajit Khaparde1da87b72010-07-23 01:51:22 +00001022 return -EINVAL;
1023
1024 if (vlan) {
Ajit Khapardef1f3ee12012-03-18 06:23:41 +00001025 if (adapter->vf_cfg[vf].vlan_tag != vlan) {
1026 /* If this is new value, program it. Else skip. */
1027 adapter->vf_cfg[vf].vlan_tag = vlan;
1028
1029 status = be_cmd_set_hsw_config(adapter, vlan,
1030 vf + 1, adapter->vf_cfg[vf].if_handle);
1031 }
Ajit Khaparde1da87b72010-07-23 01:51:22 +00001032 } else {
Ajit Khapardef1f3ee12012-03-18 06:23:41 +00001033 /* Reset Transparent Vlan Tagging. */
Sathya Perla11ac75e2011-12-13 00:58:50 +00001034 adapter->vf_cfg[vf].vlan_tag = 0;
Ajit Khapardef1f3ee12012-03-18 06:23:41 +00001035 vlan = adapter->vf_cfg[vf].def_vid;
1036 status = be_cmd_set_hsw_config(adapter, vlan, vf + 1,
1037 adapter->vf_cfg[vf].if_handle);
Ajit Khaparde1da87b72010-07-23 01:51:22 +00001038 }
1039
Ajit Khaparde1da87b72010-07-23 01:51:22 +00001040
1041 if (status)
1042 dev_info(&adapter->pdev->dev,
1043 "VLAN %d config on VF %d failed\n", vlan, vf);
1044 return status;
1045}
1046
Ajit Khapardee1d18732010-07-23 01:52:13 +00001047static int be_set_vf_tx_rate(struct net_device *netdev,
1048 int vf, int rate)
1049{
1050 struct be_adapter *adapter = netdev_priv(netdev);
1051 int status = 0;
1052
Sathya Perla11ac75e2011-12-13 00:58:50 +00001053 if (!sriov_enabled(adapter))
Ajit Khapardee1d18732010-07-23 01:52:13 +00001054 return -EPERM;
1055
Ajit Khaparde94f434c2011-12-30 12:15:30 +00001056 if (vf >= adapter->num_vfs)
Ajit Khapardee1d18732010-07-23 01:52:13 +00001057 return -EINVAL;
1058
Ajit Khaparde94f434c2011-12-30 12:15:30 +00001059 if (rate < 100 || rate > 10000) {
1060 dev_err(&adapter->pdev->dev,
1061 "tx rate must be between 100 and 10000 Mbps\n");
1062 return -EINVAL;
1063 }
Ajit Khapardee1d18732010-07-23 01:52:13 +00001064
Ajit Khaparde856c4012011-02-11 13:32:32 +00001065 status = be_cmd_set_qos(adapter, rate / 10, vf + 1);
Ajit Khapardee1d18732010-07-23 01:52:13 +00001066
1067 if (status)
Ajit Khaparde94f434c2011-12-30 12:15:30 +00001068 dev_err(&adapter->pdev->dev,
Ajit Khapardee1d18732010-07-23 01:52:13 +00001069 "tx rate %d on VF %d failed\n", rate, vf);
Ajit Khaparde94f434c2011-12-30 12:15:30 +00001070 else
1071 adapter->vf_cfg[vf].tx_rate = rate;
Ajit Khapardee1d18732010-07-23 01:52:13 +00001072 return status;
1073}
1074
/* Count this adapter's VFs by scanning the PCI bus.
 * @vf_state selects what to count: ASSIGNED counts only VFs currently
 * assigned to a guest (PCI_DEV_FLAGS_ASSIGNED); any other value counts
 * all VFs found. Returns 0 when the SR-IOV capability is absent.
 */
static int be_find_vfs(struct be_adapter *adapter, int vf_state)
{
	struct pci_dev *dev, *pdev = adapter->pdev;
	int vfs = 0, assigned_vfs = 0, pos, vf_fn;
	u16 offset, stride;

	pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_SRIOV);
	if (!pos)
		return 0;
	/* VF routing IDs are derived from the PF's devfn plus the SR-IOV
	 * capability's First VF Offset and VF Stride registers */
	pci_read_config_word(pdev, pos + PCI_SRIOV_VF_OFFSET, &offset);
	pci_read_config_word(pdev, pos + PCI_SRIOV_VF_STRIDE, &stride);

	/* Walk all devices with our vendor id; a virtfn on the same bus
	 * whose devfn matches the next expected VF routing ID is ours.
	 * pci_get_device() drops the previous ref, so no leak here. */
	dev = pci_get_device(pdev->vendor, PCI_ANY_ID, NULL);
	while (dev) {
		vf_fn = (pdev->devfn + offset + stride * vfs) & 0xFFFF;
		if (dev->is_virtfn && dev->devfn == vf_fn &&
			dev->bus->number == pdev->bus->number) {
			vfs++;
			if (dev->dev_flags & PCI_DEV_FLAGS_ASSIGNED)
				assigned_vfs++;
		}
		dev = pci_get_device(pdev->vendor, PCI_ANY_ID, dev);
	}
	return (vf_state == ASSIGNED) ? assigned_vfs : vfs;
}
1100
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001101static void be_eqd_update(struct be_adapter *adapter, struct be_eq_obj *eqo)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001102{
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001103 struct be_rx_stats *stats = rx_stats(&adapter->rx_obj[eqo->idx]);
Sathya Perla4097f662009-03-24 16:40:13 -07001104 ulong now = jiffies;
Sathya Perlaac124ff2011-07-25 19:10:14 +00001105 ulong delta = now - stats->rx_jiffies;
Sathya Perlaab1594e2011-07-25 19:10:15 +00001106 u64 pkts;
1107 unsigned int start, eqd;
Sathya Perlaac124ff2011-07-25 19:10:14 +00001108
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001109 if (!eqo->enable_aic) {
1110 eqd = eqo->eqd;
1111 goto modify_eqd;
1112 }
1113
1114 if (eqo->idx >= adapter->num_rx_qs)
Sathya Perlaac124ff2011-07-25 19:10:14 +00001115 return;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001116
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001117 stats = rx_stats(&adapter->rx_obj[eqo->idx]);
1118
Sathya Perla4097f662009-03-24 16:40:13 -07001119 /* Wrapped around */
Sathya Perla3abcded2010-10-03 22:12:27 -07001120 if (time_before(now, stats->rx_jiffies)) {
1121 stats->rx_jiffies = now;
Sathya Perla4097f662009-03-24 16:40:13 -07001122 return;
1123 }
1124
Sathya Perlaac124ff2011-07-25 19:10:14 +00001125 /* Update once a second */
1126 if (delta < HZ)
Sathya Perla4097f662009-03-24 16:40:13 -07001127 return;
1128
Sathya Perlaab1594e2011-07-25 19:10:15 +00001129 do {
1130 start = u64_stats_fetch_begin_bh(&stats->sync);
1131 pkts = stats->rx_pkts;
1132 } while (u64_stats_fetch_retry_bh(&stats->sync, start));
1133
Eric Dumazet68c3e5a2011-08-09 06:23:07 +00001134 stats->rx_pps = (unsigned long)(pkts - stats->rx_pkts_prev) / (delta / HZ);
Sathya Perlaab1594e2011-07-25 19:10:15 +00001135 stats->rx_pkts_prev = pkts;
Sathya Perla3abcded2010-10-03 22:12:27 -07001136 stats->rx_jiffies = now;
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001137 eqd = (stats->rx_pps / 110000) << 3;
1138 eqd = min(eqd, eqo->max_eqd);
1139 eqd = max(eqd, eqo->min_eqd);
Sathya Perlaac124ff2011-07-25 19:10:14 +00001140 if (eqd < 10)
1141 eqd = 0;
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001142
1143modify_eqd:
1144 if (eqd != eqo->cur_eqd) {
1145 be_cmd_modify_eqd(adapter, eqo->q.id, eqd);
1146 eqo->cur_eqd = eqd;
Sathya Perlaac124ff2011-07-25 19:10:14 +00001147 }
Sathya Perla4097f662009-03-24 16:40:13 -07001148}
1149
Sathya Perla3abcded2010-10-03 22:12:27 -07001150static void be_rx_stats_update(struct be_rx_obj *rxo,
Sathya Perla2e588f82011-03-11 02:49:26 +00001151 struct be_rx_compl_info *rxcp)
Sathya Perla4097f662009-03-24 16:40:13 -07001152{
Sathya Perlaac124ff2011-07-25 19:10:14 +00001153 struct be_rx_stats *stats = rx_stats(rxo);
Sathya Perla4097f662009-03-24 16:40:13 -07001154
Sathya Perlaab1594e2011-07-25 19:10:15 +00001155 u64_stats_update_begin(&stats->sync);
Sathya Perla3abcded2010-10-03 22:12:27 -07001156 stats->rx_compl++;
Sathya Perla2e588f82011-03-11 02:49:26 +00001157 stats->rx_bytes += rxcp->pkt_size;
Sathya Perla3abcded2010-10-03 22:12:27 -07001158 stats->rx_pkts++;
Sathya Perla2e588f82011-03-11 02:49:26 +00001159 if (rxcp->pkt_type == BE_MULTICAST_PACKET)
Sathya Perla3abcded2010-10-03 22:12:27 -07001160 stats->rx_mcast_pkts++;
Sathya Perla2e588f82011-03-11 02:49:26 +00001161 if (rxcp->err)
Sathya Perlaac124ff2011-07-25 19:10:14 +00001162 stats->rx_compl_err++;
Sathya Perlaab1594e2011-07-25 19:10:15 +00001163 u64_stats_update_end(&stats->sync);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001164}
1165
Sathya Perla2e588f82011-03-11 02:49:26 +00001166static inline bool csum_passed(struct be_rx_compl_info *rxcp)
Ajit Khaparde728a9972009-04-13 15:41:22 -07001167{
Padmanabh Ratnakar19fad862011-03-07 03:08:16 +00001168 /* L4 checksum is not reliable for non TCP/UDP packets.
1169 * Also ignore ipcksm for ipv6 pkts */
Sathya Perla2e588f82011-03-11 02:49:26 +00001170 return (rxcp->tcpf || rxcp->udpf) && rxcp->l4_csum &&
1171 (rxcp->ip_csum || rxcp->ipv6);
Ajit Khaparde728a9972009-04-13 15:41:22 -07001172}
1173
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001174static struct be_rx_page_info *get_rx_page_info(struct be_rx_obj *rxo,
1175 u16 frag_idx)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001176{
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001177 struct be_adapter *adapter = rxo->adapter;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001178 struct be_rx_page_info *rx_page_info;
Sathya Perla3abcded2010-10-03 22:12:27 -07001179 struct be_queue_info *rxq = &rxo->q;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001180
Sathya Perla3abcded2010-10-03 22:12:27 -07001181 rx_page_info = &rxo->page_info_tbl[frag_idx];
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001182 BUG_ON(!rx_page_info->page);
1183
Ajit Khaparde205859a2010-02-09 01:34:21 +00001184 if (rx_page_info->last_page_user) {
Ivan Vecera2b7bceb2011-02-02 08:05:12 +00001185 dma_unmap_page(&adapter->pdev->dev,
1186 dma_unmap_addr(rx_page_info, bus),
1187 adapter->big_page_size, DMA_FROM_DEVICE);
Ajit Khaparde205859a2010-02-09 01:34:21 +00001188 rx_page_info->last_page_user = false;
1189 }
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001190
1191 atomic_dec(&rxq->used);
1192 return rx_page_info;
1193}
1194
1195/* Throwaway the data in the Rx completion */
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001196static void be_rx_compl_discard(struct be_rx_obj *rxo,
1197 struct be_rx_compl_info *rxcp)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001198{
Sathya Perla3abcded2010-10-03 22:12:27 -07001199 struct be_queue_info *rxq = &rxo->q;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001200 struct be_rx_page_info *page_info;
Sathya Perla2e588f82011-03-11 02:49:26 +00001201 u16 i, num_rcvd = rxcp->num_rcvd;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001202
Padmanabh Ratnakare80d9da2011-03-07 03:07:58 +00001203 for (i = 0; i < num_rcvd; i++) {
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001204 page_info = get_rx_page_info(rxo, rxcp->rxq_idx);
Padmanabh Ratnakare80d9da2011-03-07 03:07:58 +00001205 put_page(page_info->page);
1206 memset(page_info, 0, sizeof(*page_info));
Sathya Perla2e588f82011-03-11 02:49:26 +00001207 index_inc(&rxcp->rxq_idx, rxq->len);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001208 }
1209}
1210
/*
 * skb_fill_rx_data forms a complete skb for an ether frame
 * indicated by rxcp: the first bytes are copied into the skb's linear
 * area and the remaining rx frag pages are attached as page frags,
 * coalescing consecutive frags that share a physical page.
 */
static void skb_fill_rx_data(struct be_rx_obj *rxo, struct sk_buff *skb,
			struct be_rx_compl_info *rxcp)
{
	struct be_queue_info *rxq = &rxo->q;
	struct be_rx_page_info *page_info;
	u16 i, j;
	u16 hdr_len, curr_frag_len, remaining;
	u8 *start;

	page_info = get_rx_page_info(rxo, rxcp->rxq_idx);
	start = page_address(page_info->page) + page_info->page_offset;
	prefetch(start);

	/* Copy data in the first descriptor of this completion */
	curr_frag_len = min(rxcp->pkt_size, rx_frag_size);

	skb->len = curr_frag_len;
	if (curr_frag_len <= BE_HDR_LEN) { /* tiny packet */
		/* Whole frame fits in the linear area; rx page not needed */
		memcpy(skb->data, start, curr_frag_len);
		/* Complete packet has now been moved to data */
		put_page(page_info->page);
		skb->data_len = 0;
		skb->tail += curr_frag_len;
	} else {
		/* Copy only the ethernet header inline; attach the rest of
		 * this first fragment as page frag 0 */
		hdr_len = ETH_HLEN;
		memcpy(skb->data, start, hdr_len);
		skb_shinfo(skb)->nr_frags = 1;
		skb_frag_set_page(skb, 0, page_info->page);
		skb_shinfo(skb)->frags[0].page_offset =
					page_info->page_offset + hdr_len;
		skb_frag_size_set(&skb_shinfo(skb)->frags[0], curr_frag_len - hdr_len);
		skb->data_len = curr_frag_len - hdr_len;
		skb->truesize += rx_frag_size;
		skb->tail += hdr_len;
	}
	/* Page ownership has moved to the skb (or been dropped above) */
	page_info->page = NULL;

	if (rxcp->pkt_size <= rx_frag_size) {
		BUG_ON(rxcp->num_rcvd != 1);
		return;
	}

	/* More frags present for this completion */
	index_inc(&rxcp->rxq_idx, rxq->len);
	remaining = rxcp->pkt_size - curr_frag_len;
	for (i = 1, j = 0; i < rxcp->num_rcvd; i++) {
		page_info = get_rx_page_info(rxo, rxcp->rxq_idx);
		curr_frag_len = min(remaining, rx_frag_size);

		/* Coalesce all frags from the same physical page in one slot */
		if (page_info->page_offset == 0) {
			/* Fresh page */
			j++;
			skb_frag_set_page(skb, j, page_info->page);
			skb_shinfo(skb)->frags[j].page_offset =
							page_info->page_offset;
			skb_frag_size_set(&skb_shinfo(skb)->frags[j], 0);
			skb_shinfo(skb)->nr_frags++;
		} else {
			/* Same page as the previous frag: drop the extra
			 * page ref; frag j below just grows in size */
			put_page(page_info->page);
		}

		skb_frag_size_add(&skb_shinfo(skb)->frags[j], curr_frag_len);
		skb->len += curr_frag_len;
		skb->data_len += curr_frag_len;
		skb->truesize += rx_frag_size;
		remaining -= curr_frag_len;
		index_inc(&rxcp->rxq_idx, rxq->len);
		page_info->page = NULL;
	}
	BUG_ON(j > MAX_SKB_FRAGS);
}
1287
/* Process the RX completion indicated by rxcp when GRO is disabled.
 * Allocates a fresh skb, attaches the received page fragments to it and
 * hands it to the stack via netif_receive_skb().
 */
static void be_rx_compl_process(struct be_rx_obj *rxo,
			struct be_rx_compl_info *rxcp)
{
	struct be_adapter *adapter = rxo->adapter;
	struct net_device *netdev = adapter->netdev;
	struct sk_buff *skb;

	skb = netdev_alloc_skb_ip_align(netdev, BE_RX_SKB_ALLOC_SIZE);
	if (unlikely(!skb)) {
		/* No skb memory: count the drop and recycle the posted
		 * rx buffers belonging to this completion */
		rx_stats(rxo)->rx_drops_no_skbs++;
		be_rx_compl_discard(rxo, rxcp);
		return;
	}

	/* Copy/attach the rx page frags described by rxcp into the skb */
	skb_fill_rx_data(rxo, skb, rxcp);

	/* Trust HW checksum only when the device offload is enabled and
	 * the completion reports the checksum as valid */
	if (likely((netdev->features & NETIF_F_RXCSUM) && csum_passed(rxcp)))
		skb->ip_summed = CHECKSUM_UNNECESSARY;
	else
		skb_checksum_none_assert(skb);

	skb->protocol = eth_type_trans(skb, netdev);
	/* Record which rx ring this frame arrived on (index into rx_obj[]) */
	skb_record_rx_queue(skb, rxo - &adapter->rx_obj[0]);
	if (netdev->features & NETIF_F_RXHASH)
		skb->rxhash = rxcp->rss_hash;


	if (rxcp->vlanf)
		__vlan_hwaccel_put_tag(skb, rxcp->vlan_tag);

	netif_receive_skb(skb);
}
1321
/* Process the RX completion indicated by rxcp when GRO is enabled.
 * Builds a frag-only skb from the posted rx pages and feeds it to the
 * GRO engine via napi_gro_frags(); no header copy is done here.
 */
void be_rx_compl_process_gro(struct be_rx_obj *rxo, struct napi_struct *napi,
			     struct be_rx_compl_info *rxcp)
{
	struct be_adapter *adapter = rxo->adapter;
	struct be_rx_page_info *page_info;
	struct sk_buff *skb = NULL;
	struct be_queue_info *rxq = &rxo->q;
	u16 remaining, curr_frag_len;
	u16 i, j;

	skb = napi_get_frags(napi);
	if (!skb) {
		/* No GRO skb available: recycle the posted buffers */
		be_rx_compl_discard(rxo, rxcp);
		return;
	}

	remaining = rxcp->pkt_size;
	/* j starts at -1 (wraps in u16) and is bumped to 0 on the first
	 * iteration since i == 0 forces the "fresh slot" branch */
	for (i = 0, j = -1; i < rxcp->num_rcvd; i++) {
		page_info = get_rx_page_info(rxo, rxcp->rxq_idx);

		curr_frag_len = min(remaining, rx_frag_size);

		/* Coalesce all frags from the same physical page in one slot */
		if (i == 0 || page_info->page_offset == 0) {
			/* First frag or Fresh page */
			j++;
			skb_frag_set_page(skb, j, page_info->page);
			skb_shinfo(skb)->frags[j].page_offset =
							page_info->page_offset;
			skb_frag_size_set(&skb_shinfo(skb)->frags[j], 0);
		} else {
			/* Same page as previous frag: drop the extra ref
			 * taken when the buffer was posted */
			put_page(page_info->page);
		}
		skb_frag_size_add(&skb_shinfo(skb)->frags[j], curr_frag_len);
		skb->truesize += rx_frag_size;
		remaining -= curr_frag_len;
		index_inc(&rxcp->rxq_idx, rxq->len);
		memset(page_info, 0, sizeof(*page_info));
	}
	BUG_ON(j > MAX_SKB_FRAGS);

	skb_shinfo(skb)->nr_frags = j + 1;
	skb->len = rxcp->pkt_size;
	skb->data_len = rxcp->pkt_size;
	skb->ip_summed = CHECKSUM_UNNECESSARY;
	skb_record_rx_queue(skb, rxo - &adapter->rx_obj[0]);
	if (adapter->netdev->features & NETIF_F_RXHASH)
		skb->rxhash = rxcp->rss_hash;

	if (rxcp->vlanf)
		__vlan_hwaccel_put_tag(skb, rxcp->vlan_tag);

	napi_gro_frags(napi);
}
1377
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001378static void be_parse_rx_compl_v1(struct be_eth_rx_compl *compl,
1379 struct be_rx_compl_info *rxcp)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001380{
Sathya Perla2e588f82011-03-11 02:49:26 +00001381 rxcp->pkt_size =
1382 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, pktsize, compl);
1383 rxcp->vlanf = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, vtp, compl);
1384 rxcp->err = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, err, compl);
1385 rxcp->tcpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, tcpf, compl);
Padmanabh Ratnakar9ecb42f2011-03-15 14:57:09 -07001386 rxcp->udpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, udpf, compl);
Sathya Perla2e588f82011-03-11 02:49:26 +00001387 rxcp->ip_csum =
1388 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, ipcksm, compl);
1389 rxcp->l4_csum =
1390 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, l4_cksm, compl);
1391 rxcp->ipv6 =
1392 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, ip_version, compl);
1393 rxcp->rxq_idx =
1394 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, fragndx, compl);
1395 rxcp->num_rcvd =
1396 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, numfrags, compl);
1397 rxcp->pkt_type =
1398 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, cast_enc, compl);
Ajit Khaparde4b972912011-04-06 18:07:43 +00001399 rxcp->rss_hash =
Sarveshwar Bandic2979772012-07-25 21:29:50 +00001400 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, rsshash, compl);
Sathya Perla15d72182011-03-21 20:49:26 +00001401 if (rxcp->vlanf) {
1402 rxcp->vtm = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, vtm,
David S. Miller3c709f82011-05-11 14:26:15 -04001403 compl);
1404 rxcp->vlan_tag = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, vlan_tag,
1405 compl);
Sathya Perla15d72182011-03-21 20:49:26 +00001406 }
Sathya Perla12004ae2011-08-02 19:57:46 +00001407 rxcp->port = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, port, compl);
Sathya Perla2e588f82011-03-11 02:49:26 +00001408}
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001409
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001410static void be_parse_rx_compl_v0(struct be_eth_rx_compl *compl,
1411 struct be_rx_compl_info *rxcp)
Sathya Perla2e588f82011-03-11 02:49:26 +00001412{
1413 rxcp->pkt_size =
1414 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, pktsize, compl);
1415 rxcp->vlanf = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, vtp, compl);
1416 rxcp->err = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, err, compl);
1417 rxcp->tcpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, tcpf, compl);
Padmanabh Ratnakar9ecb42f2011-03-15 14:57:09 -07001418 rxcp->udpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, udpf, compl);
Sathya Perla2e588f82011-03-11 02:49:26 +00001419 rxcp->ip_csum =
1420 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, ipcksm, compl);
1421 rxcp->l4_csum =
1422 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, l4_cksm, compl);
1423 rxcp->ipv6 =
1424 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, ip_version, compl);
1425 rxcp->rxq_idx =
1426 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, fragndx, compl);
1427 rxcp->num_rcvd =
1428 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, numfrags, compl);
1429 rxcp->pkt_type =
1430 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, cast_enc, compl);
Ajit Khaparde4b972912011-04-06 18:07:43 +00001431 rxcp->rss_hash =
Sarveshwar Bandic2979772012-07-25 21:29:50 +00001432 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, rsshash, compl);
Sathya Perla15d72182011-03-21 20:49:26 +00001433 if (rxcp->vlanf) {
1434 rxcp->vtm = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, vtm,
David S. Miller3c709f82011-05-11 14:26:15 -04001435 compl);
1436 rxcp->vlan_tag = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, vlan_tag,
1437 compl);
Sathya Perla15d72182011-03-21 20:49:26 +00001438 }
Sathya Perla12004ae2011-08-02 19:57:46 +00001439 rxcp->port = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, port, compl);
Sathya Perla2e588f82011-03-11 02:49:26 +00001440}
1441
/* Fetch the next valid RX completion from rxo's CQ, or NULL if none.
 * The returned rxcp is rxo's single embedded scratch struct, so it is
 * only valid until the next call for this rxo.
 */
static struct be_rx_compl_info *be_rx_compl_get(struct be_rx_obj *rxo)
{
	struct be_eth_rx_compl *compl = queue_tail_node(&rxo->cq);
	struct be_rx_compl_info *rxcp = &rxo->rxcp;
	struct be_adapter *adapter = rxo->adapter;

	/* For checking the valid bit it is Ok to use either definition as the
	 * valid bit is at the same position in both v0 and v1 Rx compl */
	if (compl->dw[offsetof(struct amap_eth_rx_compl_v1, valid) / 32] == 0)
		return NULL;

	/* Read barrier: ensure the valid bit is read before the rest of
	 * the DMA'd completion entry */
	rmb();
	be_dws_le_to_cpu(compl, sizeof(*compl));

	/* be3_native selects the newer (v1) completion layout */
	if (adapter->be3_native)
		be_parse_rx_compl_v1(compl, rxcp);
	else
		be_parse_rx_compl_v0(compl, rxcp);

	if (rxcp->vlanf) {
		/* vlanf could be wrongly set in some cards.
		 * ignore if vtm is not set */
		if ((adapter->function_mode & FLEX10_MODE) && !rxcp->vtm)
			rxcp->vlanf = 0;

		/* BE parts report the tag byte-swapped; Lancer does not */
		if (!lancer_chip(adapter))
			rxcp->vlan_tag = swab16(rxcp->vlan_tag);

		/* Strip the tag if it is the port-vlan and the vlan is not
		 * configured on this interface */
		if (adapter->pvid == (rxcp->vlan_tag & VLAN_VID_MASK) &&
		    !adapter->vlan_tag[rxcp->vlan_tag])
			rxcp->vlanf = 0;
	}

	/* As the compl has been parsed, reset it; we wont touch it again */
	compl->dw[offsetof(struct amap_eth_rx_compl_v1, valid) / 32] = 0;

	queue_tail_inc(&rxo->cq);
	return rxcp;
}
1481
Eric Dumazet1829b082011-03-01 05:48:12 +00001482static inline struct page *be_alloc_pages(u32 size, gfp_t gfp)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001483{
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001484 u32 order = get_order(size);
Eric Dumazet1829b082011-03-01 05:48:12 +00001485
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001486 if (order > 0)
Eric Dumazet1829b082011-03-01 05:48:12 +00001487 gfp |= __GFP_COMP;
1488 return alloc_pages(gfp, order);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001489}
1490
1491/*
1492 * Allocate a page, split it to fragments of size rx_frag_size and post as
1493 * receive buffers to BE
1494 */
Eric Dumazet1829b082011-03-01 05:48:12 +00001495static void be_post_rx_frags(struct be_rx_obj *rxo, gfp_t gfp)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001496{
Sathya Perla3abcded2010-10-03 22:12:27 -07001497 struct be_adapter *adapter = rxo->adapter;
Sathya Perla26d92f92010-01-21 22:52:08 -08001498 struct be_rx_page_info *page_info = NULL, *prev_page_info = NULL;
Sathya Perla3abcded2010-10-03 22:12:27 -07001499 struct be_queue_info *rxq = &rxo->q;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001500 struct page *pagep = NULL;
1501 struct be_eth_rx_d *rxd;
1502 u64 page_dmaaddr = 0, frag_dmaaddr;
1503 u32 posted, page_offset = 0;
1504
Sathya Perla3abcded2010-10-03 22:12:27 -07001505 page_info = &rxo->page_info_tbl[rxq->head];
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001506 for (posted = 0; posted < MAX_RX_POST && !page_info->page; posted++) {
1507 if (!pagep) {
Eric Dumazet1829b082011-03-01 05:48:12 +00001508 pagep = be_alloc_pages(adapter->big_page_size, gfp);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001509 if (unlikely(!pagep)) {
Sathya Perlaac124ff2011-07-25 19:10:14 +00001510 rx_stats(rxo)->rx_post_fail++;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001511 break;
1512 }
Ivan Vecera2b7bceb2011-02-02 08:05:12 +00001513 page_dmaaddr = dma_map_page(&adapter->pdev->dev, pagep,
1514 0, adapter->big_page_size,
1515 DMA_FROM_DEVICE);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001516 page_info->page_offset = 0;
1517 } else {
1518 get_page(pagep);
1519 page_info->page_offset = page_offset + rx_frag_size;
1520 }
1521 page_offset = page_info->page_offset;
1522 page_info->page = pagep;
FUJITA Tomonorifac6da52010-04-01 16:53:22 +00001523 dma_unmap_addr_set(page_info, bus, page_dmaaddr);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001524 frag_dmaaddr = page_dmaaddr + page_info->page_offset;
1525
1526 rxd = queue_head_node(rxq);
1527 rxd->fragpa_lo = cpu_to_le32(frag_dmaaddr & 0xFFFFFFFF);
1528 rxd->fragpa_hi = cpu_to_le32(upper_32_bits(frag_dmaaddr));
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001529
1530 /* Any space left in the current big page for another frag? */
1531 if ((page_offset + rx_frag_size + rx_frag_size) >
1532 adapter->big_page_size) {
1533 pagep = NULL;
1534 page_info->last_page_user = true;
1535 }
Sathya Perla26d92f92010-01-21 22:52:08 -08001536
1537 prev_page_info = page_info;
1538 queue_head_inc(rxq);
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001539 page_info = &rxo->page_info_tbl[rxq->head];
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001540 }
1541 if (pagep)
Sathya Perla26d92f92010-01-21 22:52:08 -08001542 prev_page_info->last_page_user = true;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001543
1544 if (posted) {
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001545 atomic_add(posted, &rxq->used);
Sathya Perla8788fdc2009-07-27 22:52:03 +00001546 be_rxq_notify(adapter, rxq->id, posted);
Sathya Perlaea1dae12009-03-19 23:56:20 -07001547 } else if (atomic_read(&rxq->used) == 0) {
1548 /* Let be_worker replenish when memory is available */
Sathya Perla3abcded2010-10-03 22:12:27 -07001549 rxo->rx_post_starved = true;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001550 }
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001551}
1552
/* Fetch the next valid TX completion from tx_cq, or NULL if none.
 * The entry's valid bit is cleared and the CQ tail advanced before
 * returning, so each completion is consumed exactly once.
 */
static struct be_eth_tx_compl *be_tx_compl_get(struct be_queue_info *tx_cq)
{
	struct be_eth_tx_compl *txcp = queue_tail_node(tx_cq);

	if (txcp->dw[offsetof(struct amap_eth_tx_compl, valid) / 32] == 0)
		return NULL;

	/* Read barrier: valid bit must be observed before the payload */
	rmb();
	be_dws_le_to_cpu(txcp, sizeof(*txcp));

	txcp->dw[offsetof(struct amap_eth_tx_compl, valid) / 32] = 0;

	queue_tail_inc(tx_cq);
	return txcp;
}
1568
/* Reclaim one transmitted skb whose completion (for wrb index last_index)
 * has arrived: unmap all of its WRBs, free the skb, and return the number
 * of WRBs consumed (including the header wrb) so the caller can adjust
 * txq->used.
 */
static u16 be_tx_compl_process(struct be_adapter *adapter,
		struct be_tx_obj *txo, u16 last_index)
{
	struct be_queue_info *txq = &txo->q;
	struct be_eth_wrb *wrb;
	struct sk_buff **sent_skbs = txo->sent_skb_list;
	struct sk_buff *sent_skb;
	u16 cur_index, num_wrbs = 1; /* account for hdr wrb */
	bool unmap_skb_hdr = true;

	sent_skb = sent_skbs[txq->tail];
	BUG_ON(!sent_skb);
	sent_skbs[txq->tail] = NULL;

	/* skip header wrb */
	queue_tail_inc(txq);

	do {
		cur_index = txq->tail;
		wrb = queue_tail_node(txq);
		/* Unmap the skb header only for the first data wrb and only
		 * if the skb actually has linear (header) data */
		unmap_tx_frag(&adapter->pdev->dev, wrb,
			      (unmap_skb_hdr && skb_headlen(sent_skb)));
		unmap_skb_hdr = false;

		num_wrbs++;
		queue_tail_inc(txq);
	} while (cur_index != last_index);

	kfree_skb(sent_skb);
	return num_wrbs;
}
1600
/* Return the number of events in the event queue.
 * Consumes entries from the EQ tail until a zero (invalid) entry is seen;
 * each consumed entry's evt word is cleared so it is not counted twice.
 */
static inline int events_get(struct be_eq_obj *eqo)
{
	struct be_eq_entry *eqe;
	int num = 0;

	do {
		eqe = queue_tail_node(&eqo->q);
		if (eqe->evt == 0)
			break;

		/* Read barrier between reading evt and clearing it */
		rmb();
		eqe->evt = 0;
		num++;
		queue_tail_inc(&eqo->q);
	} while (true);

	return num;
}
1620
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001621static int event_handle(struct be_eq_obj *eqo)
Sathya Perla859b1e42009-08-10 03:43:51 +00001622{
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001623 bool rearm = false;
1624 int num = events_get(eqo);
Sathya Perla859b1e42009-08-10 03:43:51 +00001625
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001626 /* Deal with any spurious interrupts that come without events */
1627 if (!num)
1628 rearm = true;
Sathya Perla859b1e42009-08-10 03:43:51 +00001629
Padmanabh Ratnakaraf311fe2012-04-25 01:46:39 +00001630 if (num || msix_enabled(eqo->adapter))
1631 be_eq_notify(eqo->adapter, eqo->q.id, rearm, true, num);
1632
Sathya Perla859b1e42009-08-10 03:43:51 +00001633 if (num)
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001634 napi_schedule(&eqo->napi);
1635
1636 return num;
Sathya Perla859b1e42009-08-10 03:43:51 +00001637}
1638
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001639/* Leaves the EQ is disarmed state */
1640static void be_eq_clean(struct be_eq_obj *eqo)
1641{
1642 int num = events_get(eqo);
1643
1644 be_eq_notify(eqo->adapter, eqo->q.id, false, true, num);
1645}
1646
/* Flush an RX object at teardown: discard every pending completion and
 * then release all rx buffers that were posted but never consumed,
 * finally resetting the ring indices.
 */
static void be_rx_cq_clean(struct be_rx_obj *rxo)
{
	struct be_rx_page_info *page_info;
	struct be_queue_info *rxq = &rxo->q;
	struct be_queue_info *rx_cq = &rxo->cq;
	struct be_rx_compl_info *rxcp;
	u16 tail;

	/* First cleanup pending rx completions */
	while ((rxcp = be_rx_compl_get(rxo)) != NULL) {
		be_rx_compl_discard(rxo, rxcp);
		be_cq_notify(rxo->adapter, rx_cq->id, false, 1);
	}

	/* Then free posted rx buffer that were not used */
	/* tail = oldest still-posted entry; walk forward until 'used'
	 * (decremented inside get_rx_page_info) drops to zero */
	tail = (rxq->head + rxq->len - atomic_read(&rxq->used)) % rxq->len;
	for (; atomic_read(&rxq->used) > 0; index_inc(&tail, rxq->len)) {
		page_info = get_rx_page_info(rxo, tail);
		put_page(page_info->page);
		memset(page_info, 0, sizeof(*page_info));
	}
	BUG_ON(atomic_read(&rxq->used));
	rxq->tail = rxq->head = 0;
}
1671
/* Drain all TX queues at teardown. First waits (up to ~200ms) for pending
 * tx completions to arrive and reclaims them; then forcibly frees any
 * posted skbs whose completions will never come.
 */
static void be_tx_compl_clean(struct be_adapter *adapter)
{
	struct be_tx_obj *txo;
	struct be_queue_info *txq;
	struct be_eth_tx_compl *txcp;
	u16 end_idx, cmpl = 0, timeo = 0, num_wrbs = 0;
	struct sk_buff *sent_skb;
	bool dummy_wrb;
	int i, pending_txqs;

	/* Wait for a max of 200ms for all the tx-completions to arrive. */
	do {
		pending_txqs = adapter->num_tx_qs;

		for_all_tx_queues(adapter, txo, i) {
			txq = &txo->q;
			/* Reclaim every completion currently in this CQ */
			while ((txcp = be_tx_compl_get(&txo->cq))) {
				end_idx =
					AMAP_GET_BITS(struct amap_eth_tx_compl,
						      wrb_index, txcp);
				num_wrbs += be_tx_compl_process(adapter, txo,
								end_idx);
				cmpl++;
			}
			if (cmpl) {
				be_cq_notify(adapter, txo->cq.id, false, cmpl);
				atomic_sub(num_wrbs, &txq->used);
				/* reset per-queue counters for next txq */
				cmpl = 0;
				num_wrbs = 0;
			}
			if (atomic_read(&txq->used) == 0)
				pending_txqs--;
		}

		/* Done when every txq drained, or after ~200 x 1ms delays */
		if (pending_txqs == 0 || ++timeo > 200)
			break;

		mdelay(1);
	} while (true);

	for_all_tx_queues(adapter, txo, i) {
		txq = &txo->q;
		if (atomic_read(&txq->used))
			dev_err(&adapter->pdev->dev, "%d pending tx-compls\n",
				atomic_read(&txq->used));

		/* free posted tx for which compls will never arrive */
		while (atomic_read(&txq->used)) {
			sent_skb = txo->sent_skb_list[txq->tail];
			end_idx = txq->tail;
			/* Recompute this skb's wrb span to find its last
			 * index, then reclaim it as if a compl had arrived */
			num_wrbs = wrb_cnt_for_skb(adapter, sent_skb,
						   &dummy_wrb);
			index_adv(&end_idx, num_wrbs - 1, txq->len);
			num_wrbs = be_tx_compl_process(adapter, txo, end_idx);
			atomic_sub(num_wrbs, &txq->used);
		}
	}
}
1730
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001731static void be_evt_queues_destroy(struct be_adapter *adapter)
1732{
1733 struct be_eq_obj *eqo;
1734 int i;
1735
1736 for_all_evt_queues(adapter, eqo, i) {
Padmanabh Ratnakar19d59aa2012-07-12 03:57:21 +00001737 if (eqo->q.created) {
1738 be_eq_clean(eqo);
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001739 be_cmd_q_destroy(adapter, &eqo->q, QTYPE_EQ);
Padmanabh Ratnakar19d59aa2012-07-12 03:57:21 +00001740 }
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001741 be_queue_free(adapter, &eqo->q);
1742 }
1743}
1744
/* Create one event queue per irq: allocate the ring memory and create the
 * EQ in firmware. Returns 0 on success or the first error encountered;
 * on error, already-created queues are left for the caller/teardown path
 * (be_evt_queues_destroy) to release.
 */
static int be_evt_queues_create(struct be_adapter *adapter)
{
	struct be_queue_info *eq;
	struct be_eq_obj *eqo;
	int i, rc;

	adapter->num_evt_qs = num_irqs(adapter);

	for_all_evt_queues(adapter, eqo, i) {
		eqo->adapter = adapter;
		eqo->tx_budget = BE_TX_BUDGET;
		eqo->idx = i;
		eqo->max_eqd = BE_MAX_EQD;
		/* adaptive interrupt coalescing on by default */
		eqo->enable_aic = true;

		eq = &eqo->q;
		rc = be_queue_alloc(adapter, eq, EVNT_Q_LEN,
				    sizeof(struct be_eq_entry));
		if (rc)
			return rc;

		rc = be_cmd_eq_create(adapter, eq, eqo->cur_eqd);
		if (rc)
			return rc;
	}
	return 0;
}
1772
Sathya Perla5fb379e2009-06-18 00:02:59 +00001773static void be_mcc_queues_destroy(struct be_adapter *adapter)
1774{
1775 struct be_queue_info *q;
Sathya Perla5fb379e2009-06-18 00:02:59 +00001776
Sathya Perla8788fdc2009-07-27 22:52:03 +00001777 q = &adapter->mcc_obj.q;
Sathya Perla5fb379e2009-06-18 00:02:59 +00001778 if (q->created)
Sathya Perla8788fdc2009-07-27 22:52:03 +00001779 be_cmd_q_destroy(adapter, q, QTYPE_MCCQ);
Sathya Perla5fb379e2009-06-18 00:02:59 +00001780 be_queue_free(adapter, q);
1781
Sathya Perla8788fdc2009-07-27 22:52:03 +00001782 q = &adapter->mcc_obj.cq;
Sathya Perla5fb379e2009-06-18 00:02:59 +00001783 if (q->created)
Sathya Perla8788fdc2009-07-27 22:52:03 +00001784 be_cmd_q_destroy(adapter, q, QTYPE_CQ);
Sathya Perla5fb379e2009-06-18 00:02:59 +00001785 be_queue_free(adapter, q);
1786}
1787
/* Must be called only after TX qs are created as MCC shares TX EQ */
/* Creates the MCC completion queue and WRB queue. Uses the classic
 * goto-unwind pattern: each later failure releases everything created
 * before it. Returns 0 on success, -1 on any failure.
 */
static int be_mcc_queues_create(struct be_adapter *adapter)
{
	struct be_queue_info *q, *cq;

	cq = &adapter->mcc_obj.cq;
	if (be_queue_alloc(adapter, cq, MCC_CQ_LEN,
			sizeof(struct be_mcc_compl)))
		goto err;

	/* Use the default EQ for MCC completions */
	if (be_cmd_cq_create(adapter, cq, &mcc_eqo(adapter)->q, true, 0))
		goto mcc_cq_free;

	q = &adapter->mcc_obj.q;
	if (be_queue_alloc(adapter, q, MCC_Q_LEN, sizeof(struct be_mcc_wrb)))
		goto mcc_cq_destroy;

	if (be_cmd_mccq_create(adapter, q, cq))
		goto mcc_q_free;

	return 0;

mcc_q_free:
	be_queue_free(adapter, q);
mcc_cq_destroy:
	be_cmd_q_destroy(adapter, cq, QTYPE_CQ);
mcc_cq_free:
	be_queue_free(adapter, cq);
err:
	return -1;
}
1820
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001821static void be_tx_queues_destroy(struct be_adapter *adapter)
1822{
1823 struct be_queue_info *q;
Sathya Perla3c8def92011-06-12 20:01:58 +00001824 struct be_tx_obj *txo;
1825 u8 i;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001826
Sathya Perla3c8def92011-06-12 20:01:58 +00001827 for_all_tx_queues(adapter, txo, i) {
1828 q = &txo->q;
1829 if (q->created)
1830 be_cmd_q_destroy(adapter, q, QTYPE_TXQ);
1831 be_queue_free(adapter, q);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001832
Sathya Perla3c8def92011-06-12 20:01:58 +00001833 q = &txo->cq;
1834 if (q->created)
1835 be_cmd_q_destroy(adapter, q, QTYPE_CQ);
1836 be_queue_free(adapter, q);
1837 }
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001838}
1839
Sathya Perladafc0fe2011-10-24 02:45:02 +00001840static int be_num_txqs_want(struct be_adapter *adapter)
1841{
Sathya Perla39f1d942012-05-08 19:41:24 +00001842 if (sriov_want(adapter) || be_is_mc(adapter) ||
1843 lancer_chip(adapter) || !be_physfn(adapter) ||
1844 adapter->generation == BE_GEN2)
Sathya Perladafc0fe2011-10-24 02:45:02 +00001845 return 1;
1846 else
1847 return MAX_TX_QS;
1848}
1849
/* Decide the number of TX queues, publish it to the net stack, and create
 * one TX completion queue per TX ring. Returns 0 or the first error; any
 * queues already created are left for the teardown path to release.
 */
static int be_tx_cqs_create(struct be_adapter *adapter)
{
	struct be_queue_info *cq, *eq;
	int status;
	struct be_tx_obj *txo;
	u8 i;

	adapter->num_tx_qs = be_num_txqs_want(adapter);
	if (adapter->num_tx_qs != MAX_TX_QS) {
		/* rtnl lock protects the real_num_tx_queues update */
		rtnl_lock();
		netif_set_real_num_tx_queues(adapter->netdev,
			adapter->num_tx_qs);
		rtnl_unlock();
	}

	for_all_tx_queues(adapter, txo, i) {
		cq = &txo->cq;
		status = be_queue_alloc(adapter, cq, TX_CQ_LEN,
					sizeof(struct be_eth_tx_compl));
		if (status)
			return status;

		/* If num_evt_qs is less than num_tx_qs, then more than
		 * one txq share an eq
		 */
		eq = &adapter->eq_obj[i % adapter->num_evt_qs].q;
		status = be_cmd_cq_create(adapter, cq, eq, false, 3);
		if (status)
			return status;
	}
	return 0;
}
1882
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001883static int be_tx_qs_create(struct be_adapter *adapter)
1884{
1885 struct be_tx_obj *txo;
1886 int i, status;
1887
1888 for_all_tx_queues(adapter, txo, i) {
1889 status = be_queue_alloc(adapter, &txo->q, TX_Q_LEN,
1890 sizeof(struct be_eth_wrb));
1891 if (status)
1892 return status;
1893
1894 status = be_cmd_txq_create(adapter, &txo->q, &txo->cq);
1895 if (status)
1896 return status;
1897 }
1898
1899 return 0;
1900}
1901
1902static void be_rx_cqs_destroy(struct be_adapter *adapter)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001903{
1904 struct be_queue_info *q;
Sathya Perla3abcded2010-10-03 22:12:27 -07001905 struct be_rx_obj *rxo;
1906 int i;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001907
Sathya Perla3abcded2010-10-03 22:12:27 -07001908 for_all_rx_queues(adapter, rxo, i) {
Sathya Perla3abcded2010-10-03 22:12:27 -07001909 q = &rxo->cq;
1910 if (q->created)
1911 be_cmd_q_destroy(adapter, q, QTYPE_CQ);
1912 be_queue_free(adapter, q);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001913 }
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001914}
1915
/* Decide the number of RX rings, publish it to the net stack, and create
 * one RX completion queue per ring. Returns 0 or the first error; queues
 * already created are left for be_rx_cqs_destroy() to release.
 */
static int be_rx_cqs_create(struct be_adapter *adapter)
{
	struct be_queue_info *eq, *cq;
	struct be_rx_obj *rxo;
	int rc, i;

	/* We'll create as many RSS rings as there are irqs.
	 * But when there's only one irq there's no use creating RSS rings
	 */
	/* The "+ 1" accounts for the extra default (non-RSS) ring */
	adapter->num_rx_qs = (num_irqs(adapter) > 1) ?
				num_irqs(adapter) + 1 : 1;
	if (adapter->num_rx_qs != MAX_RX_QS) {
		/* rtnl lock protects the real_num_rx_queues update */
		rtnl_lock();
		netif_set_real_num_rx_queues(adapter->netdev,
					     adapter->num_rx_qs);
		rtnl_unlock();
	}

	adapter->big_page_size = (1 << get_order(rx_frag_size)) * PAGE_SIZE;
	for_all_rx_queues(adapter, rxo, i) {
		rxo->adapter = adapter;
		cq = &rxo->cq;
		rc = be_queue_alloc(adapter, cq, RX_CQ_LEN,
				sizeof(struct be_eth_rx_compl));
		if (rc)
			return rc;

		/* CQs may share EQs when there are fewer EQs than rx rings */
		eq = &adapter->eq_obj[i % adapter->num_evt_qs].q;
		rc = be_cmd_cq_create(adapter, cq, eq, false, 3);
		if (rc)
			return rc;
	}

	if (adapter->num_rx_qs != MAX_RX_QS)
		dev_info(&adapter->pdev->dev,
			"Created only %d receive queues\n", adapter->num_rx_qs);

	return 0;
}
1955
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001956static irqreturn_t be_intx(int irq, void *dev)
1957{
1958 struct be_adapter *adapter = dev;
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001959 int num_evts;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001960
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001961 /* With INTx only one EQ is used */
1962 num_evts = event_handle(&adapter->eq_obj[0]);
1963 if (num_evts)
1964 return IRQ_HANDLED;
1965 else
1966 return IRQ_NONE;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001967}
1968
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001969static irqreturn_t be_msix(int irq, void *dev)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001970{
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001971 struct be_eq_obj *eqo = dev;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001972
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001973 event_handle(eqo);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001974 return IRQ_HANDLED;
1975}
1976
Sathya Perla2e588f82011-03-11 02:49:26 +00001977static inline bool do_gro(struct be_rx_compl_info *rxcp)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001978{
Sathya Perla2e588f82011-03-11 02:49:26 +00001979 return (rxcp->tcpf && !rxcp->err) ? true : false;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001980}
1981
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001982static int be_process_rx(struct be_rx_obj *rxo, struct napi_struct *napi,
1983 int budget)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001984{
Sathya Perla3abcded2010-10-03 22:12:27 -07001985 struct be_adapter *adapter = rxo->adapter;
1986 struct be_queue_info *rx_cq = &rxo->cq;
Sathya Perla2e588f82011-03-11 02:49:26 +00001987 struct be_rx_compl_info *rxcp;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001988 u32 work_done;
1989
1990 for (work_done = 0; work_done < budget; work_done++) {
Sathya Perla3abcded2010-10-03 22:12:27 -07001991 rxcp = be_rx_compl_get(rxo);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001992 if (!rxcp)
1993 break;
1994
Sathya Perla12004ae2011-08-02 19:57:46 +00001995 /* Is it a flush compl that has no data */
1996 if (unlikely(rxcp->num_rcvd == 0))
1997 goto loop_continue;
1998
1999 /* Discard compl with partial DMA Lancer B0 */
2000 if (unlikely(!rxcp->pkt_size)) {
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002001 be_rx_compl_discard(rxo, rxcp);
Sathya Perla12004ae2011-08-02 19:57:46 +00002002 goto loop_continue;
Sathya Perla64642812010-12-01 01:04:17 +00002003 }
Padmanabh Ratnakar009dd872011-05-10 05:12:17 +00002004
Sathya Perla12004ae2011-08-02 19:57:46 +00002005 /* On BE drop pkts that arrive due to imperfect filtering in
2006 * promiscuous mode on some skews
2007 */
2008 if (unlikely(rxcp->port != adapter->port_num &&
2009 !lancer_chip(adapter))) {
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002010 be_rx_compl_discard(rxo, rxcp);
Sathya Perla12004ae2011-08-02 19:57:46 +00002011 goto loop_continue;
2012 }
2013
2014 if (do_gro(rxcp))
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002015 be_rx_compl_process_gro(rxo, napi, rxcp);
Sathya Perla12004ae2011-08-02 19:57:46 +00002016 else
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002017 be_rx_compl_process(rxo, rxcp);
Sathya Perla12004ae2011-08-02 19:57:46 +00002018loop_continue:
Sathya Perla2e588f82011-03-11 02:49:26 +00002019 be_rx_stats_update(rxo, rxcp);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002020 }
2021
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002022 if (work_done) {
2023 be_cq_notify(adapter, rx_cq->id, true, work_done);
Padmanabh Ratnakar9372cac2011-11-03 01:49:55 +00002024
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002025 if (atomic_read(&rxo->q.used) < RX_FRAGS_REFILL_WM)
2026 be_post_rx_frags(rxo, GFP_ATOMIC);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002027 }
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002028
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002029 return work_done;
2030}
2031
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002032static bool be_process_tx(struct be_adapter *adapter, struct be_tx_obj *txo,
2033 int budget, int idx)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002034{
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002035 struct be_eth_tx_compl *txcp;
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002036 int num_wrbs = 0, work_done;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002037
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002038 for (work_done = 0; work_done < budget; work_done++) {
2039 txcp = be_tx_compl_get(&txo->cq);
2040 if (!txcp)
2041 break;
2042 num_wrbs += be_tx_compl_process(adapter, txo,
Sathya Perla3c8def92011-06-12 20:01:58 +00002043 AMAP_GET_BITS(struct amap_eth_tx_compl,
2044 wrb_index, txcp));
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002045 }
2046
2047 if (work_done) {
2048 be_cq_notify(adapter, txo->cq.id, true, work_done);
2049 atomic_sub(num_wrbs, &txo->q.used);
2050
2051 /* As Tx wrbs have been freed up, wake up netdev queue
2052 * if it was stopped due to lack of tx wrbs. */
2053 if (__netif_subqueue_stopped(adapter->netdev, idx) &&
2054 atomic_read(&txo->q.used) < txo->q.len / 2) {
2055 netif_wake_subqueue(adapter->netdev, idx);
Sathya Perla3c8def92011-06-12 20:01:58 +00002056 }
Sathya Perla3c8def92011-06-12 20:01:58 +00002057
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002058 u64_stats_update_begin(&tx_stats(txo)->sync_compl);
2059 tx_stats(txo)->tx_compl += work_done;
2060 u64_stats_update_end(&tx_stats(txo)->sync_compl);
2061 }
2062 return (work_done < budget); /* Done */
2063}
Sathya Perla3c8def92011-06-12 20:01:58 +00002064
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002065int be_poll(struct napi_struct *napi, int budget)
2066{
2067 struct be_eq_obj *eqo = container_of(napi, struct be_eq_obj, napi);
2068 struct be_adapter *adapter = eqo->adapter;
2069 int max_work = 0, work, i;
2070 bool tx_done;
Sathya Perla3c8def92011-06-12 20:01:58 +00002071
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002072 /* Process all TXQs serviced by this EQ */
2073 for (i = eqo->idx; i < adapter->num_tx_qs; i += adapter->num_evt_qs) {
2074 tx_done = be_process_tx(adapter, &adapter->tx_obj[i],
2075 eqo->tx_budget, i);
2076 if (!tx_done)
2077 max_work = budget;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002078 }
2079
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002080 /* This loop will iterate twice for EQ0 in which
2081 * completions of the last RXQ (default one) are also processed
2082 * For other EQs the loop iterates only once
2083 */
2084 for (i = eqo->idx; i < adapter->num_rx_qs; i += adapter->num_evt_qs) {
2085 work = be_process_rx(&adapter->rx_obj[i], napi, budget);
2086 max_work = max(work, max_work);
Sathya Perlaf31e50a2010-03-02 03:56:39 -08002087 }
2088
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002089 if (is_mcc_eqo(eqo))
2090 be_process_mcc(adapter);
Sathya Perla5fb379e2009-06-18 00:02:59 +00002091
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002092 if (max_work < budget) {
2093 napi_complete(napi);
2094 be_eq_notify(adapter, eqo->q.id, true, false, 0);
2095 } else {
2096 /* As we'll continue in polling mode, count and clear events */
2097 be_eq_notify(adapter, eqo->q.id, false, false, events_get(eqo));
Padmanabh Ratnakar93c86702011-12-19 01:53:35 +00002098 }
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002099 return max_work;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002100}
2101
Padmanabh Ratnakarf67ef7b2012-07-12 03:57:09 +00002102void be_detect_error(struct be_adapter *adapter)
Ajit Khaparde7c185272010-07-29 06:16:33 +00002103{
Padmanabh Ratnakare1cfb672011-11-03 01:50:08 +00002104 u32 ue_lo = 0, ue_hi = 0, ue_lo_mask = 0, ue_hi_mask = 0;
2105 u32 sliport_status = 0, sliport_err1 = 0, sliport_err2 = 0;
Ajit Khaparde7c185272010-07-29 06:16:33 +00002106 u32 i;
2107
Padmanabh Ratnakarf67ef7b2012-07-12 03:57:09 +00002108 if (be_crit_error(adapter))
Sathya Perla72f02482011-11-10 19:17:58 +00002109 return;
2110
Padmanabh Ratnakare1cfb672011-11-03 01:50:08 +00002111 if (lancer_chip(adapter)) {
2112 sliport_status = ioread32(adapter->db + SLIPORT_STATUS_OFFSET);
2113 if (sliport_status & SLIPORT_STATUS_ERR_MASK) {
2114 sliport_err1 = ioread32(adapter->db +
2115 SLIPORT_ERROR1_OFFSET);
2116 sliport_err2 = ioread32(adapter->db +
2117 SLIPORT_ERROR2_OFFSET);
2118 }
2119 } else {
2120 pci_read_config_dword(adapter->pdev,
2121 PCICFG_UE_STATUS_LOW, &ue_lo);
2122 pci_read_config_dword(adapter->pdev,
2123 PCICFG_UE_STATUS_HIGH, &ue_hi);
2124 pci_read_config_dword(adapter->pdev,
2125 PCICFG_UE_STATUS_LOW_MASK, &ue_lo_mask);
2126 pci_read_config_dword(adapter->pdev,
2127 PCICFG_UE_STATUS_HI_MASK, &ue_hi_mask);
Ajit Khaparde7c185272010-07-29 06:16:33 +00002128
Padmanabh Ratnakarf67ef7b2012-07-12 03:57:09 +00002129 ue_lo = (ue_lo & ~ue_lo_mask);
2130 ue_hi = (ue_hi & ~ue_hi_mask);
Padmanabh Ratnakare1cfb672011-11-03 01:50:08 +00002131 }
Ajit Khaparde7c185272010-07-29 06:16:33 +00002132
Padmanabh Ratnakare1cfb672011-11-03 01:50:08 +00002133 if (ue_lo || ue_hi ||
2134 sliport_status & SLIPORT_STATUS_ERR_MASK) {
Padmanabh Ratnakarf67ef7b2012-07-12 03:57:09 +00002135 adapter->hw_error = true;
Sathya Perla434b3642011-11-10 19:17:59 +00002136 dev_err(&adapter->pdev->dev,
Padmanabh Ratnakarf67ef7b2012-07-12 03:57:09 +00002137 "Error detected in the card\n");
2138 }
2139
2140 if (sliport_status & SLIPORT_STATUS_ERR_MASK) {
2141 dev_err(&adapter->pdev->dev,
2142 "ERR: sliport status 0x%x\n", sliport_status);
2143 dev_err(&adapter->pdev->dev,
2144 "ERR: sliport error1 0x%x\n", sliport_err1);
2145 dev_err(&adapter->pdev->dev,
2146 "ERR: sliport error2 0x%x\n", sliport_err2);
Ajit Khaparded053de92010-09-03 06:23:30 +00002147 }
2148
Padmanabh Ratnakare1cfb672011-11-03 01:50:08 +00002149 if (ue_lo) {
2150 for (i = 0; ue_lo; ue_lo >>= 1, i++) {
2151 if (ue_lo & 1)
Ajit Khaparde7c185272010-07-29 06:16:33 +00002152 dev_err(&adapter->pdev->dev,
2153 "UE: %s bit set\n", ue_status_low_desc[i]);
2154 }
2155 }
Padmanabh Ratnakarf67ef7b2012-07-12 03:57:09 +00002156
Padmanabh Ratnakare1cfb672011-11-03 01:50:08 +00002157 if (ue_hi) {
2158 for (i = 0; ue_hi; ue_hi >>= 1, i++) {
2159 if (ue_hi & 1)
Ajit Khaparde7c185272010-07-29 06:16:33 +00002160 dev_err(&adapter->pdev->dev,
2161 "UE: %s bit set\n", ue_status_hi_desc[i]);
2162 }
2163 }
2164
2165}
2166
Sathya Perla8d56ff12009-11-22 22:02:26 +00002167static void be_msix_disable(struct be_adapter *adapter)
2168{
Sathya Perlaac6a0c42011-03-21 20:49:25 +00002169 if (msix_enabled(adapter)) {
Sathya Perla8d56ff12009-11-22 22:02:26 +00002170 pci_disable_msix(adapter->pdev);
Sathya Perlaac6a0c42011-03-21 20:49:25 +00002171 adapter->num_msix_vec = 0;
Sathya Perla3abcded2010-10-03 22:12:27 -07002172 }
2173}
2174
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002175static uint be_num_rss_want(struct be_adapter *adapter)
2176{
Yuval Mintz30e80b52012-07-01 03:19:00 +00002177 u32 num = 0;
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002178 if ((adapter->function_caps & BE_FUNCTION_CAPS_RSS) &&
Sathya Perla4cbdaf62012-08-28 20:37:40 +00002179 !sriov_want(adapter) && be_physfn(adapter)) {
Yuval Mintz30e80b52012-07-01 03:19:00 +00002180 num = (adapter->be3_native) ? BE3_MAX_RSS_QS : BE2_MAX_RSS_QS;
2181 num = min_t(u32, num, (u32)netif_get_num_default_rss_queues());
2182 }
2183 return num;
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002184}
2185
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002186static void be_msix_enable(struct be_adapter *adapter)
2187{
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002188#define BE_MIN_MSIX_VECTORS 1
Parav Pandit045508a2012-03-26 14:27:13 +00002189 int i, status, num_vec, num_roce_vec = 0;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002190
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002191 /* If RSS queues are not used, need a vec for default RX Q */
2192 num_vec = min(be_num_rss_want(adapter), num_online_cpus());
Parav Pandit045508a2012-03-26 14:27:13 +00002193 if (be_roce_supported(adapter)) {
2194 num_roce_vec = min_t(u32, MAX_ROCE_MSIX_VECTORS,
2195 (num_online_cpus() + 1));
2196 num_roce_vec = min(num_roce_vec, MAX_ROCE_EQS);
2197 num_vec += num_roce_vec;
2198 num_vec = min(num_vec, MAX_MSIX_VECTORS);
2199 }
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002200 num_vec = max(num_vec, BE_MIN_MSIX_VECTORS);
Sathya Perla3abcded2010-10-03 22:12:27 -07002201
Sathya Perlaac6a0c42011-03-21 20:49:25 +00002202 for (i = 0; i < num_vec; i++)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002203 adapter->msix_entries[i].entry = i;
2204
Sathya Perlaac6a0c42011-03-21 20:49:25 +00002205 status = pci_enable_msix(adapter->pdev, adapter->msix_entries, num_vec);
Sathya Perla3abcded2010-10-03 22:12:27 -07002206 if (status == 0) {
2207 goto done;
2208 } else if (status >= BE_MIN_MSIX_VECTORS) {
Sathya Perlaac6a0c42011-03-21 20:49:25 +00002209 num_vec = status;
Sathya Perla3abcded2010-10-03 22:12:27 -07002210 if (pci_enable_msix(adapter->pdev, adapter->msix_entries,
Sathya Perlaac6a0c42011-03-21 20:49:25 +00002211 num_vec) == 0)
Sathya Perla3abcded2010-10-03 22:12:27 -07002212 goto done;
Sathya Perla3abcded2010-10-03 22:12:27 -07002213 }
2214 return;
2215done:
Parav Pandit045508a2012-03-26 14:27:13 +00002216 if (be_roce_supported(adapter)) {
2217 if (num_vec > num_roce_vec) {
2218 adapter->num_msix_vec = num_vec - num_roce_vec;
2219 adapter->num_msix_roce_vec =
2220 num_vec - adapter->num_msix_vec;
2221 } else {
2222 adapter->num_msix_vec = num_vec;
2223 adapter->num_msix_roce_vec = 0;
2224 }
2225 } else
2226 adapter->num_msix_vec = num_vec;
Sathya Perlaac6a0c42011-03-21 20:49:25 +00002227 return;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002228}
2229
Sathya Perlafe6d2a32010-11-21 23:25:50 +00002230static inline int be_msix_vec_get(struct be_adapter *adapter,
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002231 struct be_eq_obj *eqo)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002232{
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002233 return adapter->msix_entries[eqo->idx].vector;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002234}
2235
2236static int be_msix_register(struct be_adapter *adapter)
2237{
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002238 struct net_device *netdev = adapter->netdev;
2239 struct be_eq_obj *eqo;
2240 int status, i, vec;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002241
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002242 for_all_evt_queues(adapter, eqo, i) {
2243 sprintf(eqo->desc, "%s-q%d", netdev->name, i);
2244 vec = be_msix_vec_get(adapter, eqo);
2245 status = request_irq(vec, be_msix, 0, eqo->desc, eqo);
Sathya Perla3abcded2010-10-03 22:12:27 -07002246 if (status)
2247 goto err_msix;
2248 }
Sathya Perlab628bde2009-08-17 00:58:26 +00002249
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002250 return 0;
Sathya Perla3abcded2010-10-03 22:12:27 -07002251err_msix:
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002252 for (i--, eqo = &adapter->eq_obj[i]; i >= 0; i--, eqo--)
2253 free_irq(be_msix_vec_get(adapter, eqo), eqo);
2254 dev_warn(&adapter->pdev->dev, "MSIX Request IRQ failed - err %d\n",
2255 status);
Sathya Perlaac6a0c42011-03-21 20:49:25 +00002256 be_msix_disable(adapter);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002257 return status;
2258}
2259
2260static int be_irq_register(struct be_adapter *adapter)
2261{
2262 struct net_device *netdev = adapter->netdev;
2263 int status;
2264
Sathya Perlaac6a0c42011-03-21 20:49:25 +00002265 if (msix_enabled(adapter)) {
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002266 status = be_msix_register(adapter);
2267 if (status == 0)
2268 goto done;
Sarveshwar Bandiba343c72010-03-31 02:56:12 +00002269 /* INTx is not supported for VF */
2270 if (!be_physfn(adapter))
2271 return status;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002272 }
2273
2274 /* INTx */
2275 netdev->irq = adapter->pdev->irq;
2276 status = request_irq(netdev->irq, be_intx, IRQF_SHARED, netdev->name,
2277 adapter);
2278 if (status) {
2279 dev_err(&adapter->pdev->dev,
2280 "INTx request IRQ failed - err %d\n", status);
2281 return status;
2282 }
2283done:
2284 adapter->isr_registered = true;
2285 return 0;
2286}
2287
2288static void be_irq_unregister(struct be_adapter *adapter)
2289{
2290 struct net_device *netdev = adapter->netdev;
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002291 struct be_eq_obj *eqo;
Sathya Perla3abcded2010-10-03 22:12:27 -07002292 int i;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002293
2294 if (!adapter->isr_registered)
2295 return;
2296
2297 /* INTx */
Sathya Perlaac6a0c42011-03-21 20:49:25 +00002298 if (!msix_enabled(adapter)) {
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002299 free_irq(netdev->irq, adapter);
2300 goto done;
2301 }
2302
2303 /* MSIx */
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002304 for_all_evt_queues(adapter, eqo, i)
2305 free_irq(be_msix_vec_get(adapter, eqo), eqo);
Sathya Perla3abcded2010-10-03 22:12:27 -07002306
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002307done:
2308 adapter->isr_registered = false;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002309}
2310
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002311static void be_rx_qs_destroy(struct be_adapter *adapter)
Sathya Perla482c9e72011-06-29 23:33:17 +00002312{
2313 struct be_queue_info *q;
2314 struct be_rx_obj *rxo;
2315 int i;
2316
2317 for_all_rx_queues(adapter, rxo, i) {
2318 q = &rxo->q;
2319 if (q->created) {
2320 be_cmd_rxq_destroy(adapter, q);
2321 /* After the rxq is invalidated, wait for a grace time
2322 * of 1ms for all dma to end and the flush compl to
2323 * arrive
2324 */
2325 mdelay(1);
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002326 be_rx_cq_clean(rxo);
Sathya Perla482c9e72011-06-29 23:33:17 +00002327 }
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002328 be_queue_free(adapter, q);
Sathya Perla482c9e72011-06-29 23:33:17 +00002329 }
2330}
2331
Sathya Perla889cd4b2010-05-30 23:33:45 +00002332static int be_close(struct net_device *netdev)
2333{
2334 struct be_adapter *adapter = netdev_priv(netdev);
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002335 struct be_eq_obj *eqo;
2336 int i;
Sathya Perla889cd4b2010-05-30 23:33:45 +00002337
Parav Pandit045508a2012-03-26 14:27:13 +00002338 be_roce_dev_close(adapter);
2339
Sathya Perla889cd4b2010-05-30 23:33:45 +00002340 be_async_mcc_disable(adapter);
2341
Sathya Perlafe6d2a32010-11-21 23:25:50 +00002342 if (!lancer_chip(adapter))
2343 be_intr_set(adapter, false);
Sathya Perla889cd4b2010-05-30 23:33:45 +00002344
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002345 for_all_evt_queues(adapter, eqo, i) {
2346 napi_disable(&eqo->napi);
2347 if (msix_enabled(adapter))
2348 synchronize_irq(be_msix_vec_get(adapter, eqo));
2349 else
2350 synchronize_irq(netdev->irq);
2351 be_eq_clean(eqo);
Padmanabh Ratnakar63fcb272011-03-07 03:09:17 +00002352 }
2353
Sathya Perla889cd4b2010-05-30 23:33:45 +00002354 be_irq_unregister(adapter);
2355
Sathya Perla889cd4b2010-05-30 23:33:45 +00002356 /* Wait for all pending tx completions to arrive so that
2357 * all tx skbs are freed.
2358 */
Sathya Perla0ae57bb2012-02-23 18:50:14 +00002359 be_tx_compl_clean(adapter);
Sathya Perla889cd4b2010-05-30 23:33:45 +00002360
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002361 be_rx_qs_destroy(adapter);
Sathya Perla482c9e72011-06-29 23:33:17 +00002362 return 0;
2363}
2364
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002365static int be_rx_qs_create(struct be_adapter *adapter)
Sathya Perla482c9e72011-06-29 23:33:17 +00002366{
2367 struct be_rx_obj *rxo;
Padmanabh Ratnakare9008ee2011-11-25 05:48:53 +00002368 int rc, i, j;
2369 u8 rsstable[128];
Sathya Perla482c9e72011-06-29 23:33:17 +00002370
2371 for_all_rx_queues(adapter, rxo, i) {
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002372 rc = be_queue_alloc(adapter, &rxo->q, RX_Q_LEN,
2373 sizeof(struct be_eth_rx_d));
2374 if (rc)
2375 return rc;
2376 }
2377
2378 /* The FW would like the default RXQ to be created first */
2379 rxo = default_rxo(adapter);
2380 rc = be_cmd_rxq_create(adapter, &rxo->q, rxo->cq.id, rx_frag_size,
2381 adapter->if_handle, false, &rxo->rss_id);
2382 if (rc)
2383 return rc;
2384
2385 for_all_rss_queues(adapter, rxo, i) {
Sathya Perla482c9e72011-06-29 23:33:17 +00002386 rc = be_cmd_rxq_create(adapter, &rxo->q, rxo->cq.id,
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002387 rx_frag_size, adapter->if_handle,
2388 true, &rxo->rss_id);
Sathya Perla482c9e72011-06-29 23:33:17 +00002389 if (rc)
2390 return rc;
2391 }
2392
2393 if (be_multi_rxq(adapter)) {
Padmanabh Ratnakare9008ee2011-11-25 05:48:53 +00002394 for (j = 0; j < 128; j += adapter->num_rx_qs - 1) {
2395 for_all_rss_queues(adapter, rxo, i) {
2396 if ((j + i) >= 128)
2397 break;
2398 rsstable[j + i] = rxo->rss_id;
2399 }
2400 }
2401 rc = be_cmd_rss_config(adapter, rsstable, 128);
Sathya Perla482c9e72011-06-29 23:33:17 +00002402 if (rc)
2403 return rc;
2404 }
2405
2406 /* First time posting */
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002407 for_all_rx_queues(adapter, rxo, i)
Sathya Perla482c9e72011-06-29 23:33:17 +00002408 be_post_rx_frags(rxo, GFP_KERNEL);
Sathya Perla889cd4b2010-05-30 23:33:45 +00002409 return 0;
2410}
2411
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002412static int be_open(struct net_device *netdev)
2413{
2414 struct be_adapter *adapter = netdev_priv(netdev);
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002415 struct be_eq_obj *eqo;
Sathya Perla3abcded2010-10-03 22:12:27 -07002416 struct be_rx_obj *rxo;
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002417 struct be_tx_obj *txo;
Ajit Khapardeb236916a2011-12-30 12:15:40 +00002418 u8 link_status;
Sathya Perla3abcded2010-10-03 22:12:27 -07002419 int status, i;
Sathya Perla5fb379e2009-06-18 00:02:59 +00002420
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002421 status = be_rx_qs_create(adapter);
Sathya Perla482c9e72011-06-29 23:33:17 +00002422 if (status)
2423 goto err;
2424
Sathya Perla5fb379e2009-06-18 00:02:59 +00002425 be_irq_register(adapter);
2426
Sathya Perlafe6d2a32010-11-21 23:25:50 +00002427 if (!lancer_chip(adapter))
2428 be_intr_set(adapter, true);
Sathya Perla5fb379e2009-06-18 00:02:59 +00002429
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002430 for_all_rx_queues(adapter, rxo, i)
Sathya Perla3abcded2010-10-03 22:12:27 -07002431 be_cq_notify(adapter, rxo->cq.id, true, 0);
Sathya Perla5fb379e2009-06-18 00:02:59 +00002432
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002433 for_all_tx_queues(adapter, txo, i)
2434 be_cq_notify(adapter, txo->cq.id, true, 0);
2435
Sathya Perla7a1e9b22010-02-17 01:35:11 +00002436 be_async_mcc_enable(adapter);
2437
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002438 for_all_evt_queues(adapter, eqo, i) {
2439 napi_enable(&eqo->napi);
2440 be_eq_notify(adapter, eqo->q.id, true, false, 0);
2441 }
2442
Ajit Khapardeb236916a2011-12-30 12:15:40 +00002443 status = be_cmd_link_status_query(adapter, NULL, NULL,
2444 &link_status, 0);
2445 if (!status)
2446 be_link_status_update(adapter, link_status);
2447
Parav Pandit045508a2012-03-26 14:27:13 +00002448 be_roce_dev_open(adapter);
Sathya Perla889cd4b2010-05-30 23:33:45 +00002449 return 0;
2450err:
2451 be_close(adapter->netdev);
2452 return -EIO;
Sathya Perla5fb379e2009-06-18 00:02:59 +00002453}
2454
Ajit Khaparde71d8d1b2009-12-03 06:16:59 +00002455static int be_setup_wol(struct be_adapter *adapter, bool enable)
2456{
2457 struct be_dma_mem cmd;
2458 int status = 0;
2459 u8 mac[ETH_ALEN];
2460
2461 memset(mac, 0, ETH_ALEN);
2462
2463 cmd.size = sizeof(struct be_cmd_req_acpi_wol_magic_config);
Ivan Vecera2b7bceb2011-02-02 08:05:12 +00002464 cmd.va = dma_alloc_coherent(&adapter->pdev->dev, cmd.size, &cmd.dma,
2465 GFP_KERNEL);
Ajit Khaparde71d8d1b2009-12-03 06:16:59 +00002466 if (cmd.va == NULL)
2467 return -1;
2468 memset(cmd.va, 0, cmd.size);
2469
2470 if (enable) {
2471 status = pci_write_config_dword(adapter->pdev,
2472 PCICFG_PM_CONTROL_OFFSET, PCICFG_PM_CONTROL_MASK);
2473 if (status) {
2474 dev_err(&adapter->pdev->dev,
Frans Pop2381a552010-03-24 07:57:36 +00002475 "Could not enable Wake-on-lan\n");
Ivan Vecera2b7bceb2011-02-02 08:05:12 +00002476 dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va,
2477 cmd.dma);
Ajit Khaparde71d8d1b2009-12-03 06:16:59 +00002478 return status;
2479 }
2480 status = be_cmd_enable_magic_wol(adapter,
2481 adapter->netdev->dev_addr, &cmd);
2482 pci_enable_wake(adapter->pdev, PCI_D3hot, 1);
2483 pci_enable_wake(adapter->pdev, PCI_D3cold, 1);
2484 } else {
2485 status = be_cmd_enable_magic_wol(adapter, mac, &cmd);
2486 pci_enable_wake(adapter->pdev, PCI_D3hot, 0);
2487 pci_enable_wake(adapter->pdev, PCI_D3cold, 0);
2488 }
2489
Ivan Vecera2b7bceb2011-02-02 08:05:12 +00002490 dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va, cmd.dma);
Ajit Khaparde71d8d1b2009-12-03 06:16:59 +00002491 return status;
2492}
2493
Ajit Khaparde6d87f5c2010-08-25 00:32:33 +00002494/*
2495 * Generate a seed MAC address from the PF MAC Address using jhash.
2496 * MAC Address for VFs are assigned incrementally starting from the seed.
2497 * These addresses are programmed in the ASIC by the PF and the VF driver
2498 * queries for the MAC address during its probe.
2499 */
2500static inline int be_vf_eth_addr_config(struct be_adapter *adapter)
2501{
Sathya Perlaf9449ab2011-10-24 02:45:01 +00002502 u32 vf;
Sathya Perla3abcded2010-10-03 22:12:27 -07002503 int status = 0;
Ajit Khaparde6d87f5c2010-08-25 00:32:33 +00002504 u8 mac[ETH_ALEN];
Sathya Perla11ac75e2011-12-13 00:58:50 +00002505 struct be_vf_cfg *vf_cfg;
Ajit Khaparde6d87f5c2010-08-25 00:32:33 +00002506
2507 be_vf_eth_addr_generate(adapter, mac);
2508
Sathya Perla11ac75e2011-12-13 00:58:50 +00002509 for_all_vfs(adapter, vf_cfg, vf) {
Padmanabh Ratnakar590c3912011-11-25 05:47:26 +00002510 if (lancer_chip(adapter)) {
2511 status = be_cmd_set_mac_list(adapter, mac, 1, vf + 1);
2512 } else {
2513 status = be_cmd_pmac_add(adapter, mac,
Sathya Perla11ac75e2011-12-13 00:58:50 +00002514 vf_cfg->if_handle,
2515 &vf_cfg->pmac_id, vf + 1);
Padmanabh Ratnakar590c3912011-11-25 05:47:26 +00002516 }
2517
Ajit Khaparde6d87f5c2010-08-25 00:32:33 +00002518 if (status)
2519 dev_err(&adapter->pdev->dev,
Padmanabh Ratnakar590c3912011-11-25 05:47:26 +00002520 "Mac address assignment failed for VF %d\n", vf);
Ajit Khaparde6d87f5c2010-08-25 00:32:33 +00002521 else
Sathya Perla11ac75e2011-12-13 00:58:50 +00002522 memcpy(vf_cfg->mac_addr, mac, ETH_ALEN);
Ajit Khaparde6d87f5c2010-08-25 00:32:33 +00002523
2524 mac[5] += 1;
2525 }
2526 return status;
2527}
2528
Sathya Perlaf9449ab2011-10-24 02:45:01 +00002529static void be_vf_clear(struct be_adapter *adapter)
Ajit Khaparde6d87f5c2010-08-25 00:32:33 +00002530{
Sathya Perla11ac75e2011-12-13 00:58:50 +00002531 struct be_vf_cfg *vf_cfg;
Ajit Khaparde6d87f5c2010-08-25 00:32:33 +00002532 u32 vf;
2533
Sathya Perla39f1d942012-05-08 19:41:24 +00002534 if (be_find_vfs(adapter, ASSIGNED)) {
2535 dev_warn(&adapter->pdev->dev, "VFs are assigned to VMs\n");
2536 goto done;
2537 }
2538
Sathya Perla11ac75e2011-12-13 00:58:50 +00002539 for_all_vfs(adapter, vf_cfg, vf) {
Padmanabh Ratnakar590c3912011-11-25 05:47:26 +00002540 if (lancer_chip(adapter))
2541 be_cmd_set_mac_list(adapter, NULL, 0, vf + 1);
2542 else
Sathya Perla11ac75e2011-12-13 00:58:50 +00002543 be_cmd_pmac_del(adapter, vf_cfg->if_handle,
2544 vf_cfg->pmac_id, vf + 1);
Sathya Perlaf9449ab2011-10-24 02:45:01 +00002545
Sathya Perla11ac75e2011-12-13 00:58:50 +00002546 be_cmd_if_destroy(adapter, vf_cfg->if_handle, vf + 1);
2547 }
Sathya Perla39f1d942012-05-08 19:41:24 +00002548 pci_disable_sriov(adapter->pdev);
2549done:
2550 kfree(adapter->vf_cfg);
2551 adapter->num_vfs = 0;
Ajit Khaparde6d87f5c2010-08-25 00:32:33 +00002552}
2553
Sathya Perlaa54769f2011-10-24 02:45:00 +00002554static int be_clear(struct be_adapter *adapter)
2555{
Ajit Khapardefbc13f02012-03-18 06:23:21 +00002556 int i = 1;
2557
Sathya Perla191eb752012-02-23 18:50:13 +00002558 if (adapter->flags & BE_FLAGS_WORKER_SCHEDULED) {
2559 cancel_delayed_work_sync(&adapter->work);
2560 adapter->flags &= ~BE_FLAGS_WORKER_SCHEDULED;
2561 }
2562
Sathya Perla11ac75e2011-12-13 00:58:50 +00002563 if (sriov_enabled(adapter))
Sathya Perlaf9449ab2011-10-24 02:45:01 +00002564 be_vf_clear(adapter);
2565
Ajit Khapardefbc13f02012-03-18 06:23:21 +00002566 for (; adapter->uc_macs > 0; adapter->uc_macs--, i++)
2567 be_cmd_pmac_del(adapter, adapter->if_handle,
2568 adapter->pmac_id[i], 0);
2569
Sathya Perlaf9449ab2011-10-24 02:45:01 +00002570 be_cmd_if_destroy(adapter, adapter->if_handle, 0);
Sathya Perlaa54769f2011-10-24 02:45:00 +00002571
2572 be_mcc_queues_destroy(adapter);
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002573 be_rx_cqs_destroy(adapter);
Sathya Perlaa54769f2011-10-24 02:45:00 +00002574 be_tx_queues_destroy(adapter);
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002575 be_evt_queues_destroy(adapter);
Sathya Perlaa54769f2011-10-24 02:45:00 +00002576
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002577 be_msix_disable(adapter);
Sathya Perlaa54769f2011-10-24 02:45:00 +00002578 return 0;
2579}
2580
Sathya Perla39f1d942012-05-08 19:41:24 +00002581static int be_vf_setup_init(struct be_adapter *adapter)
Sathya Perla30128032011-11-10 19:17:57 +00002582{
Sathya Perla11ac75e2011-12-13 00:58:50 +00002583 struct be_vf_cfg *vf_cfg;
Sathya Perla30128032011-11-10 19:17:57 +00002584 int vf;
2585
Sathya Perla39f1d942012-05-08 19:41:24 +00002586 adapter->vf_cfg = kcalloc(adapter->num_vfs, sizeof(*vf_cfg),
2587 GFP_KERNEL);
2588 if (!adapter->vf_cfg)
2589 return -ENOMEM;
2590
Sathya Perla11ac75e2011-12-13 00:58:50 +00002591 for_all_vfs(adapter, vf_cfg, vf) {
2592 vf_cfg->if_handle = -1;
2593 vf_cfg->pmac_id = -1;
Sathya Perla30128032011-11-10 19:17:57 +00002594 }
Sathya Perla39f1d942012-05-08 19:41:24 +00002595 return 0;
Sathya Perla30128032011-11-10 19:17:57 +00002596}
2597
Sathya Perlaf9449ab2011-10-24 02:45:01 +00002598static int be_vf_setup(struct be_adapter *adapter)
2599{
Sathya Perla11ac75e2011-12-13 00:58:50 +00002600 struct be_vf_cfg *vf_cfg;
Sathya Perla39f1d942012-05-08 19:41:24 +00002601 struct device *dev = &adapter->pdev->dev;
Sathya Perlaf9449ab2011-10-24 02:45:01 +00002602 u32 cap_flags, en_flags, vf;
Ajit Khapardef1f3ee12012-03-18 06:23:41 +00002603 u16 def_vlan, lnk_speed;
Sathya Perla39f1d942012-05-08 19:41:24 +00002604 int status, enabled_vfs;
Sathya Perlaf9449ab2011-10-24 02:45:01 +00002605
Sathya Perla39f1d942012-05-08 19:41:24 +00002606 enabled_vfs = be_find_vfs(adapter, ENABLED);
2607 if (enabled_vfs) {
2608 dev_warn(dev, "%d VFs are already enabled\n", enabled_vfs);
2609 dev_warn(dev, "Ignoring num_vfs=%d setting\n", num_vfs);
2610 return 0;
2611 }
2612
2613 if (num_vfs > adapter->dev_num_vfs) {
2614 dev_warn(dev, "Device supports %d VFs and not %d\n",
2615 adapter->dev_num_vfs, num_vfs);
2616 num_vfs = adapter->dev_num_vfs;
2617 }
2618
2619 status = pci_enable_sriov(adapter->pdev, num_vfs);
2620 if (!status) {
2621 adapter->num_vfs = num_vfs;
2622 } else {
2623 /* Platform doesn't support SRIOV though device supports it */
2624 dev_warn(dev, "SRIOV enable failed\n");
2625 return 0;
2626 }
2627
2628 status = be_vf_setup_init(adapter);
2629 if (status)
2630 goto err;
Sathya Perla30128032011-11-10 19:17:57 +00002631
Padmanabh Ratnakar590c3912011-11-25 05:47:26 +00002632 cap_flags = en_flags = BE_IF_FLAGS_UNTAGGED | BE_IF_FLAGS_BROADCAST |
2633 BE_IF_FLAGS_MULTICAST;
Sathya Perla11ac75e2011-12-13 00:58:50 +00002634 for_all_vfs(adapter, vf_cfg, vf) {
Padmanabh Ratnakar1578e772012-06-07 04:37:08 +00002635 status = be_cmd_if_create(adapter, cap_flags, en_flags,
2636 &vf_cfg->if_handle, vf + 1);
Sathya Perlaf9449ab2011-10-24 02:45:01 +00002637 if (status)
2638 goto err;
Sathya Perlaf9449ab2011-10-24 02:45:01 +00002639 }
2640
Sathya Perla39f1d942012-05-08 19:41:24 +00002641 if (!enabled_vfs) {
2642 status = be_vf_eth_addr_config(adapter);
2643 if (status)
2644 goto err;
2645 }
Sathya Perlaf9449ab2011-10-24 02:45:01 +00002646
Sathya Perla11ac75e2011-12-13 00:58:50 +00002647 for_all_vfs(adapter, vf_cfg, vf) {
Vasundhara Volam8a046d32012-08-28 20:37:42 +00002648 lnk_speed = 1000;
2649 status = be_cmd_set_qos(adapter, lnk_speed, vf + 1);
Sathya Perlaf9449ab2011-10-24 02:45:01 +00002650 if (status)
2651 goto err;
Sathya Perla11ac75e2011-12-13 00:58:50 +00002652 vf_cfg->tx_rate = lnk_speed * 10;
Ajit Khapardef1f3ee12012-03-18 06:23:41 +00002653
2654 status = be_cmd_get_hsw_config(adapter, &def_vlan,
2655 vf + 1, vf_cfg->if_handle);
2656 if (status)
2657 goto err;
2658 vf_cfg->def_vid = def_vlan;
Sathya Perlaf9449ab2011-10-24 02:45:01 +00002659 }
2660 return 0;
2661err:
2662 return status;
2663}
2664
Sathya Perla30128032011-11-10 19:17:57 +00002665static void be_setup_init(struct be_adapter *adapter)
2666{
2667 adapter->vlan_prio_bmap = 0xff;
Ajit Khaparde42f11cf2012-04-21 18:53:22 +00002668 adapter->phy.link_speed = -1;
Sathya Perla30128032011-11-10 19:17:57 +00002669 adapter->if_handle = -1;
2670 adapter->be3_native = false;
2671 adapter->promiscuous = false;
2672 adapter->eq_next_idx = 0;
Ajit Khaparde42f11cf2012-04-21 18:53:22 +00002673 adapter->phy.forced_port_speed = -1;
Sathya Perla30128032011-11-10 19:17:57 +00002674}
2675
Padmanabh Ratnakar1578e772012-06-07 04:37:08 +00002676static int be_get_mac_addr(struct be_adapter *adapter, u8 *mac, u32 if_handle,
2677 bool *active_mac, u32 *pmac_id)
Padmanabh Ratnakar590c3912011-11-25 05:47:26 +00002678{
Padmanabh Ratnakar1578e772012-06-07 04:37:08 +00002679 int status = 0;
Padmanabh Ratnakare5e1ee82012-02-03 09:50:17 +00002680
Padmanabh Ratnakar1578e772012-06-07 04:37:08 +00002681 if (!is_zero_ether_addr(adapter->netdev->perm_addr)) {
2682 memcpy(mac, adapter->netdev->dev_addr, ETH_ALEN);
2683 if (!lancer_chip(adapter) && !be_physfn(adapter))
2684 *active_mac = true;
2685 else
2686 *active_mac = false;
Padmanabh Ratnakare5e1ee82012-02-03 09:50:17 +00002687
Padmanabh Ratnakar1578e772012-06-07 04:37:08 +00002688 return status;
Padmanabh Ratnakare5e1ee82012-02-03 09:50:17 +00002689 }
Padmanabh Ratnakar1578e772012-06-07 04:37:08 +00002690
2691 if (lancer_chip(adapter)) {
2692 status = be_cmd_get_mac_from_list(adapter, mac,
2693 active_mac, pmac_id, 0);
2694 if (*active_mac) {
2695 status = be_cmd_mac_addr_query(adapter, mac,
2696 MAC_ADDRESS_TYPE_NETWORK,
2697 false, if_handle,
2698 *pmac_id);
2699 }
2700 } else if (be_physfn(adapter)) {
2701 /* For BE3, for PF get permanent MAC */
2702 status = be_cmd_mac_addr_query(adapter, mac,
2703 MAC_ADDRESS_TYPE_NETWORK, true,
2704 0, 0);
2705 *active_mac = false;
2706 } else {
2707 /* For BE3, for VF get soft MAC assigned by PF*/
2708 status = be_cmd_mac_addr_query(adapter, mac,
2709 MAC_ADDRESS_TYPE_NETWORK, false,
2710 if_handle, 0);
2711 *active_mac = true;
2712 }
Padmanabh Ratnakar590c3912011-11-25 05:47:26 +00002713 return status;
2714}
2715
Sathya Perla39f1d942012-05-08 19:41:24 +00002716/* Routine to query per function resource limits */
2717static int be_get_config(struct be_adapter *adapter)
2718{
2719 int pos;
2720 u16 dev_num_vfs;
2721
2722 pos = pci_find_ext_capability(adapter->pdev, PCI_EXT_CAP_ID_SRIOV);
2723 if (pos) {
2724 pci_read_config_word(adapter->pdev, pos + PCI_SRIOV_TOTAL_VF,
2725 &dev_num_vfs);
Vasundhara Volam7c5a5242012-08-28 20:37:41 +00002726 if (!lancer_chip(adapter))
2727 dev_num_vfs = min_t(u16, dev_num_vfs, MAX_VFS);
Sathya Perla39f1d942012-05-08 19:41:24 +00002728 adapter->dev_num_vfs = dev_num_vfs;
2729 }
2730 return 0;
2731}
2732
Sathya Perla5fb379e2009-06-18 00:02:59 +00002733static int be_setup(struct be_adapter *adapter)
2734{
Sathya Perla39f1d942012-05-08 19:41:24 +00002735 struct device *dev = &adapter->pdev->dev;
Sathya Perlaf9449ab2011-10-24 02:45:01 +00002736 u32 cap_flags, en_flags;
Sathya Perlaa54769f2011-10-24 02:45:00 +00002737 u32 tx_fc, rx_fc;
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002738 int status;
Sarveshwar Bandiba343c72010-03-31 02:56:12 +00002739 u8 mac[ETH_ALEN];
Padmanabh Ratnakar1578e772012-06-07 04:37:08 +00002740 bool active_mac;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002741
Sathya Perla30128032011-11-10 19:17:57 +00002742 be_setup_init(adapter);
Sathya Perlaf9449ab2011-10-24 02:45:01 +00002743
Sathya Perla39f1d942012-05-08 19:41:24 +00002744 be_get_config(adapter);
2745
Sathya Perla2dc1deb2011-07-19 19:52:33 +00002746 be_cmd_req_native_mode(adapter);
2747
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002748 be_msix_enable(adapter);
2749
2750 status = be_evt_queues_create(adapter);
2751 if (status)
Sathya Perlaa54769f2011-10-24 02:45:00 +00002752 goto err;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002753
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002754 status = be_tx_cqs_create(adapter);
2755 if (status)
2756 goto err;
2757
2758 status = be_rx_cqs_create(adapter);
2759 if (status)
Sathya Perlaa54769f2011-10-24 02:45:00 +00002760 goto err;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002761
Sathya Perla5fb379e2009-06-18 00:02:59 +00002762 status = be_mcc_queues_create(adapter);
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002763 if (status)
Sathya Perlaa54769f2011-10-24 02:45:00 +00002764 goto err;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002765
Sathya Perlaf9449ab2011-10-24 02:45:01 +00002766 en_flags = BE_IF_FLAGS_UNTAGGED | BE_IF_FLAGS_BROADCAST |
2767 BE_IF_FLAGS_MULTICAST | BE_IF_FLAGS_PASS_L3L4_ERRORS;
2768 cap_flags = en_flags | BE_IF_FLAGS_MCAST_PROMISCUOUS |
Padmanabh Ratnakar5d5adb92011-11-16 02:03:32 +00002769 BE_IF_FLAGS_VLAN_PROMISCUOUS | BE_IF_FLAGS_PROMISCUOUS;
2770
Sathya Perlaf9449ab2011-10-24 02:45:01 +00002771 if (adapter->function_caps & BE_FUNCTION_CAPS_RSS) {
2772 cap_flags |= BE_IF_FLAGS_RSS;
2773 en_flags |= BE_IF_FLAGS_RSS;
2774 }
Padmanabh Ratnakar1578e772012-06-07 04:37:08 +00002775
Padmanabh Ratnakar0b13fb42012-07-18 02:51:58 +00002776 if (lancer_chip(adapter) && !be_physfn(adapter)) {
2777 en_flags = BE_IF_FLAGS_UNTAGGED |
2778 BE_IF_FLAGS_BROADCAST |
2779 BE_IF_FLAGS_MULTICAST;
2780 cap_flags = en_flags;
2781 }
2782
Sathya Perlaf9449ab2011-10-24 02:45:01 +00002783 status = be_cmd_if_create(adapter, cap_flags, en_flags,
Padmanabh Ratnakar1578e772012-06-07 04:37:08 +00002784 &adapter->if_handle, 0);
Sathya Perlaf9449ab2011-10-24 02:45:01 +00002785 if (status != 0)
2786 goto err;
2787
Padmanabh Ratnakar1578e772012-06-07 04:37:08 +00002788 memset(mac, 0, ETH_ALEN);
2789 active_mac = false;
2790 status = be_get_mac_addr(adapter, mac, adapter->if_handle,
2791 &active_mac, &adapter->pmac_id[0]);
2792 if (status != 0)
2793 goto err;
2794
2795 if (!active_mac) {
2796 status = be_cmd_pmac_add(adapter, mac, adapter->if_handle,
2797 &adapter->pmac_id[0], 0);
2798 if (status != 0)
2799 goto err;
2800 }
2801
2802 if (is_zero_ether_addr(adapter->netdev->dev_addr)) {
2803 memcpy(adapter->netdev->dev_addr, mac, ETH_ALEN);
2804 memcpy(adapter->netdev->perm_addr, mac, ETH_ALEN);
Sathya Perlaf9449ab2011-10-24 02:45:01 +00002805 }
Ajit Khaparde0dffc832009-11-29 17:57:46 +00002806
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002807 status = be_tx_qs_create(adapter);
2808 if (status)
2809 goto err;
2810
Sathya Perla04b71172011-09-27 13:30:27 -04002811 be_cmd_get_fw_ver(adapter, adapter->fw_ver, NULL);
Somnath Kotur5a56eb12011-09-30 07:24:28 +00002812
Sathya Perla1d1e9a42012-06-05 19:37:17 +00002813 if (adapter->vlans_added)
Sathya Perla10329df2012-06-05 19:37:18 +00002814 be_vid_config(adapter);
Sathya Perlaa54769f2011-10-24 02:45:00 +00002815
2816 be_set_rx_mode(adapter->netdev);
2817
Ajit Khapardeddc3f5c2012-04-26 15:42:31 +00002818 be_cmd_get_flow_control(adapter, &tx_fc, &rx_fc);
Padmanabh Ratnakar590c3912011-11-25 05:47:26 +00002819
Ajit Khapardeddc3f5c2012-04-26 15:42:31 +00002820 if (rx_fc != adapter->rx_fc || tx_fc != adapter->tx_fc)
2821 be_cmd_set_flow_control(adapter, adapter->tx_fc,
Sathya Perlaa54769f2011-10-24 02:45:00 +00002822 adapter->rx_fc);
Sathya Perlaa54769f2011-10-24 02:45:00 +00002823
Sathya Perla39f1d942012-05-08 19:41:24 +00002824 if (be_physfn(adapter) && num_vfs) {
2825 if (adapter->dev_num_vfs)
2826 be_vf_setup(adapter);
2827 else
2828 dev_warn(dev, "device doesn't support SRIOV\n");
Sathya Perlaf9449ab2011-10-24 02:45:01 +00002829 }
2830
Ajit Khaparde42f11cf2012-04-21 18:53:22 +00002831 be_cmd_get_phy_info(adapter);
2832 if (be_pause_supported(adapter))
2833 adapter->phy.fc_autoneg = 1;
2834
Sathya Perla191eb752012-02-23 18:50:13 +00002835 schedule_delayed_work(&adapter->work, msecs_to_jiffies(1000));
2836 adapter->flags |= BE_FLAGS_WORKER_SCHEDULED;
Sathya Perlaf9449ab2011-10-24 02:45:01 +00002837 return 0;
Sathya Perlaa54769f2011-10-24 02:45:00 +00002838err:
2839 be_clear(adapter);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002840 return status;
2841}
2842
Ivan Vecera66268732011-12-08 01:31:21 +00002843#ifdef CONFIG_NET_POLL_CONTROLLER
2844static void be_netpoll(struct net_device *netdev)
2845{
2846 struct be_adapter *adapter = netdev_priv(netdev);
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002847 struct be_eq_obj *eqo;
Ivan Vecera66268732011-12-08 01:31:21 +00002848 int i;
2849
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002850 for_all_evt_queues(adapter, eqo, i)
2851 event_handle(eqo);
2852
2853 return;
Ivan Vecera66268732011-12-08 01:31:21 +00002854}
2855#endif
2856
Ajit Khaparde84517482009-09-04 03:12:16 +00002857#define FW_FILE_HDR_SIGN "ServerEngines Corp. "
Padmanabh Ratnakarc165541e2012-04-25 01:47:15 +00002858char flash_cookie[2][16] = {"*** SE FLAS", "H DIRECTORY *** "};
2859
Sarveshwar Bandifa9a6fe2009-11-20 14:23:47 -08002860static bool be_flash_redboot(struct be_adapter *adapter,
Ajit Khaparde3f0d4562010-02-09 01:30:35 +00002861 const u8 *p, u32 img_start, int image_size,
2862 int hdr_size)
Sarveshwar Bandifa9a6fe2009-11-20 14:23:47 -08002863{
2864 u32 crc_offset;
2865 u8 flashed_crc[4];
2866 int status;
Ajit Khaparde3f0d4562010-02-09 01:30:35 +00002867
2868 crc_offset = hdr_size + img_start + image_size - 4;
2869
Sarveshwar Bandifa9a6fe2009-11-20 14:23:47 -08002870 p += crc_offset;
Ajit Khaparde3f0d4562010-02-09 01:30:35 +00002871
2872 status = be_cmd_get_flash_crc(adapter, flashed_crc,
Ajit Khapardef510fc62010-03-31 01:47:45 +00002873 (image_size - 4));
Sarveshwar Bandifa9a6fe2009-11-20 14:23:47 -08002874 if (status) {
2875 dev_err(&adapter->pdev->dev,
2876 "could not get crc from flash, not flashing redboot\n");
2877 return false;
2878 }
2879
2880 /*update redboot only if crc does not match*/
2881 if (!memcmp(flashed_crc, p, 4))
2882 return false;
2883 else
2884 return true;
Sarveshwar Bandifa9a6fe2009-11-20 14:23:47 -08002885}
2886
Sathya Perla306f1342011-08-02 19:57:45 +00002887static bool phy_flashing_required(struct be_adapter *adapter)
2888{
Ajit Khaparde42f11cf2012-04-21 18:53:22 +00002889 return (adapter->phy.phy_type == TN_8022 &&
2890 adapter->phy.interface_type == PHY_TYPE_BASET_10GB);
Sathya Perla306f1342011-08-02 19:57:45 +00002891}
2892
Padmanabh Ratnakarc165541e2012-04-25 01:47:15 +00002893static bool is_comp_in_ufi(struct be_adapter *adapter,
2894 struct flash_section_info *fsec, int type)
2895{
2896 int i = 0, img_type = 0;
2897 struct flash_section_info_g2 *fsec_g2 = NULL;
2898
2899 if (adapter->generation != BE_GEN3)
2900 fsec_g2 = (struct flash_section_info_g2 *)fsec;
2901
2902 for (i = 0; i < MAX_FLASH_COMP; i++) {
2903 if (fsec_g2)
2904 img_type = le32_to_cpu(fsec_g2->fsec_entry[i].type);
2905 else
2906 img_type = le32_to_cpu(fsec->fsec_entry[i].type);
2907
2908 if (img_type == type)
2909 return true;
2910 }
2911 return false;
2912
2913}
2914
2915struct flash_section_info *get_fsec_info(struct be_adapter *adapter,
2916 int header_size,
2917 const struct firmware *fw)
2918{
2919 struct flash_section_info *fsec = NULL;
2920 const u8 *p = fw->data;
2921
2922 p += header_size;
2923 while (p < (fw->data + fw->size)) {
2924 fsec = (struct flash_section_info *)p;
2925 if (!memcmp(flash_cookie, fsec->cookie, sizeof(flash_cookie)))
2926 return fsec;
2927 p += 32;
2928 }
2929 return NULL;
2930}
2931
Ajit Khaparde3f0d4562010-02-09 01:30:35 +00002932static int be_flash_data(struct be_adapter *adapter,
Padmanabh Ratnakarc165541e2012-04-25 01:47:15 +00002933 const struct firmware *fw,
2934 struct be_dma_mem *flash_cmd,
2935 int num_of_images)
Ajit Khaparde3f0d4562010-02-09 01:30:35 +00002936
Ajit Khaparde84517482009-09-04 03:12:16 +00002937{
Ajit Khaparde3f0d4562010-02-09 01:30:35 +00002938 int status = 0, i, filehdr_size = 0;
Padmanabh Ratnakarc165541e2012-04-25 01:47:15 +00002939 int img_hdrs_size = (num_of_images * sizeof(struct image_hdr));
Ajit Khaparde3f0d4562010-02-09 01:30:35 +00002940 u32 total_bytes = 0, flash_op;
Ajit Khaparde84517482009-09-04 03:12:16 +00002941 int num_bytes;
2942 const u8 *p = fw->data;
2943 struct be_cmd_write_flashrom *req = flash_cmd->va;
Joe Perches215faf92010-12-21 02:16:10 -08002944 const struct flash_comp *pflashcomp;
Padmanabh Ratnakarc165541e2012-04-25 01:47:15 +00002945 int num_comp, hdr_size;
2946 struct flash_section_info *fsec = NULL;
Ajit Khaparde84517482009-09-04 03:12:16 +00002947
Padmanabh Ratnakarc165541e2012-04-25 01:47:15 +00002948 struct flash_comp gen3_flash_types[] = {
2949 { FLASH_iSCSI_PRIMARY_IMAGE_START_g3, OPTYPE_ISCSI_ACTIVE,
2950 FLASH_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_iSCSI},
2951 { FLASH_REDBOOT_START_g3, OPTYPE_REDBOOT,
2952 FLASH_REDBOOT_IMAGE_MAX_SIZE_g3, IMAGE_BOOT_CODE},
2953 { FLASH_iSCSI_BIOS_START_g3, OPTYPE_BIOS,
2954 FLASH_BIOS_IMAGE_MAX_SIZE_g3, IMAGE_OPTION_ROM_ISCSI},
2955 { FLASH_PXE_BIOS_START_g3, OPTYPE_PXE_BIOS,
2956 FLASH_BIOS_IMAGE_MAX_SIZE_g3, IMAGE_OPTION_ROM_PXE},
2957 { FLASH_FCoE_BIOS_START_g3, OPTYPE_FCOE_BIOS,
2958 FLASH_BIOS_IMAGE_MAX_SIZE_g3, IMAGE_OPTION_ROM_FCoE},
2959 { FLASH_iSCSI_BACKUP_IMAGE_START_g3, OPTYPE_ISCSI_BACKUP,
2960 FLASH_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_BACKUP_iSCSI},
2961 { FLASH_FCoE_PRIMARY_IMAGE_START_g3, OPTYPE_FCOE_FW_ACTIVE,
2962 FLASH_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_FCoE},
2963 { FLASH_FCoE_BACKUP_IMAGE_START_g3, OPTYPE_FCOE_FW_BACKUP,
2964 FLASH_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_BACKUP_FCoE},
2965 { FLASH_NCSI_START_g3, OPTYPE_NCSI_FW,
2966 FLASH_NCSI_IMAGE_MAX_SIZE_g3, IMAGE_NCSI},
2967 { FLASH_PHY_FW_START_g3, OPTYPE_PHY_FW,
2968 FLASH_PHY_FW_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_PHY}
Ajit Khaparde3f0d4562010-02-09 01:30:35 +00002969 };
Padmanabh Ratnakarc165541e2012-04-25 01:47:15 +00002970
2971 struct flash_comp gen2_flash_types[] = {
2972 { FLASH_iSCSI_PRIMARY_IMAGE_START_g2, OPTYPE_ISCSI_ACTIVE,
2973 FLASH_IMAGE_MAX_SIZE_g2, IMAGE_FIRMWARE_iSCSI},
2974 { FLASH_REDBOOT_START_g2, OPTYPE_REDBOOT,
2975 FLASH_REDBOOT_IMAGE_MAX_SIZE_g2, IMAGE_BOOT_CODE},
2976 { FLASH_iSCSI_BIOS_START_g2, OPTYPE_BIOS,
2977 FLASH_BIOS_IMAGE_MAX_SIZE_g2, IMAGE_OPTION_ROM_ISCSI},
2978 { FLASH_PXE_BIOS_START_g2, OPTYPE_PXE_BIOS,
2979 FLASH_BIOS_IMAGE_MAX_SIZE_g2, IMAGE_OPTION_ROM_PXE},
2980 { FLASH_FCoE_BIOS_START_g2, OPTYPE_FCOE_BIOS,
2981 FLASH_BIOS_IMAGE_MAX_SIZE_g2, IMAGE_OPTION_ROM_FCoE},
2982 { FLASH_iSCSI_BACKUP_IMAGE_START_g2, OPTYPE_ISCSI_BACKUP,
2983 FLASH_IMAGE_MAX_SIZE_g2, IMAGE_FIRMWARE_BACKUP_iSCSI},
2984 { FLASH_FCoE_PRIMARY_IMAGE_START_g2, OPTYPE_FCOE_FW_ACTIVE,
2985 FLASH_IMAGE_MAX_SIZE_g2, IMAGE_FIRMWARE_FCoE},
2986 { FLASH_FCoE_BACKUP_IMAGE_START_g2, OPTYPE_FCOE_FW_BACKUP,
2987 FLASH_IMAGE_MAX_SIZE_g2, IMAGE_FIRMWARE_BACKUP_FCoE}
Ajit Khaparde3f0d4562010-02-09 01:30:35 +00002988 };
2989
2990 if (adapter->generation == BE_GEN3) {
2991 pflashcomp = gen3_flash_types;
2992 filehdr_size = sizeof(struct flash_file_hdr_g3);
Joe Perches215faf92010-12-21 02:16:10 -08002993 num_comp = ARRAY_SIZE(gen3_flash_types);
Ajit Khaparde3f0d4562010-02-09 01:30:35 +00002994 } else {
2995 pflashcomp = gen2_flash_types;
2996 filehdr_size = sizeof(struct flash_file_hdr_g2);
Joe Perches215faf92010-12-21 02:16:10 -08002997 num_comp = ARRAY_SIZE(gen2_flash_types);
Ajit Khaparde84517482009-09-04 03:12:16 +00002998 }
Padmanabh Ratnakarc165541e2012-04-25 01:47:15 +00002999 /* Get flash section info*/
3000 fsec = get_fsec_info(adapter, filehdr_size + img_hdrs_size, fw);
3001 if (!fsec) {
3002 dev_err(&adapter->pdev->dev,
3003 "Invalid Cookie. UFI corrupted ?\n");
3004 return -1;
3005 }
Sarveshwar Bandi9fe96932010-03-02 22:37:28 +00003006 for (i = 0; i < num_comp; i++) {
Padmanabh Ratnakarc165541e2012-04-25 01:47:15 +00003007 if (!is_comp_in_ufi(adapter, fsec, pflashcomp[i].img_type))
Sarveshwar Bandi9fe96932010-03-02 22:37:28 +00003008 continue;
Padmanabh Ratnakarc165541e2012-04-25 01:47:15 +00003009
3010 if ((pflashcomp[i].optype == OPTYPE_NCSI_FW) &&
3011 memcmp(adapter->fw_ver, "3.102.148.0", 11) < 0)
3012 continue;
3013
3014 if (pflashcomp[i].optype == OPTYPE_PHY_FW) {
Sathya Perla306f1342011-08-02 19:57:45 +00003015 if (!phy_flashing_required(adapter))
3016 continue;
3017 }
Padmanabh Ratnakarc165541e2012-04-25 01:47:15 +00003018
3019 hdr_size = filehdr_size +
3020 (num_of_images * sizeof(struct image_hdr));
3021
3022 if ((pflashcomp[i].optype == OPTYPE_REDBOOT) &&
3023 (!be_flash_redboot(adapter, fw->data, pflashcomp[i].offset,
3024 pflashcomp[i].size, hdr_size)))
Ajit Khaparde3f0d4562010-02-09 01:30:35 +00003025 continue;
Padmanabh Ratnakarc165541e2012-04-25 01:47:15 +00003026
3027 /* Flash the component */
Ajit Khaparde3f0d4562010-02-09 01:30:35 +00003028 p = fw->data;
Padmanabh Ratnakarc165541e2012-04-25 01:47:15 +00003029 p += filehdr_size + pflashcomp[i].offset + img_hdrs_size;
Sathya Perla306f1342011-08-02 19:57:45 +00003030 if (p + pflashcomp[i].size > fw->data + fw->size)
3031 return -1;
3032 total_bytes = pflashcomp[i].size;
Ajit Khaparde3f0d4562010-02-09 01:30:35 +00003033 while (total_bytes) {
3034 if (total_bytes > 32*1024)
3035 num_bytes = 32*1024;
3036 else
3037 num_bytes = total_bytes;
3038 total_bytes -= num_bytes;
Sathya Perla306f1342011-08-02 19:57:45 +00003039 if (!total_bytes) {
Padmanabh Ratnakarc165541e2012-04-25 01:47:15 +00003040 if (pflashcomp[i].optype == OPTYPE_PHY_FW)
Sathya Perla306f1342011-08-02 19:57:45 +00003041 flash_op = FLASHROM_OPER_PHY_FLASH;
3042 else
3043 flash_op = FLASHROM_OPER_FLASH;
3044 } else {
Padmanabh Ratnakarc165541e2012-04-25 01:47:15 +00003045 if (pflashcomp[i].optype == OPTYPE_PHY_FW)
Sathya Perla306f1342011-08-02 19:57:45 +00003046 flash_op = FLASHROM_OPER_PHY_SAVE;
3047 else
3048 flash_op = FLASHROM_OPER_SAVE;
3049 }
Ajit Khaparde3f0d4562010-02-09 01:30:35 +00003050 memcpy(req->params.data_buf, p, num_bytes);
3051 p += num_bytes;
3052 status = be_cmd_write_flashrom(adapter, flash_cmd,
3053 pflashcomp[i].optype, flash_op, num_bytes);
3054 if (status) {
Sathya Perla306f1342011-08-02 19:57:45 +00003055 if ((status == ILLEGAL_IOCTL_REQ) &&
3056 (pflashcomp[i].optype ==
Padmanabh Ratnakarc165541e2012-04-25 01:47:15 +00003057 OPTYPE_PHY_FW))
Sathya Perla306f1342011-08-02 19:57:45 +00003058 break;
Ajit Khaparde3f0d4562010-02-09 01:30:35 +00003059 dev_err(&adapter->pdev->dev,
3060 "cmd to write to flash rom failed.\n");
3061 return -1;
3062 }
Ajit Khaparde84517482009-09-04 03:12:16 +00003063 }
Ajit Khaparde84517482009-09-04 03:12:16 +00003064 }
Ajit Khaparde84517482009-09-04 03:12:16 +00003065 return 0;
3066}
3067
Ajit Khaparde3f0d4562010-02-09 01:30:35 +00003068static int get_ufigen_type(struct flash_file_hdr_g2 *fhdr)
3069{
3070 if (fhdr == NULL)
3071 return 0;
3072 if (fhdr->build[0] == '3')
3073 return BE_GEN3;
3074 else if (fhdr->build[0] == '2')
3075 return BE_GEN2;
3076 else
3077 return 0;
3078}
3079
Padmanabh Ratnakarf67ef7b2012-07-12 03:57:09 +00003080static int lancer_wait_idle(struct be_adapter *adapter)
3081{
3082#define SLIPORT_IDLE_TIMEOUT 30
3083 u32 reg_val;
3084 int status = 0, i;
3085
3086 for (i = 0; i < SLIPORT_IDLE_TIMEOUT; i++) {
3087 reg_val = ioread32(adapter->db + PHYSDEV_CONTROL_OFFSET);
3088 if ((reg_val & PHYSDEV_CONTROL_INP_MASK) == 0)
3089 break;
3090
3091 ssleep(1);
3092 }
3093
3094 if (i == SLIPORT_IDLE_TIMEOUT)
3095 status = -1;
3096
3097 return status;
3098}
3099
3100static int lancer_fw_reset(struct be_adapter *adapter)
3101{
3102 int status = 0;
3103
3104 status = lancer_wait_idle(adapter);
3105 if (status)
3106 return status;
3107
3108 iowrite32(PHYSDEV_CONTROL_FW_RESET_MASK, adapter->db +
3109 PHYSDEV_CONTROL_OFFSET);
3110
3111 return status;
3112}
3113
Shripad Nunjundarao485bf562011-05-16 07:36:59 +00003114static int lancer_fw_download(struct be_adapter *adapter,
3115 const struct firmware *fw)
Ajit Khaparde84517482009-09-04 03:12:16 +00003116{
Shripad Nunjundarao485bf562011-05-16 07:36:59 +00003117#define LANCER_FW_DOWNLOAD_CHUNK (32 * 1024)
3118#define LANCER_FW_DOWNLOAD_LOCATION "/prg"
3119 struct be_dma_mem flash_cmd;
Shripad Nunjundarao485bf562011-05-16 07:36:59 +00003120 const u8 *data_ptr = NULL;
3121 u8 *dest_image_ptr = NULL;
3122 size_t image_size = 0;
3123 u32 chunk_size = 0;
3124 u32 data_written = 0;
3125 u32 offset = 0;
3126 int status = 0;
3127 u8 add_status = 0;
Padmanabh Ratnakarf67ef7b2012-07-12 03:57:09 +00003128 u8 change_status;
Shripad Nunjundarao485bf562011-05-16 07:36:59 +00003129
3130 if (!IS_ALIGNED(fw->size, sizeof(u32))) {
3131 dev_err(&adapter->pdev->dev,
3132 "FW Image not properly aligned. "
3133 "Length must be 4 byte aligned.\n");
3134 status = -EINVAL;
3135 goto lancer_fw_exit;
3136 }
3137
3138 flash_cmd.size = sizeof(struct lancer_cmd_req_write_object)
3139 + LANCER_FW_DOWNLOAD_CHUNK;
3140 flash_cmd.va = dma_alloc_coherent(&adapter->pdev->dev, flash_cmd.size,
3141 &flash_cmd.dma, GFP_KERNEL);
3142 if (!flash_cmd.va) {
3143 status = -ENOMEM;
3144 dev_err(&adapter->pdev->dev,
3145 "Memory allocation failure while flashing\n");
3146 goto lancer_fw_exit;
3147 }
3148
Shripad Nunjundarao485bf562011-05-16 07:36:59 +00003149 dest_image_ptr = flash_cmd.va +
3150 sizeof(struct lancer_cmd_req_write_object);
3151 image_size = fw->size;
3152 data_ptr = fw->data;
3153
3154 while (image_size) {
3155 chunk_size = min_t(u32, image_size, LANCER_FW_DOWNLOAD_CHUNK);
3156
3157 /* Copy the image chunk content. */
3158 memcpy(dest_image_ptr, data_ptr, chunk_size);
3159
3160 status = lancer_cmd_write_object(adapter, &flash_cmd,
Padmanabh Ratnakarf67ef7b2012-07-12 03:57:09 +00003161 chunk_size, offset,
3162 LANCER_FW_DOWNLOAD_LOCATION,
3163 &data_written, &change_status,
3164 &add_status);
Shripad Nunjundarao485bf562011-05-16 07:36:59 +00003165 if (status)
3166 break;
3167
3168 offset += data_written;
3169 data_ptr += data_written;
3170 image_size -= data_written;
3171 }
3172
3173 if (!status) {
3174 /* Commit the FW written */
3175 status = lancer_cmd_write_object(adapter, &flash_cmd,
Padmanabh Ratnakarf67ef7b2012-07-12 03:57:09 +00003176 0, offset,
3177 LANCER_FW_DOWNLOAD_LOCATION,
3178 &data_written, &change_status,
3179 &add_status);
Shripad Nunjundarao485bf562011-05-16 07:36:59 +00003180 }
3181
3182 dma_free_coherent(&adapter->pdev->dev, flash_cmd.size, flash_cmd.va,
3183 flash_cmd.dma);
3184 if (status) {
3185 dev_err(&adapter->pdev->dev,
3186 "Firmware load error. "
3187 "Status code: 0x%x Additional Status: 0x%x\n",
3188 status, add_status);
3189 goto lancer_fw_exit;
3190 }
3191
Padmanabh Ratnakarf67ef7b2012-07-12 03:57:09 +00003192 if (change_status == LANCER_FW_RESET_NEEDED) {
3193 status = lancer_fw_reset(adapter);
3194 if (status) {
3195 dev_err(&adapter->pdev->dev,
3196 "Adapter busy for FW reset.\n"
3197 "New FW will not be active.\n");
3198 goto lancer_fw_exit;
3199 }
3200 } else if (change_status != LANCER_NO_RESET_NEEDED) {
3201 dev_err(&adapter->pdev->dev,
3202 "System reboot required for new FW"
3203 " to be active\n");
3204 }
3205
Shripad Nunjundarao485bf562011-05-16 07:36:59 +00003206 dev_info(&adapter->pdev->dev, "Firmware flashed successfully\n");
3207lancer_fw_exit:
3208 return status;
3209}
3210
/* Download a firmware image to a BE2/BE3 chip.  Validates that the UFI
 * file generation matches the adapter generation, then hands each
 * matching image (imageid == 1) to be_flash_data() along with a DMA
 * buffer sized for the cmd header plus one 32KB chunk.
 * Returns 0 on success, -ENOMEM/-1 or a FW status on failure.
 */
static int be_fw_download(struct be_adapter *adapter, const struct firmware* fw)
{
	struct flash_file_hdr_g2 *fhdr;
	struct flash_file_hdr_g3 *fhdr3;
	struct image_hdr *img_hdr_ptr = NULL;
	struct be_dma_mem flash_cmd;
	const u8 *p;
	int status = 0, i = 0, num_imgs = 0;

	p = fw->data;
	/* g2 header layout is a prefix of g3, so this cast is safe for
	 * the generation check below */
	fhdr = (struct flash_file_hdr_g2 *) p;

	flash_cmd.size = sizeof(struct be_cmd_write_flashrom) + 32*1024;
	flash_cmd.va = dma_alloc_coherent(&adapter->pdev->dev, flash_cmd.size,
					  &flash_cmd.dma, GFP_KERNEL);
	if (!flash_cmd.va) {
		status = -ENOMEM;
		dev_err(&adapter->pdev->dev,
			"Memory allocation failure while flashing\n");
		goto be_fw_exit;
	}

	/* UFI generation must match the adapter generation */
	if ((adapter->generation == BE_GEN3) &&
	    (get_ufigen_type(fhdr) == BE_GEN3)) {
		fhdr3 = (struct flash_file_hdr_g3 *) fw->data;
		num_imgs = le32_to_cpu(fhdr3->num_imgs);
		for (i = 0; i < num_imgs; i++) {
			img_hdr_ptr = (struct image_hdr *) (fw->data +
					(sizeof(struct flash_file_hdr_g3) +
					 i * sizeof(struct image_hdr)));
			/* only imageid 1 targets this controller type */
			if (le32_to_cpu(img_hdr_ptr->imageid) == 1)
				status = be_flash_data(adapter, fw, &flash_cmd,
							num_imgs);
		}
	} else if ((adapter->generation == BE_GEN2) &&
		   (get_ufigen_type(fhdr) == BE_GEN2)) {
		status = be_flash_data(adapter, fw, &flash_cmd, 0);
	} else {
		dev_err(&adapter->pdev->dev,
			"UFI and Interface are not compatible for flashing\n");
		status = -1;
	}

	dma_free_coherent(&adapter->pdev->dev, flash_cmd.size, flash_cmd.va,
			  flash_cmd.dma);
	if (status) {
		dev_err(&adapter->pdev->dev, "Firmware load error\n");
		goto be_fw_exit;
	}

	dev_info(&adapter->pdev->dev, "Firmware flashed successfully\n");

be_fw_exit:
	return status;
}
3266
3267int be_load_fw(struct be_adapter *adapter, u8 *fw_file)
3268{
3269 const struct firmware *fw;
3270 int status;
3271
3272 if (!netif_running(adapter->netdev)) {
3273 dev_err(&adapter->pdev->dev,
3274 "Firmware load not allowed (interface is down)\n");
3275 return -1;
3276 }
3277
3278 status = request_firmware(&fw, fw_file, &adapter->pdev->dev);
3279 if (status)
3280 goto fw_exit;
3281
3282 dev_info(&adapter->pdev->dev, "Flashing firmware file %s\n", fw_file);
3283
3284 if (lancer_chip(adapter))
3285 status = lancer_fw_download(adapter, fw);
3286 else
3287 status = be_fw_download(adapter, fw);
3288
Ajit Khaparde84517482009-09-04 03:12:16 +00003289fw_exit:
3290 release_firmware(fw);
3291 return status;
3292}
3293
stephen hemmingere5686ad2012-01-05 19:10:25 +00003294static const struct net_device_ops be_netdev_ops = {
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003295 .ndo_open = be_open,
3296 .ndo_stop = be_close,
3297 .ndo_start_xmit = be_xmit,
Sathya Perlaa54769f2011-10-24 02:45:00 +00003298 .ndo_set_rx_mode = be_set_rx_mode,
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003299 .ndo_set_mac_address = be_mac_addr_set,
3300 .ndo_change_mtu = be_change_mtu,
Sathya Perlaab1594e2011-07-25 19:10:15 +00003301 .ndo_get_stats64 = be_get_stats64,
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003302 .ndo_validate_addr = eth_validate_addr,
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003303 .ndo_vlan_rx_add_vid = be_vlan_add_vid,
3304 .ndo_vlan_rx_kill_vid = be_vlan_rem_vid,
Ajit Khaparde64600ea2010-07-23 01:50:34 +00003305 .ndo_set_vf_mac = be_set_vf_mac,
Ajit Khaparde1da87b72010-07-23 01:51:22 +00003306 .ndo_set_vf_vlan = be_set_vf_vlan,
Ajit Khapardee1d18732010-07-23 01:52:13 +00003307 .ndo_set_vf_tx_rate = be_set_vf_tx_rate,
Ivan Vecera66268732011-12-08 01:31:21 +00003308 .ndo_get_vf_config = be_get_vf_config,
3309#ifdef CONFIG_NET_POLL_CONTROLLER
3310 .ndo_poll_controller = be_netpoll,
3311#endif
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003312};
3313
/* Initialize netdev properties: offload feature flags, ops/ethtool
 * hooks, GSO limit, and one NAPI context per event queue. */
static void be_netdev_init(struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_eq_obj *eqo;
	int i;

	/* user-toggleable offloads: SG, TSO, checksum, VLAN tag insert */
	netdev->hw_features |= NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6 |
		NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_RXCSUM |
		NETIF_F_HW_VLAN_TX;
	/* RX hashing only helps when multiple RX queues exist */
	if (be_multi_rxq(adapter))
		netdev->hw_features |= NETIF_F_RXHASH;

	/* everything above is on by default, plus VLAN strip/filter
	 * (which are not user-toggleable, hence not in hw_features) */
	netdev->features |= netdev->hw_features |
		NETIF_F_HW_VLAN_RX | NETIF_F_HW_VLAN_FILTER;

	/* offloads that also apply to VLAN sub-devices */
	netdev->vlan_features |= NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6 |
		NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;

	/* HW can filter unicast addresses; no need for promisc fallback */
	netdev->priv_flags |= IFF_UNICAST_FLT;

	netdev->flags |= IFF_MULTICAST;

	/* cap GSO so the L2 header fits within the 64KB HW limit */
	netif_set_gso_max_size(netdev, 65535 - ETH_HLEN);

	netdev->netdev_ops = &be_netdev_ops;

	SET_ETHTOOL_OPS(netdev, &be_ethtool_ops);

	/* one NAPI instance per event queue */
	for_all_evt_queues(adapter, eqo, i)
		netif_napi_add(netdev, &eqo->napi, be_poll, BE_NAPI_WEIGHT);
}
3345
3346static void be_unmap_pci_bars(struct be_adapter *adapter)
3347{
Sathya Perla8788fdc2009-07-27 22:52:03 +00003348 if (adapter->csr)
3349 iounmap(adapter->csr);
3350 if (adapter->db)
3351 iounmap(adapter->db);
Parav Pandit045508a2012-03-26 14:27:13 +00003352 if (adapter->roce_db.base)
3353 pci_iounmap(adapter->pdev, adapter->roce_db.base);
3354}
3355
3356static int lancer_roce_map_pci_bars(struct be_adapter *adapter)
3357{
3358 struct pci_dev *pdev = adapter->pdev;
3359 u8 __iomem *addr;
3360
3361 addr = pci_iomap(pdev, 2, 0);
3362 if (addr == NULL)
3363 return -ENOMEM;
3364
3365 adapter->roce_db.base = addr;
3366 adapter->roce_db.io_addr = pci_resource_start(pdev, 2);
3367 adapter->roce_db.size = 8192;
3368 adapter->roce_db.total_size = pci_resource_len(pdev, 2);
3369 return 0;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003370}
3371
3372static int be_map_pci_bars(struct be_adapter *adapter)
3373{
3374 u8 __iomem *addr;
Sathya Perladb3ea782011-08-22 19:41:52 +00003375 int db_reg;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003376
Sathya Perlafe6d2a32010-11-21 23:25:50 +00003377 if (lancer_chip(adapter)) {
Parav Pandit045508a2012-03-26 14:27:13 +00003378 if (be_type_2_3(adapter)) {
3379 addr = ioremap_nocache(
3380 pci_resource_start(adapter->pdev, 0),
3381 pci_resource_len(adapter->pdev, 0));
3382 if (addr == NULL)
3383 return -ENOMEM;
3384 adapter->db = addr;
3385 }
3386 if (adapter->if_type == SLI_INTF_TYPE_3) {
3387 if (lancer_roce_map_pci_bars(adapter))
3388 goto pci_map_err;
3389 }
Sathya Perlafe6d2a32010-11-21 23:25:50 +00003390 return 0;
3391 }
3392
Sarveshwar Bandiba343c72010-03-31 02:56:12 +00003393 if (be_physfn(adapter)) {
3394 addr = ioremap_nocache(pci_resource_start(adapter->pdev, 2),
3395 pci_resource_len(adapter->pdev, 2));
3396 if (addr == NULL)
3397 return -ENOMEM;
3398 adapter->csr = addr;
3399 }
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003400
Sarveshwar Bandiba343c72010-03-31 02:56:12 +00003401 if (adapter->generation == BE_GEN2) {
Sarveshwar Bandiba343c72010-03-31 02:56:12 +00003402 db_reg = 4;
3403 } else {
Sarveshwar Bandiba343c72010-03-31 02:56:12 +00003404 if (be_physfn(adapter))
3405 db_reg = 4;
3406 else
3407 db_reg = 0;
3408 }
3409 addr = ioremap_nocache(pci_resource_start(adapter->pdev, db_reg),
3410 pci_resource_len(adapter->pdev, db_reg));
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003411 if (addr == NULL)
3412 goto pci_map_err;
Sathya Perla8788fdc2009-07-27 22:52:03 +00003413 adapter->db = addr;
Parav Pandit045508a2012-03-26 14:27:13 +00003414 if (adapter->sli_family == SKYHAWK_SLI_FAMILY) {
3415 adapter->roce_db.size = 4096;
3416 adapter->roce_db.io_addr =
3417 pci_resource_start(adapter->pdev, db_reg);
3418 adapter->roce_db.total_size =
3419 pci_resource_len(adapter->pdev, db_reg);
3420 }
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003421 return 0;
3422pci_map_err:
3423 be_unmap_pci_bars(adapter);
3424 return -ENOMEM;
3425}
3426
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003427static void be_ctrl_cleanup(struct be_adapter *adapter)
3428{
Sathya Perla8788fdc2009-07-27 22:52:03 +00003429 struct be_dma_mem *mem = &adapter->mbox_mem_alloced;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003430
3431 be_unmap_pci_bars(adapter);
3432
3433 if (mem->va)
Ivan Vecera2b7bceb2011-02-02 08:05:12 +00003434 dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
3435 mem->dma);
Sathya Perlae7b909a2009-11-22 22:01:10 +00003436
Sathya Perla5b8821b2011-08-02 19:57:44 +00003437 mem = &adapter->rx_filter;
Sathya Perlae7b909a2009-11-22 22:01:10 +00003438 if (mem->va)
Ivan Vecera2b7bceb2011-02-02 08:05:12 +00003439 dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
3440 mem->dma);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003441}
3442
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003443static int be_ctrl_init(struct be_adapter *adapter)
3444{
Sathya Perla8788fdc2009-07-27 22:52:03 +00003445 struct be_dma_mem *mbox_mem_alloc = &adapter->mbox_mem_alloced;
3446 struct be_dma_mem *mbox_mem_align = &adapter->mbox_mem;
Sathya Perla5b8821b2011-08-02 19:57:44 +00003447 struct be_dma_mem *rx_filter = &adapter->rx_filter;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003448 int status;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003449
3450 status = be_map_pci_bars(adapter);
3451 if (status)
Sathya Perlae7b909a2009-11-22 22:01:10 +00003452 goto done;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003453
3454 mbox_mem_alloc->size = sizeof(struct be_mcc_mailbox) + 16;
Ivan Vecera2b7bceb2011-02-02 08:05:12 +00003455 mbox_mem_alloc->va = dma_alloc_coherent(&adapter->pdev->dev,
3456 mbox_mem_alloc->size,
3457 &mbox_mem_alloc->dma,
3458 GFP_KERNEL);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003459 if (!mbox_mem_alloc->va) {
Sathya Perlae7b909a2009-11-22 22:01:10 +00003460 status = -ENOMEM;
3461 goto unmap_pci_bars;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003462 }
3463 mbox_mem_align->size = sizeof(struct be_mcc_mailbox);
3464 mbox_mem_align->va = PTR_ALIGN(mbox_mem_alloc->va, 16);
3465 mbox_mem_align->dma = PTR_ALIGN(mbox_mem_alloc->dma, 16);
3466 memset(mbox_mem_align->va, 0, sizeof(struct be_mcc_mailbox));
Sathya Perlae7b909a2009-11-22 22:01:10 +00003467
Sathya Perla5b8821b2011-08-02 19:57:44 +00003468 rx_filter->size = sizeof(struct be_cmd_req_rx_filter);
3469 rx_filter->va = dma_alloc_coherent(&adapter->pdev->dev, rx_filter->size,
3470 &rx_filter->dma, GFP_KERNEL);
3471 if (rx_filter->va == NULL) {
Sathya Perlae7b909a2009-11-22 22:01:10 +00003472 status = -ENOMEM;
3473 goto free_mbox;
3474 }
Sathya Perla5b8821b2011-08-02 19:57:44 +00003475 memset(rx_filter->va, 0, rx_filter->size);
Sathya Perlae7b909a2009-11-22 22:01:10 +00003476
Ivan Vecera29849612010-12-14 05:43:19 +00003477 mutex_init(&adapter->mbox_lock);
Sathya Perla8788fdc2009-07-27 22:52:03 +00003478 spin_lock_init(&adapter->mcc_lock);
3479 spin_lock_init(&adapter->mcc_cq_lock);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003480
Sarveshwar Bandidd131e72010-05-25 16:16:32 -07003481 init_completion(&adapter->flash_compl);
Sathya Perlacf588472010-02-14 21:22:01 +00003482 pci_save_state(adapter->pdev);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003483 return 0;
Sathya Perlae7b909a2009-11-22 22:01:10 +00003484
3485free_mbox:
Ivan Vecera2b7bceb2011-02-02 08:05:12 +00003486 dma_free_coherent(&adapter->pdev->dev, mbox_mem_alloc->size,
3487 mbox_mem_alloc->va, mbox_mem_alloc->dma);
Sathya Perlae7b909a2009-11-22 22:01:10 +00003488
3489unmap_pci_bars:
3490 be_unmap_pci_bars(adapter);
3491
3492done:
3493 return status;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003494}
3495
3496static void be_stats_cleanup(struct be_adapter *adapter)
3497{
Sathya Perla3abcded2010-10-03 22:12:27 -07003498 struct be_dma_mem *cmd = &adapter->stats_cmd;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003499
3500 if (cmd->va)
Ivan Vecera2b7bceb2011-02-02 08:05:12 +00003501 dma_free_coherent(&adapter->pdev->dev, cmd->size,
3502 cmd->va, cmd->dma);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003503}
3504
3505static int be_stats_init(struct be_adapter *adapter)
3506{
Sathya Perla3abcded2010-10-03 22:12:27 -07003507 struct be_dma_mem *cmd = &adapter->stats_cmd;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003508
Selvin Xavier005d5692011-05-16 07:36:35 +00003509 if (adapter->generation == BE_GEN2) {
Ajit Khaparde89a88ab2011-05-16 07:36:18 +00003510 cmd->size = sizeof(struct be_cmd_req_get_stats_v0);
Selvin Xavier005d5692011-05-16 07:36:35 +00003511 } else {
3512 if (lancer_chip(adapter))
3513 cmd->size = sizeof(struct lancer_cmd_req_pport_stats);
3514 else
3515 cmd->size = sizeof(struct be_cmd_req_get_stats_v1);
3516 }
Ivan Vecera2b7bceb2011-02-02 08:05:12 +00003517 cmd->va = dma_alloc_coherent(&adapter->pdev->dev, cmd->size, &cmd->dma,
3518 GFP_KERNEL);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003519 if (cmd->va == NULL)
3520 return -1;
David S. Millerd291b9a2010-01-28 21:36:21 -08003521 memset(cmd->va, 0, cmd->size);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003522 return 0;
3523}
3524
3525static void __devexit be_remove(struct pci_dev *pdev)
3526{
3527 struct be_adapter *adapter = pci_get_drvdata(pdev);
Sathya Perla8d56ff12009-11-22 22:02:26 +00003528
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003529 if (!adapter)
3530 return;
3531
Parav Pandit045508a2012-03-26 14:27:13 +00003532 be_roce_dev_remove(adapter);
3533
Padmanabh Ratnakarf67ef7b2012-07-12 03:57:09 +00003534 cancel_delayed_work_sync(&adapter->func_recovery_work);
3535
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003536 unregister_netdev(adapter->netdev);
3537
Sathya Perla5fb379e2009-06-18 00:02:59 +00003538 be_clear(adapter);
3539
Padmanabh Ratnakarbf99e502012-07-12 03:56:58 +00003540 /* tell fw we're done with firing cmds */
3541 be_cmd_fw_clean(adapter);
3542
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003543 be_stats_cleanup(adapter);
3544
3545 be_ctrl_cleanup(adapter);
3546
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003547 pci_set_drvdata(pdev, NULL);
3548 pci_release_regions(pdev);
3549 pci_disable_device(pdev);
3550
3551 free_netdev(adapter->netdev);
3552}
3553
Ajit Khaparde4762f6c2012-03-18 06:23:11 +00003554bool be_is_wol_supported(struct be_adapter *adapter)
3555{
3556 return ((adapter->wol_cap & BE_WOL_CAP) &&
3557 !be_is_wol_excluded(adapter)) ? true : false;
3558}
3559
Somnath Kotur941a77d2012-05-17 22:59:03 +00003560u32 be_get_fw_log_level(struct be_adapter *adapter)
3561{
3562 struct be_dma_mem extfat_cmd;
3563 struct be_fat_conf_params *cfgs;
3564 int status;
3565 u32 level = 0;
3566 int j;
3567
3568 memset(&extfat_cmd, 0, sizeof(struct be_dma_mem));
3569 extfat_cmd.size = sizeof(struct be_cmd_resp_get_ext_fat_caps);
3570 extfat_cmd.va = pci_alloc_consistent(adapter->pdev, extfat_cmd.size,
3571 &extfat_cmd.dma);
3572
3573 if (!extfat_cmd.va) {
3574 dev_err(&adapter->pdev->dev, "%s: Memory allocation failure\n",
3575 __func__);
3576 goto err;
3577 }
3578
3579 status = be_cmd_get_ext_fat_capabilites(adapter, &extfat_cmd);
3580 if (!status) {
3581 cfgs = (struct be_fat_conf_params *)(extfat_cmd.va +
3582 sizeof(struct be_cmd_resp_hdr));
Anton Blanchardac46a462012-07-24 15:05:25 +00003583 for (j = 0; j < le32_to_cpu(cfgs->module[0].num_modes); j++) {
Somnath Kotur941a77d2012-05-17 22:59:03 +00003584 if (cfgs->module[0].trace_lvl[j].mode == MODE_UART)
3585 level = cfgs->module[0].trace_lvl[j].dbg_lvl;
3586 }
3587 }
3588 pci_free_consistent(adapter->pdev, extfat_cmd.size, extfat_cmd.va,
3589 extfat_cmd.dma);
3590err:
3591 return level;
3592}
/* Query the firmware for the adapter's initial configuration: port
 * number, function mode/caps, VLAN and unicast-MAC limits, controller
 * attributes, WOL capability, and the firmware log level.
 *
 * Allocates adapter->pmac_id (freed at remove time).
 * NOTE(review): if a later step in this function or in be_probe()
 * fails, pmac_id is not freed on that error path — verify against the
 * teardown code.
 *
 * Returns 0 on success or a negative errno from the first failing
 * firmware command / allocation.
 */
static int be_get_initial_config(struct be_adapter *adapter)
{
	int status;
	u32 level;

	status = be_cmd_query_fw_cfg(adapter, &adapter->port_num,
			&adapter->function_mode, &adapter->function_caps);
	if (status)
		return status;

	/* in FLEX10 mode the VLAN table is shared, so only 1/8th of the
	 * entries are available to this function
	 */
	if (adapter->function_mode & FLEX10_MODE)
		adapter->max_vlans = BE_NUM_VLANS_SUPPORTED/8;
	else
		adapter->max_vlans = BE_NUM_VLANS_SUPPORTED;

	/* PFs get more unicast-MAC (PMAC) entries than VFs */
	if (be_physfn(adapter))
		adapter->max_pmac_cnt = BE_UC_PMAC_COUNT;
	else
		adapter->max_pmac_cnt = BE_VF_UC_PMAC_COUNT;

	/* primary mac needs 1 pmac entry */
	adapter->pmac_id = kcalloc(adapter->max_pmac_cnt + 1,
				  sizeof(u32), GFP_KERNEL);
	if (!adapter->pmac_id)
		return -ENOMEM;

	status = be_cmd_get_cntl_attributes(adapter);
	if (status)
		return status;

	status = be_cmd_get_acpi_wol_cap(adapter);
	if (status) {
		/* in case of a failure to get wol capabillities
		 * check the exclusion list to determine WOL capability */
		if (!be_is_wol_excluded(adapter))
			adapter->wol_cap |= BE_WOL_CAP;
	}

	if (be_is_wol_supported(adapter))
		adapter->wol = true;

	/* Must be a power of 2 or else MODULO will BUG_ON */
	adapter->be_get_temp_freq = 64;

	/* enable HW-level messages only when the fw's UART trace level is
	 * at or below the default
	 */
	level = be_get_fw_log_level(adapter);
	adapter->msg_enable = level <= FW_LOG_LEVEL_DEFAULT ? NETIF_MSG_HW : 0;

	return 0;
}
3642
Sathya Perla39f1d942012-05-08 19:41:24 +00003643static int be_dev_type_check(struct be_adapter *adapter)
Sathya Perlafe6d2a32010-11-21 23:25:50 +00003644{
3645 struct pci_dev *pdev = adapter->pdev;
3646 u32 sli_intf = 0, if_type;
3647
3648 switch (pdev->device) {
3649 case BE_DEVICE_ID1:
3650 case OC_DEVICE_ID1:
3651 adapter->generation = BE_GEN2;
3652 break;
3653 case BE_DEVICE_ID2:
3654 case OC_DEVICE_ID2:
3655 adapter->generation = BE_GEN3;
3656 break;
3657 case OC_DEVICE_ID3:
Mammatha Edhala12f4d0a2011-05-18 03:26:22 +00003658 case OC_DEVICE_ID4:
Sathya Perlafe6d2a32010-11-21 23:25:50 +00003659 pci_read_config_dword(pdev, SLI_INTF_REG_OFFSET, &sli_intf);
Parav Pandit045508a2012-03-26 14:27:13 +00003660 adapter->if_type = (sli_intf & SLI_INTF_IF_TYPE_MASK) >>
3661 SLI_INTF_IF_TYPE_SHIFT;
Sathya Perlafe6d2a32010-11-21 23:25:50 +00003662 if_type = (sli_intf & SLI_INTF_IF_TYPE_MASK) >>
3663 SLI_INTF_IF_TYPE_SHIFT;
Sathya Perlafe6d2a32010-11-21 23:25:50 +00003664 if (((sli_intf & SLI_INTF_VALID_MASK) != SLI_INTF_VALID) ||
Parav Pandit045508a2012-03-26 14:27:13 +00003665 !be_type_2_3(adapter)) {
3666 dev_err(&pdev->dev, "SLI_INTF reg val is not valid\n");
3667 return -EINVAL;
3668 }
3669 adapter->sli_family = ((sli_intf & SLI_INTF_FAMILY_MASK) >>
3670 SLI_INTF_FAMILY_SHIFT);
3671 adapter->generation = BE_GEN3;
3672 break;
3673 case OC_DEVICE_ID5:
3674 pci_read_config_dword(pdev, SLI_INTF_REG_OFFSET, &sli_intf);
3675 if ((sli_intf & SLI_INTF_VALID_MASK) != SLI_INTF_VALID) {
Sathya Perlafe6d2a32010-11-21 23:25:50 +00003676 dev_err(&pdev->dev, "SLI_INTF reg val is not valid\n");
3677 return -EINVAL;
3678 }
Sathya Perlafe6d2a32010-11-21 23:25:50 +00003679 adapter->sli_family = ((sli_intf & SLI_INTF_FAMILY_MASK) >>
3680 SLI_INTF_FAMILY_SHIFT);
3681 adapter->generation = BE_GEN3;
3682 break;
3683 default:
3684 adapter->generation = 0;
3685 }
Sathya Perla39f1d942012-05-08 19:41:24 +00003686
3687 pci_read_config_dword(adapter->pdev, SLI_INTF_REG_OFFSET, &sli_intf);
3688 adapter->virtfn = (sli_intf & SLI_INTF_FT_MASK) ? 1 : 0;
Sathya Perlafe6d2a32010-11-21 23:25:50 +00003689 return 0;
3690}
3691
/* Recover a Lancer function after a SLIPORT error: wait for the chip
 * to become ready again, tear down the existing queues/interrupts,
 * clear the recorded error state and rebuild everything.  The order
 * (rdy-state check -> close -> clear -> setup -> open) is significant.
 *
 * Returns 0 on success; on any failure the partial state is left as-is
 * and a negative status is returned (the caller keeps the netdev
 * detached in that case).
 */
static int lancer_recover_func(struct be_adapter *adapter)
{
	int status;

	status = lancer_test_and_set_rdy_state(adapter);
	if (status)
		goto err;

	if (netif_running(adapter->netdev))
		be_close(adapter->netdev);

	be_clear(adapter);

	/* forget the old error so be_setup()'s fw commands are issued */
	adapter->hw_error = false;
	adapter->fw_timeout = false;

	status = be_setup(adapter);
	if (status)
		goto err;

	if (netif_running(adapter->netdev)) {
		status = be_open(adapter->netdev);
		if (status)
			goto err;
	}

	dev_err(&adapter->pdev->dev,
		"Adapter SLIPORT recovery succeeded\n");
	return 0;
err:
	dev_err(&adapter->pdev->dev,
		"Adapter SLIPORT recovery failed\n");

	return status;
}
3727
/* Periodic (1s) worker that polls for adapter errors and, on Lancer
 * chips, attempts automatic function recovery.  EEH-originated errors
 * are left to the PCI error handlers.  Always re-arms itself.
 */
static void be_func_recovery_task(struct work_struct *work)
{
	struct be_adapter *adapter =
		container_of(work, struct be_adapter, func_recovery_work.work);
	int status;

	be_detect_error(adapter);

	if (adapter->hw_error && lancer_chip(adapter)) {

		/* EEH recovery owns the device; don't interfere */
		if (adapter->eeh_error)
			goto out;

		rtnl_lock();
		netif_device_detach(adapter->netdev);
		rtnl_unlock();

		status = lancer_recover_func(adapter);

		/* only reattach when recovery fully succeeded */
		if (!status)
			netif_device_attach(adapter->netdev);
	}

out:
	schedule_delayed_work(&adapter->func_recovery_work,
			      msecs_to_jiffies(1000));
}
3755
/* Periodic (1s) housekeeping worker: reaps MCC completions, kicks off
 * asynchronous stats / die-temperature queries, replenishes starved RX
 * rings, and updates the adaptive EQ delay.  Always re-arms itself.
 */
static void be_worker(struct work_struct *work)
{
	struct be_adapter *adapter =
		container_of(work, struct be_adapter, work.work);
	struct be_rx_obj *rxo;
	struct be_eq_obj *eqo;
	int i;

	/* when interrupts are not yet enabled, just reap any pending
	 * mcc completions */
	if (!netif_running(adapter->netdev)) {
		be_process_mcc(adapter);
		goto reschedule;
	}

	/* fire a new (async) stats request only once the previous one
	 * has completed
	 */
	if (!adapter->stats_cmd_sent) {
		if (lancer_chip(adapter))
			lancer_cmd_get_pport_stats(adapter,
						&adapter->stats_cmd);
		else
			be_cmd_get_stats(adapter, &adapter->stats_cmd);
	}

	/* poll the die temperature every be_get_temp_freq ticks */
	if (MODULO(adapter->work_counter, adapter->be_get_temp_freq) == 0)
		be_cmd_get_die_temperature(adapter);

	/* repost buffers to RX rings that ran dry in atomic context */
	for_all_rx_queues(adapter, rxo, i) {
		if (rxo->rx_post_starved) {
			rxo->rx_post_starved = false;
			be_post_rx_frags(rxo, GFP_KERNEL);
		}
	}

	for_all_evt_queues(adapter, eqo, i)
		be_eqd_update(adapter, eqo);

reschedule:
	adapter->work_counter++;
	schedule_delayed_work(&adapter->work, msecs_to_jiffies(1000));
}
3796
Sathya Perla39f1d942012-05-08 19:41:24 +00003797static bool be_reset_required(struct be_adapter *adapter)
3798{
Sathya Perlad79c0a22012-06-05 19:37:22 +00003799 return be_find_vfs(adapter, ENABLED) > 0 ? false : true;
Sathya Perla39f1d942012-05-08 19:41:24 +00003800}
3801
/* PCI probe: enable the device, allocate the netdev, map BARs, bring
 * the firmware command path up, query the initial configuration,
 * create queues/interrupts (be_setup) and register the netdev.
 * The goto labels at the bottom unwind in reverse acquisition order.
 *
 * Returns 0 on success or a negative errno.
 */
static int __devinit be_probe(struct pci_dev *pdev,
			const struct pci_device_id *pdev_id)
{
	int status = 0;
	struct be_adapter *adapter;
	struct net_device *netdev;
	char port_name;

	status = pci_enable_device(pdev);
	if (status)
		goto do_none;

	status = pci_request_regions(pdev, DRV_NAME);
	if (status)
		goto disable_dev;
	pci_set_master(pdev);

	netdev = alloc_etherdev_mqs(sizeof(*adapter), MAX_TX_QS, MAX_RX_QS);
	if (netdev == NULL) {
		status = -ENOMEM;
		goto rel_reg;
	}
	adapter = netdev_priv(netdev);
	adapter->pdev = pdev;
	pci_set_drvdata(pdev, adapter);

	status = be_dev_type_check(adapter);
	if (status)
		goto free_netdev;

	adapter->netdev = netdev;
	SET_NETDEV_DEV(netdev, &pdev->dev);

	/* prefer a 64-bit DMA mask; fall back to 32-bit */
	status = dma_set_mask(&pdev->dev, DMA_BIT_MASK(64));
	if (!status) {
		netdev->features |= NETIF_F_HIGHDMA;
	} else {
		status = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
		if (status) {
			dev_err(&pdev->dev, "Could not set PCI DMA Mask\n");
			goto free_netdev;
		}
	}

	status = be_ctrl_init(adapter);
	if (status)
		goto free_netdev;

	/* sync up with fw's ready state */
	if (be_physfn(adapter)) {
		status = be_fw_wait_ready(adapter);
		if (status)
			goto ctrl_clean;
	}

	/* tell fw we're ready to fire cmds */
	status = be_cmd_fw_init(adapter);
	if (status)
		goto ctrl_clean;

	/* skip the reset when VFs are already enabled on this PF */
	if (be_reset_required(adapter)) {
		status = be_cmd_reset_function(adapter);
		if (status)
			goto ctrl_clean;
	}

	/* The INTR bit may be set in the card when probed by a kdump kernel
	 * after a crash.
	 */
	if (!lancer_chip(adapter))
		be_intr_set(adapter, false);

	status = be_stats_init(adapter);
	if (status)
		goto ctrl_clean;

	status = be_get_initial_config(adapter);
	if (status)
		goto stats_clean;

	INIT_DELAYED_WORK(&adapter->work, be_worker);
	INIT_DELAYED_WORK(&adapter->func_recovery_work, be_func_recovery_task);
	adapter->rx_fc = adapter->tx_fc = true;

	status = be_setup(adapter);
	if (status)
		goto msix_disable;

	be_netdev_init(netdev);
	status = register_netdev(netdev);
	if (status != 0)
		goto unsetup;

	be_roce_dev_add(adapter);

	schedule_delayed_work(&adapter->func_recovery_work,
			      msecs_to_jiffies(1000));

	be_cmd_query_port_name(adapter, &port_name);

	dev_info(&pdev->dev, "%s: %s port %c\n", netdev->name, nic_name(pdev),
		port_name);

	return 0;

unsetup:
	be_clear(adapter);
msix_disable:
	be_msix_disable(adapter);
stats_clean:
	be_stats_cleanup(adapter);
ctrl_clean:
	be_ctrl_cleanup(adapter);
free_netdev:
	free_netdev(netdev);
	pci_set_drvdata(pdev, NULL);
rel_reg:
	pci_release_regions(pdev);
disable_dev:
	pci_disable_device(pdev);
do_none:
	dev_err(&pdev->dev, "%s initialization failed\n", nic_name(pdev));
	return status;
}
3926
/* Legacy PM suspend: arm WOL if enabled, stop the recovery worker,
 * close the interface, tear down queues/interrupts and put the
 * function into the requested low-power state.
 */
static int be_suspend(struct pci_dev *pdev, pm_message_t state)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);
	struct net_device *netdev = adapter->netdev;

	if (adapter->wol)
		be_setup_wol(adapter, true);

	/* stop the recovery worker before tearing anything down */
	cancel_delayed_work_sync(&adapter->func_recovery_work);

	netif_device_detach(netdev);
	if (netif_running(netdev)) {
		rtnl_lock();
		be_close(netdev);
		rtnl_unlock();
	}
	be_clear(adapter);

	pci_save_state(pdev);
	pci_disable_device(pdev);
	pci_set_power_state(pdev, pci_choose_state(pdev, state));
	return 0;
}
3950
3951static int be_resume(struct pci_dev *pdev)
3952{
3953 int status = 0;
3954 struct be_adapter *adapter = pci_get_drvdata(pdev);
3955 struct net_device *netdev = adapter->netdev;
3956
3957 netif_device_detach(netdev);
3958
3959 status = pci_enable_device(pdev);
3960 if (status)
3961 return status;
3962
3963 pci_set_power_state(pdev, 0);
3964 pci_restore_state(pdev);
3965
Sathya Perla2243e2e2009-11-22 22:02:03 +00003966 /* tell fw we're ready to fire cmds */
3967 status = be_cmd_fw_init(adapter);
3968 if (status)
3969 return status;
3970
Sarveshwar Bandi9b0365f2009-08-12 21:01:29 +00003971 be_setup(adapter);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003972 if (netif_running(netdev)) {
3973 rtnl_lock();
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003974 be_open(netdev);
3975 rtnl_unlock();
3976 }
Padmanabh Ratnakarf67ef7b2012-07-12 03:57:09 +00003977
3978 schedule_delayed_work(&adapter->func_recovery_work,
3979 msecs_to_jiffies(1000));
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003980 netif_device_attach(netdev);
Ajit Khaparde71d8d1b2009-12-03 06:16:59 +00003981
3982 if (adapter->wol)
3983 be_setup_wol(adapter, false);
Ajit Khapardea4ca0552011-02-11 13:38:03 +00003984
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003985 return 0;
3986}
3987
/*
 * An FLR will stop BE from DMAing any data.
 */
/* Shutdown callback: quiesce the adapter before reboot/poweroff.
 * Stops both delayed workers, detaches the netdev, arms WOL when
 * requested, and issues a function reset so no DMA is left in flight.
 */
static void be_shutdown(struct pci_dev *pdev)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);

	if (!adapter)
		return;

	cancel_delayed_work_sync(&adapter->work);
	cancel_delayed_work_sync(&adapter->func_recovery_work);

	netif_device_detach(adapter->netdev);

	if (adapter->wol)
		be_setup_wol(adapter, true);

	be_cmd_reset_function(adapter);

	pci_disable_device(pdev);
}
4010
/* PCI error-recovery entry point: an unrecoverable I/O error was
 * detected on this slot.  Marks the adapter as being in EEH recovery
 * (so the function-recovery worker backs off), detaches and closes the
 * netdev, and tells the PCI core whether a slot reset should follow.
 */
static pci_ers_result_t be_eeh_err_detected(struct pci_dev *pdev,
				pci_channel_state_t state)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);
	struct net_device *netdev = adapter->netdev;

	dev_err(&adapter->pdev->dev, "EEH error detected\n");

	adapter->eeh_error = true;

	/* make sure the recovery worker cannot race with EEH handling */
	cancel_delayed_work_sync(&adapter->func_recovery_work);

	rtnl_lock();
	netif_device_detach(netdev);
	rtnl_unlock();

	if (netif_running(netdev)) {
		rtnl_lock();
		be_close(netdev);
		rtnl_unlock();
	}
	be_clear(adapter);

	/* permanent failure: tell the core to give up on this device */
	if (state == pci_channel_io_perm_failure)
		return PCI_ERS_RESULT_DISCONNECT;

	pci_disable_device(pdev);

	/* The error could cause the FW to trigger a flash debug dump.
	 * Resetting the card while flash dump is in progress
	 * can cause it not to recover; wait for it to finish
	 */
	ssleep(30);
	return PCI_ERS_RESULT_NEED_RESET;
}
4046
/* PCI error-recovery slot-reset callback: the slot has been reset;
 * re-enable the device, restore its config space and verify that the
 * firmware comes back to a ready state before declaring recovery.
 */
static pci_ers_result_t be_eeh_reset(struct pci_dev *pdev)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);
	int status;

	dev_info(&adapter->pdev->dev, "EEH reset\n");
	/* clear hw/fw error state recorded by be_eeh_err_detected() */
	be_clear_all_error(adapter);

	status = pci_enable_device(pdev);
	if (status)
		return PCI_ERS_RESULT_DISCONNECT;

	pci_set_master(pdev);
	pci_set_power_state(pdev, 0);
	pci_restore_state(pdev);

	/* Check if card is ok and fw is ready */
	status = be_fw_wait_ready(adapter);
	if (status)
		return PCI_ERS_RESULT_DISCONNECT;

	return PCI_ERS_RESULT_RECOVERED;
}
4070
/* PCI error-recovery resume callback: the slot reset succeeded;
 * re-init the firmware command path, reset and re-setup the function,
 * reopen the interface and restart the recovery worker.  On any
 * failure the netdev is left detached and an error is logged.
 */
static void be_eeh_resume(struct pci_dev *pdev)
{
	int status = 0;
	struct be_adapter *adapter = pci_get_drvdata(pdev);
	struct net_device *netdev = adapter->netdev;

	dev_info(&adapter->pdev->dev, "EEH resume\n");

	pci_save_state(pdev);

	/* tell fw we're ready to fire cmds */
	status = be_cmd_fw_init(adapter);
	if (status)
		goto err;

	status = be_cmd_reset_function(adapter);
	if (status)
		goto err;

	status = be_setup(adapter);
	if (status)
		goto err;

	if (netif_running(netdev)) {
		status = be_open(netdev);
		if (status)
			goto err;
	}

	schedule_delayed_work(&adapter->func_recovery_work,
			      msecs_to_jiffies(1000));
	netif_device_attach(netdev);
	return;
err:
	dev_err(&adapter->pdev->dev, "EEH resume failed\n");
}
4107
/* PCI error-recovery (EEH/AER) callbacks registered via be_driver */
static struct pci_error_handlers be_eeh_handlers = {
	.error_detected = be_eeh_err_detected,
	.slot_reset = be_eeh_reset,
	.resume = be_eeh_resume,
};
4113
/* PCI driver descriptor: probe/remove, legacy PM hooks, shutdown and
 * error-recovery handlers for all supported BE/OC device ids.
 */
static struct pci_driver be_driver = {
	.name = DRV_NAME,
	.id_table = be_dev_ids,
	.probe = be_probe,
	.remove = be_remove,
	.suspend = be_suspend,
	.resume = be_resume,
	.shutdown = be_shutdown,
	.err_handler = &be_eeh_handlers
};
4124
4125static int __init be_init_module(void)
4126{
Joe Perches8e95a202009-12-03 07:58:21 +00004127 if (rx_frag_size != 8192 && rx_frag_size != 4096 &&
4128 rx_frag_size != 2048) {
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004129 printk(KERN_WARNING DRV_NAME
4130 " : Module param rx_frag_size must be 2048/4096/8192."
4131 " Using 2048\n");
4132 rx_frag_size = 2048;
4133 }
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004134
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004135 return pci_register_driver(&be_driver);
4136}
4137module_init(be_init_module);
4138
/* Module exit: unregister the PCI driver (invokes be_remove() for
 * every bound device).
 */
static void __exit be_exit_module(void)
{
	pci_unregister_driver(&be_driver);
}
4143module_exit(be_exit_module);