blob: e3822788f532f916434e4f1326cf63c290bf8b10 [file] [log] [blame]
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001/*
Ajit Khaparded2145cd2011-03-16 08:20:46 +00002 * Copyright (C) 2005 - 2011 Emulex
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003 * All rights reserved.
4 *
5 * This program is free software; you can redistribute it and/or
6 * modify it under the terms of the GNU General Public License version 2
7 * as published by the Free Software Foundation. The full GNU General
8 * Public License is included in this distribution in the file called COPYING.
9 *
10 * Contact Information:
Ajit Khaparded2145cd2011-03-16 08:20:46 +000011 * linux-drivers@emulex.com
Sathya Perla6b7c5b92009-03-11 23:32:03 -070012 *
Ajit Khaparded2145cd2011-03-16 08:20:46 +000013 * Emulex
14 * 3333 Susan Street
15 * Costa Mesa, CA 92626
Sathya Perla6b7c5b92009-03-11 23:32:03 -070016 */
17
Paul Gortmaker70c71602011-05-22 16:47:17 -040018#include <linux/prefetch.h>
Paul Gortmaker9d9779e2011-07-03 15:21:01 -040019#include <linux/module.h>
Sathya Perla6b7c5b92009-03-11 23:32:03 -070020#include "be.h"
Sathya Perla8788fdc2009-07-27 22:52:03 +000021#include "be_cmds.h"
Stephen Hemminger65f71b82009-03-27 00:25:24 -070022#include <asm/div64.h>
Sathya Perla6b7c5b92009-03-11 23:32:03 -070023
24MODULE_VERSION(DRV_VER);
25MODULE_DEVICE_TABLE(pci, be_dev_ids);
26MODULE_DESCRIPTION(DRV_DESC " " DRV_VER);
27MODULE_AUTHOR("ServerEngines Corporation");
28MODULE_LICENSE("GPL");
29
Sarveshwar Bandiba343c72010-03-31 02:56:12 +000030static unsigned int num_vfs;
Sarveshwar Bandiba343c72010-03-31 02:56:12 +000031module_param(num_vfs, uint, S_IRUGO);
Sarveshwar Bandiba343c72010-03-31 02:56:12 +000032MODULE_PARM_DESC(num_vfs, "Number of PCI VFs to initialize");
Sathya Perla6b7c5b92009-03-11 23:32:03 -070033
Sathya Perla11ac75e2011-12-13 00:58:50 +000034static ushort rx_frag_size = 2048;
35module_param(rx_frag_size, ushort, S_IRUGO);
36MODULE_PARM_DESC(rx_frag_size, "Size of a fragment that holds rcvd data.");
37
/* PCI device IDs claimed by this driver; the zeroed entry terminates the
 * list. MODULE_DEVICE_TABLE() exports the table for modalias/hotplug
 * matching.
 */
static DEFINE_PCI_DEVICE_TABLE(be_dev_ids) = {
	{ PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID1) },
	{ PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID2) },
	{ PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID1) },
	{ PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID2) },
	{ PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID3)},
	{ PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID4)},
	{ PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID5)},
	{ 0 }
};
MODULE_DEVICE_TABLE(pci, be_dev_ids);
/* UE Status Low CSR */
/* Bit-position-indexed names for the Unrecoverable Error status low
 * register; used to print which HW block reported the error.
 * NOTE(review): some entries carry trailing spaces in the original
 * firmware naming -- preserved verbatim, they appear in log output.
 */
static const char * const ue_status_low_desc[] = {
	"CEV",
	"CTX",
	"DBUF",
	"ERX",
	"Host",
	"MPU",
	"NDMA",
	"PTC ",
	"RDMA ",
	"RXF ",
	"RXIPS ",
	"RXULP0 ",
	"RXULP1 ",
	"RXULP2 ",
	"TIM ",
	"TPOST ",
	"TPRE ",
	"TXIPS ",
	"TXULP0 ",
	"TXULP1 ",
	"UC ",
	"WDMA ",
	"TXULP2 ",
	"HOST1 ",
	"P0_OB_LINK ",
	"P1_OB_LINK ",
	"HOST_GPIO ",
	"MBOX ",
	"AXGMAC0",
	"AXGMAC1",
	"JTAG",
	"MPU_INTPEND"
};
/* UE Status High CSR */
/* Bit-position-indexed names for the Unrecoverable Error status high
 * register; "Unknown" pads the table to 32 entries so any set bit maps
 * to a printable string.
 */
static const char * const ue_status_hi_desc[] = {
	"LPCMEMHOST",
	"MGMT_MAC",
	"PCS0ONLINE",
	"MPU_IRAM",
	"PCS1ONLINE",
	"PCTL0",
	"PCTL1",
	"PMEM",
	"RR",
	"TXPB",
	"RXPP",
	"XAUI",
	"TXP",
	"ARM",
	"IPC",
	"HOST2",
	"HOST3",
	"HOST4",
	"HOST5",
	"HOST6",
	"HOST7",
	"HOST8",
	"HOST9",
	"NETC",
	"Unknown",
	"Unknown",
	"Unknown",
	"Unknown",
	"Unknown",
	"Unknown",
	"Unknown",
	"Unknown"
};
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700119
Sathya Perla752961a2011-10-24 02:45:03 +0000120/* Is BE in a multi-channel mode */
121static inline bool be_is_mc(struct be_adapter *adapter) {
122 return (adapter->function_mode & FLEX10_MODE ||
123 adapter->function_mode & VNIC_MODE ||
124 adapter->function_mode & UMC_ENABLED);
125}
126
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700127static void be_queue_free(struct be_adapter *adapter, struct be_queue_info *q)
128{
129 struct be_dma_mem *mem = &q->dma_mem;
Sathya Perla1cfafab2012-02-23 18:50:15 +0000130 if (mem->va) {
Ivan Vecera2b7bceb2011-02-02 08:05:12 +0000131 dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
132 mem->dma);
Sathya Perla1cfafab2012-02-23 18:50:15 +0000133 mem->va = NULL;
134 }
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700135}
136
137static int be_queue_alloc(struct be_adapter *adapter, struct be_queue_info *q,
138 u16 len, u16 entry_size)
139{
140 struct be_dma_mem *mem = &q->dma_mem;
141
142 memset(q, 0, sizeof(*q));
143 q->len = len;
144 q->entry_size = entry_size;
145 mem->size = len * entry_size;
Ivan Vecera2b7bceb2011-02-02 08:05:12 +0000146 mem->va = dma_alloc_coherent(&adapter->pdev->dev, mem->size, &mem->dma,
147 GFP_KERNEL);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700148 if (!mem->va)
Sathya Perla10ef9ab2012-02-09 18:05:27 +0000149 return -ENOMEM;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700150 memset(mem->va, 0, mem->size);
151 return 0;
152}
153
Sathya Perla8788fdc2009-07-27 22:52:03 +0000154static void be_intr_set(struct be_adapter *adapter, bool enable)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700155{
Sathya Perladb3ea782011-08-22 19:41:52 +0000156 u32 reg, enabled;
Sathya Perla5f0b8492009-07-27 22:52:56 +0000157
Sathya Perlacf588472010-02-14 21:22:01 +0000158 if (adapter->eeh_err)
159 return;
160
Sathya Perladb3ea782011-08-22 19:41:52 +0000161 pci_read_config_dword(adapter->pdev, PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET,
162 &reg);
163 enabled = reg & MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
164
Sathya Perla5f0b8492009-07-27 22:52:56 +0000165 if (!enabled && enable)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700166 reg |= MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
Sathya Perla5f0b8492009-07-27 22:52:56 +0000167 else if (enabled && !enable)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700168 reg &= ~MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
Sathya Perla5f0b8492009-07-27 22:52:56 +0000169 else
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700170 return;
Sathya Perla5f0b8492009-07-27 22:52:56 +0000171
Sathya Perladb3ea782011-08-22 19:41:52 +0000172 pci_write_config_dword(adapter->pdev,
173 PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET, reg);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700174}
175
Sathya Perla8788fdc2009-07-27 22:52:03 +0000176static void be_rxq_notify(struct be_adapter *adapter, u16 qid, u16 posted)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700177{
178 u32 val = 0;
179 val |= qid & DB_RQ_RING_ID_MASK;
180 val |= posted << DB_RQ_NUM_POSTED_SHIFT;
Sathya Perlaf3eb62d2010-06-29 00:11:17 +0000181
182 wmb();
Sathya Perla8788fdc2009-07-27 22:52:03 +0000183 iowrite32(val, adapter->db + DB_RQ_OFFSET);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700184}
185
Sathya Perla8788fdc2009-07-27 22:52:03 +0000186static void be_txq_notify(struct be_adapter *adapter, u16 qid, u16 posted)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700187{
188 u32 val = 0;
189 val |= qid & DB_TXULP_RING_ID_MASK;
190 val |= (posted & DB_TXULP_NUM_POSTED_MASK) << DB_TXULP_NUM_POSTED_SHIFT;
Sathya Perlaf3eb62d2010-06-29 00:11:17 +0000191
192 wmb();
Sathya Perla8788fdc2009-07-27 22:52:03 +0000193 iowrite32(val, adapter->db + DB_TXULP1_OFFSET);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700194}
195
Sathya Perla8788fdc2009-07-27 22:52:03 +0000196static void be_eq_notify(struct be_adapter *adapter, u16 qid,
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700197 bool arm, bool clear_int, u16 num_popped)
198{
199 u32 val = 0;
200 val |= qid & DB_EQ_RING_ID_MASK;
Sathya Perlafe6d2a32010-11-21 23:25:50 +0000201 val |= ((qid & DB_EQ_RING_ID_EXT_MASK) <<
202 DB_EQ_RING_ID_EXT_MASK_SHIFT);
Sathya Perlacf588472010-02-14 21:22:01 +0000203
204 if (adapter->eeh_err)
205 return;
206
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700207 if (arm)
208 val |= 1 << DB_EQ_REARM_SHIFT;
209 if (clear_int)
210 val |= 1 << DB_EQ_CLR_SHIFT;
211 val |= 1 << DB_EQ_EVNT_SHIFT;
212 val |= num_popped << DB_EQ_NUM_POPPED_SHIFT;
Sathya Perla8788fdc2009-07-27 22:52:03 +0000213 iowrite32(val, adapter->db + DB_EQ_OFFSET);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700214}
215
Sathya Perla8788fdc2009-07-27 22:52:03 +0000216void be_cq_notify(struct be_adapter *adapter, u16 qid, bool arm, u16 num_popped)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700217{
218 u32 val = 0;
219 val |= qid & DB_CQ_RING_ID_MASK;
Sathya Perlafe6d2a32010-11-21 23:25:50 +0000220 val |= ((qid & DB_CQ_RING_ID_EXT_MASK) <<
221 DB_CQ_RING_ID_EXT_MASK_SHIFT);
Sathya Perlacf588472010-02-14 21:22:01 +0000222
223 if (adapter->eeh_err)
224 return;
225
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700226 if (arm)
227 val |= 1 << DB_CQ_REARM_SHIFT;
228 val |= num_popped << DB_CQ_NUM_POPPED_SHIFT;
Sathya Perla8788fdc2009-07-27 22:52:03 +0000229 iowrite32(val, adapter->db + DB_CQ_OFFSET);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700230}
231
/* ndo_set_mac_address handler: program a new unicast MAC via firmware.
 * Returns 0 on success or a negative errno / firmware status.
 */
static int be_mac_addr_set(struct net_device *netdev, void *p)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct sockaddr *addr = p;
	int status = 0;
	u8 current_mac[ETH_ALEN];
	/* Capture the current pmac_id now: be_cmd_pmac_add() below
	 * overwrites adapter->pmac_id, and the old entry must still be
	 * deleted afterwards.
	 */
	u32 pmac_id = adapter->pmac_id;

	if (!is_valid_ether_addr(addr->sa_data))
		return -EADDRNOTAVAIL;

	/* Ask firmware for the MAC currently programmed on this interface */
	status = be_cmd_mac_addr_query(adapter, current_mac,
				MAC_ADDRESS_TYPE_NETWORK, false,
				adapter->if_handle, 0);
	if (status)
		goto err;

	/* Add the new MAC before deleting the old one so the port is
	 * never left without a programmed address.
	 */
	if (memcmp(addr->sa_data, current_mac, ETH_ALEN)) {
		status = be_cmd_pmac_add(adapter, (u8 *)addr->sa_data,
				adapter->if_handle, &adapter->pmac_id, 0);
		if (status)
			goto err;

		be_cmd_pmac_del(adapter, adapter->if_handle, pmac_id, 0);
	}
	memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
	return 0;
err:
	dev_err(&adapter->pdev->dev, "MAC %pM set Failed\n", addr->sa_data);
	return status;
}
263
/* Copy the BE2 (v0 stats layout) firmware counters into the generic
 * driver stats structure, after byte-swapping the whole command
 * response from LE to CPU order.
 */
static void populate_be2_stats(struct be_adapter *adapter)
{
	struct be_hw_stats_v0 *hw_stats = hw_stats_from_cmd(adapter);
	struct be_pmem_stats *pmem_sts = &hw_stats->pmem;
	struct be_rxf_stats_v0 *rxf_stats = &hw_stats->rxf;
	/* per-port counters for the port this function is bound to */
	struct be_port_rxf_stats_v0 *port_stats =
		&rxf_stats->port[adapter->port_num];
	struct be_drv_stats *drvs = &adapter->drv_stats;

	be_dws_le_to_cpu(hw_stats, sizeof(*hw_stats));
	drvs->rx_pause_frames = port_stats->rx_pause_frames;
	drvs->rx_crc_errors = port_stats->rx_crc_errors;
	drvs->rx_control_frames = port_stats->rx_control_frames;
	drvs->rx_in_range_errors = port_stats->rx_in_range_errors;
	drvs->rx_frame_too_long = port_stats->rx_frame_too_long;
	drvs->rx_dropped_runt = port_stats->rx_dropped_runt;
	drvs->rx_ip_checksum_errs = port_stats->rx_ip_checksum_errs;
	drvs->rx_tcp_checksum_errs = port_stats->rx_tcp_checksum_errs;
	drvs->rx_udp_checksum_errs = port_stats->rx_udp_checksum_errs;
	drvs->rxpp_fifo_overflow_drop = port_stats->rx_fifo_overflow;
	drvs->rx_dropped_tcp_length = port_stats->rx_dropped_tcp_length;
	drvs->rx_dropped_too_small = port_stats->rx_dropped_too_small;
	drvs->rx_dropped_too_short = port_stats->rx_dropped_too_short;
	drvs->rx_out_range_errors = port_stats->rx_out_range_errors;
	drvs->rx_input_fifo_overflow_drop = port_stats->rx_input_fifo_overflow;
	drvs->rx_dropped_header_too_small =
		port_stats->rx_dropped_header_too_small;
	/* v0 hardware reports address and vlan mismatches separately;
	 * fold both into the single generic counter
	 */
	drvs->rx_address_mismatch_drops =
		port_stats->rx_address_mismatch_drops +
		port_stats->rx_vlan_mismatch_drops;
	drvs->rx_alignment_symbol_errors =
		port_stats->rx_alignment_symbol_errors;

	drvs->tx_pauseframes = port_stats->tx_pauseframes;
	drvs->tx_controlframes = port_stats->tx_controlframes;

	/* jabber events are tracked per physical port in the rxf block */
	if (adapter->port_num)
		drvs->jabber_events = rxf_stats->port1_jabber_events;
	else
		drvs->jabber_events = rxf_stats->port0_jabber_events;
	drvs->rx_drops_no_pbuf = rxf_stats->rx_drops_no_pbuf;
	drvs->rx_drops_no_erx_descr = rxf_stats->rx_drops_no_erx_descr;
	drvs->forwarded_packets = rxf_stats->forwarded_packets;
	drvs->rx_drops_mtu = rxf_stats->rx_drops_mtu;
	drvs->rx_drops_no_tpre_descr = rxf_stats->rx_drops_no_tpre_descr;
	drvs->rx_drops_too_many_frags = rxf_stats->rx_drops_too_many_frags;
	adapter->drv_stats.eth_red_drops = pmem_sts->eth_red_drops;
}
312
/* Copy the BE3 (v1 stats layout) firmware counters into the generic
 * driver stats structure, after byte-swapping the whole command
 * response from LE to CPU order.
 */
static void populate_be3_stats(struct be_adapter *adapter)
{
	struct be_hw_stats_v1 *hw_stats = hw_stats_from_cmd(adapter);
	struct be_pmem_stats *pmem_sts = &hw_stats->pmem;
	struct be_rxf_stats_v1 *rxf_stats = &hw_stats->rxf;
	/* per-port counters for the port this function is bound to */
	struct be_port_rxf_stats_v1 *port_stats =
		&rxf_stats->port[adapter->port_num];
	struct be_drv_stats *drvs = &adapter->drv_stats;

	be_dws_le_to_cpu(hw_stats, sizeof(*hw_stats));
	drvs->pmem_fifo_overflow_drop = port_stats->pmem_fifo_overflow_drop;
	drvs->rx_priority_pause_frames = port_stats->rx_priority_pause_frames;
	drvs->rx_pause_frames = port_stats->rx_pause_frames;
	drvs->rx_crc_errors = port_stats->rx_crc_errors;
	drvs->rx_control_frames = port_stats->rx_control_frames;
	drvs->rx_in_range_errors = port_stats->rx_in_range_errors;
	drvs->rx_frame_too_long = port_stats->rx_frame_too_long;
	drvs->rx_dropped_runt = port_stats->rx_dropped_runt;
	drvs->rx_ip_checksum_errs = port_stats->rx_ip_checksum_errs;
	drvs->rx_tcp_checksum_errs = port_stats->rx_tcp_checksum_errs;
	drvs->rx_udp_checksum_errs = port_stats->rx_udp_checksum_errs;
	drvs->rx_dropped_tcp_length = port_stats->rx_dropped_tcp_length;
	drvs->rx_dropped_too_small = port_stats->rx_dropped_too_small;
	drvs->rx_dropped_too_short = port_stats->rx_dropped_too_short;
	drvs->rx_out_range_errors = port_stats->rx_out_range_errors;
	drvs->rx_dropped_header_too_small =
		port_stats->rx_dropped_header_too_small;
	drvs->rx_input_fifo_overflow_drop =
		port_stats->rx_input_fifo_overflow_drop;
	/* unlike v0, v1 hardware reports a single combined counter */
	drvs->rx_address_mismatch_drops = port_stats->rx_address_mismatch_drops;
	drvs->rx_alignment_symbol_errors =
		port_stats->rx_alignment_symbol_errors;
	drvs->rxpp_fifo_overflow_drop = port_stats->rxpp_fifo_overflow_drop;
	drvs->tx_pauseframes = port_stats->tx_pauseframes;
	drvs->tx_controlframes = port_stats->tx_controlframes;
	drvs->jabber_events = port_stats->jabber_events;
	drvs->rx_drops_no_pbuf = rxf_stats->rx_drops_no_pbuf;
	drvs->rx_drops_no_erx_descr = rxf_stats->rx_drops_no_erx_descr;
	drvs->forwarded_packets = rxf_stats->forwarded_packets;
	drvs->rx_drops_mtu = rxf_stats->rx_drops_mtu;
	drvs->rx_drops_no_tpre_descr = rxf_stats->rx_drops_no_tpre_descr;
	drvs->rx_drops_too_many_frags = rxf_stats->rx_drops_too_many_frags;
	adapter->drv_stats.eth_red_drops = pmem_sts->eth_red_drops;
}
357
/* Copy Lancer per-physical-port (pport) firmware counters into the
 * generic driver stats structure. Lancer keeps 64-bit counters; only
 * the low 32 bits (*_lo fields) are folded into the 32/ulong driver
 * counters.
 */
static void populate_lancer_stats(struct be_adapter *adapter)
{

	struct be_drv_stats *drvs = &adapter->drv_stats;
	struct lancer_pport_stats *pport_stats =
					pport_stats_from_cmd(adapter);

	be_dws_le_to_cpu(pport_stats, sizeof(*pport_stats));
	drvs->rx_pause_frames = pport_stats->rx_pause_frames_lo;
	drvs->rx_crc_errors = pport_stats->rx_crc_errors_lo;
	drvs->rx_control_frames = pport_stats->rx_control_frames_lo;
	drvs->rx_in_range_errors = pport_stats->rx_in_range_errors;
	drvs->rx_frame_too_long = pport_stats->rx_frames_too_long_lo;
	drvs->rx_dropped_runt = pport_stats->rx_dropped_runt;
	drvs->rx_ip_checksum_errs = pport_stats->rx_ip_checksum_errors;
	drvs->rx_tcp_checksum_errs = pport_stats->rx_tcp_checksum_errors;
	drvs->rx_udp_checksum_errs = pport_stats->rx_udp_checksum_errors;
	drvs->rx_dropped_tcp_length =
				pport_stats->rx_dropped_invalid_tcp_length;
	drvs->rx_dropped_too_small = pport_stats->rx_dropped_too_small;
	drvs->rx_dropped_too_short = pport_stats->rx_dropped_too_short;
	drvs->rx_out_range_errors = pport_stats->rx_out_of_range_errors;
	drvs->rx_dropped_header_too_small =
				pport_stats->rx_dropped_header_too_small;
	drvs->rx_input_fifo_overflow_drop = pport_stats->rx_fifo_overflow;
	/* address and vlan mismatches are reported separately; fold both
	 * into the single generic counter
	 */
	drvs->rx_address_mismatch_drops =
					pport_stats->rx_address_mismatch_drops +
					pport_stats->rx_vlan_mismatch_drops;
	drvs->rx_alignment_symbol_errors = pport_stats->rx_symbol_errors_lo;
	drvs->rxpp_fifo_overflow_drop = pport_stats->rx_fifo_overflow;
	drvs->tx_pauseframes = pport_stats->tx_pause_frames_lo;
	drvs->tx_controlframes = pport_stats->tx_control_frames_lo;
	drvs->jabber_events = pport_stats->rx_jabbers;
	drvs->forwarded_packets = pport_stats->num_forwards_lo;
	drvs->rx_drops_mtu = pport_stats->rx_drops_mtu_lo;
	drvs->rx_drops_too_many_frags =
				pport_stats->rx_drops_too_many_frags_lo;
}
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000396
Sathya Perla09c1c682011-08-22 19:41:53 +0000397static void accumulate_16bit_val(u32 *acc, u16 val)
398{
399#define lo(x) (x & 0xFFFF)
400#define hi(x) (x & 0xFFFF0000)
401 bool wrapped = val < lo(*acc);
402 u32 newacc = hi(*acc) + val;
403
404 if (wrapped)
405 newacc += 65536;
406 ACCESS_ONCE(*acc) = newacc;
407}
408
/* Dispatch to the chip-specific stats parser (Lancer / BE3 / BE2) and
 * then accumulate the per-RX-queue no-fragment drop counters, which the
 * HW keeps as wrapping 16-bit values.
 */
void be_parse_stats(struct be_adapter *adapter)
{
	struct be_erx_stats_v1 *erx = be_erx_stats_from_cmd(adapter);
	struct be_rx_obj *rxo;
	int i;

	if (adapter->generation == BE_GEN3) {
		if (lancer_chip(adapter))
			populate_lancer_stats(adapter);
		else
			populate_be3_stats(adapter);
	} else {
		populate_be2_stats(adapter);
	}

	/* as erx_v1 is longer than v0, ok to use v1 defn for v0 access */
	for_all_rx_queues(adapter, rxo, i) {
		/* below erx HW counter can actually wrap around after
		 * 65535. Driver accumulates a 32-bit value
		 */
		accumulate_16bit_val(&rx_stats(rxo)->rx_drops_no_frags,
				(u16)erx->rx_drops_no_fragments[rxo->q.id]);
	}
}
433
/* ndo_get_stats64 handler: aggregate per-RX/TX-queue packet and byte
 * counters (read consistently via u64_stats seqcount retry loops) and
 * derive the standard error counters from the firmware stats snapshot
 * in adapter->drv_stats.
 */
static struct rtnl_link_stats64 *be_get_stats64(struct net_device *netdev,
					struct rtnl_link_stats64 *stats)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_drv_stats *drvs = &adapter->drv_stats;
	struct be_rx_obj *rxo;
	struct be_tx_obj *txo;
	u64 pkts, bytes;
	unsigned int start;
	int i;

	for_all_rx_queues(adapter, rxo, i) {
		const struct be_rx_stats *rx_stats = rx_stats(rxo);
		/* retry until pkts/bytes are read without a concurrent
		 * writer update (u64 reads may tear on 32-bit CPUs)
		 */
		do {
			start = u64_stats_fetch_begin_bh(&rx_stats->sync);
			pkts = rx_stats(rxo)->rx_pkts;
			bytes = rx_stats(rxo)->rx_bytes;
		} while (u64_stats_fetch_retry_bh(&rx_stats->sync, start));
		stats->rx_packets += pkts;
		stats->rx_bytes += bytes;
		stats->multicast += rx_stats(rxo)->rx_mcast_pkts;
		stats->rx_dropped += rx_stats(rxo)->rx_drops_no_skbs +
					rx_stats(rxo)->rx_drops_no_frags;
	}

	for_all_tx_queues(adapter, txo, i) {
		const struct be_tx_stats *tx_stats = tx_stats(txo);
		/* same consistent-read dance for the TX counters */
		do {
			start = u64_stats_fetch_begin_bh(&tx_stats->sync);
			pkts = tx_stats(txo)->tx_pkts;
			bytes = tx_stats(txo)->tx_bytes;
		} while (u64_stats_fetch_retry_bh(&tx_stats->sync, start));
		stats->tx_packets += pkts;
		stats->tx_bytes += bytes;
	}

	/* bad pkts received */
	stats->rx_errors = drvs->rx_crc_errors +
		drvs->rx_alignment_symbol_errors +
		drvs->rx_in_range_errors +
		drvs->rx_out_range_errors +
		drvs->rx_frame_too_long +
		drvs->rx_dropped_too_small +
		drvs->rx_dropped_too_short +
		drvs->rx_dropped_header_too_small +
		drvs->rx_dropped_tcp_length +
		drvs->rx_dropped_runt;

	/* detailed rx errors */
	stats->rx_length_errors = drvs->rx_in_range_errors +
		drvs->rx_out_range_errors +
		drvs->rx_frame_too_long;

	stats->rx_crc_errors = drvs->rx_crc_errors;

	/* frame alignment errors */
	stats->rx_frame_errors = drvs->rx_alignment_symbol_errors;

	/* receiver fifo overrun */
	/* drops_no_pbuf is no per i/f, it's per BE card */
	stats->rx_fifo_errors = drvs->rxpp_fifo_overflow_drop +
				drvs->rx_input_fifo_overflow_drop +
				drvs->rx_drops_no_pbuf;
	return stats;
}
499
Ajit Khapardeb236916a2011-12-30 12:15:40 +0000500void be_link_status_update(struct be_adapter *adapter, u8 link_status)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700501{
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700502 struct net_device *netdev = adapter->netdev;
503
Ajit Khapardeb236916a2011-12-30 12:15:40 +0000504 if (!(adapter->flags & BE_FLAGS_LINK_STATUS_INIT)) {
Sathya Perlaea172a02011-08-02 19:57:42 +0000505 netif_carrier_off(netdev);
Ajit Khapardeb236916a2011-12-30 12:15:40 +0000506 adapter->flags |= BE_FLAGS_LINK_STATUS_INIT;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700507 }
Ajit Khapardeb236916a2011-12-30 12:15:40 +0000508
509 if ((link_status & LINK_STATUS_MASK) == LINK_UP)
510 netif_carrier_on(netdev);
511 else
512 netif_carrier_off(netdev);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700513}
514
Sathya Perla3c8def92011-06-12 20:01:58 +0000515static void be_tx_stats_update(struct be_tx_obj *txo,
Ajit Khaparde91992e42010-02-19 13:57:12 +0000516 u32 wrb_cnt, u32 copied, u32 gso_segs, bool stopped)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700517{
Sathya Perla3c8def92011-06-12 20:01:58 +0000518 struct be_tx_stats *stats = tx_stats(txo);
519
Sathya Perlaab1594e2011-07-25 19:10:15 +0000520 u64_stats_update_begin(&stats->sync);
Sathya Perlaac124ff2011-07-25 19:10:14 +0000521 stats->tx_reqs++;
522 stats->tx_wrbs += wrb_cnt;
523 stats->tx_bytes += copied;
524 stats->tx_pkts += (gso_segs ? gso_segs : 1);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700525 if (stopped)
Sathya Perlaac124ff2011-07-25 19:10:14 +0000526 stats->tx_stops++;
Sathya Perlaab1594e2011-07-25 19:10:15 +0000527 u64_stats_update_end(&stats->sync);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700528}
529
530/* Determine number of WRB entries needed to xmit data in an skb */
Sathya Perlafe6d2a32010-11-21 23:25:50 +0000531static u32 wrb_cnt_for_skb(struct be_adapter *adapter, struct sk_buff *skb,
532 bool *dummy)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700533{
David S. Millerebc8d2a2009-06-09 01:01:31 -0700534 int cnt = (skb->len > skb->data_len);
535
536 cnt += skb_shinfo(skb)->nr_frags;
537
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700538 /* to account for hdr wrb */
539 cnt++;
Sathya Perlafe6d2a32010-11-21 23:25:50 +0000540 if (lancer_chip(adapter) || !(cnt & 1)) {
541 *dummy = false;
542 } else {
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700543 /* add a dummy to make it an even num */
544 cnt++;
545 *dummy = true;
Sathya Perlafe6d2a32010-11-21 23:25:50 +0000546 }
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700547 BUG_ON(cnt > BE_MAX_TX_FRAG_COUNT);
548 return cnt;
549}
550
551static inline void wrb_fill(struct be_eth_wrb *wrb, u64 addr, int len)
552{
553 wrb->frag_pa_hi = upper_32_bits(addr);
554 wrb->frag_pa_lo = addr & 0xFFFFFFFF;
555 wrb->frag_len = len & ETH_WRB_FRAG_LEN_MASK;
556}
557
Ajit Khaparde1ded1322011-12-09 13:53:17 +0000558static inline u16 be_get_tx_vlan_tag(struct be_adapter *adapter,
559 struct sk_buff *skb)
560{
561 u8 vlan_prio;
562 u16 vlan_tag;
563
564 vlan_tag = vlan_tx_tag_get(skb);
565 vlan_prio = (vlan_tag & VLAN_PRIO_MASK) >> VLAN_PRIO_SHIFT;
566 /* If vlan priority provided by OS is NOT in available bmap */
567 if (!(adapter->vlan_prio_bmap & (1 << vlan_prio)))
568 vlan_tag = (vlan_tag & ~VLAN_PRIO_MASK) |
569 adapter->recommended_prio;
570
571 return vlan_tag;
572}
573
/* Fill the per-packet TX header WRB: checksum/LSO offload flags, VLAN
 * tag, total WRB count and frame length, via AMAP bit-field setters.
 */
static void wrb_fill_hdr(struct be_adapter *adapter, struct be_eth_hdr_wrb *hdr,
		struct sk_buff *skb, u32 wrb_cnt, u32 len)
{
	u16 vlan_tag;

	memset(hdr, 0, sizeof(*hdr));

	AMAP_SET_BITS(struct amap_eth_hdr_wrb, crc, hdr, 1);

	if (skb_is_gso(skb)) {
		AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso, hdr, 1);
		AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso_mss,
			hdr, skb_shinfo(skb)->gso_size);
		/* BE2/3 need an explicit LSOv6 flag; Lancer does not */
		if (skb_is_gso_v6(skb) && !lancer_chip(adapter))
			AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso6, hdr, 1);
		/* Workaround: Lancer A0 silicon additionally requires
		 * explicit IP/L4 checksum flags alongside LSO
		 */
		if (lancer_chip(adapter) && adapter->sli_family ==
			LANCER_A0_SLI_FAMILY) {
			AMAP_SET_BITS(struct amap_eth_hdr_wrb, ipcs, hdr, 1);
			if (is_tcp_pkt(skb))
				AMAP_SET_BITS(struct amap_eth_hdr_wrb,
						tcpcs, hdr, 1);
			else if (is_udp_pkt(skb))
				AMAP_SET_BITS(struct amap_eth_hdr_wrb,
						udpcs, hdr, 1);
		}
	} else if (skb->ip_summed == CHECKSUM_PARTIAL) {
		/* non-GSO: request L4 checksum offload when the stack
		 * left the checksum for hardware
		 */
		if (is_tcp_pkt(skb))
			AMAP_SET_BITS(struct amap_eth_hdr_wrb, tcpcs, hdr, 1);
		else if (is_udp_pkt(skb))
			AMAP_SET_BITS(struct amap_eth_hdr_wrb, udpcs, hdr, 1);
	}

	if (vlan_tx_tag_present(skb)) {
		AMAP_SET_BITS(struct amap_eth_hdr_wrb, vlan, hdr, 1);
		vlan_tag = be_get_tx_vlan_tag(adapter, skb);
		AMAP_SET_BITS(struct amap_eth_hdr_wrb, vlan_tag, hdr, vlan_tag);
	}

	AMAP_SET_BITS(struct amap_eth_hdr_wrb, event, hdr, 1);
	AMAP_SET_BITS(struct amap_eth_hdr_wrb, complete, hdr, 1);
	AMAP_SET_BITS(struct amap_eth_hdr_wrb, num_wrb, hdr, wrb_cnt);
	AMAP_SET_BITS(struct amap_eth_hdr_wrb, len, hdr, len);
}
617
Ivan Vecera2b7bceb2011-02-02 08:05:12 +0000618static void unmap_tx_frag(struct device *dev, struct be_eth_wrb *wrb,
Sathya Perla7101e112010-03-22 20:41:12 +0000619 bool unmap_single)
620{
621 dma_addr_t dma;
622
623 be_dws_le_to_cpu(wrb, sizeof(*wrb));
624
625 dma = (u64)wrb->frag_pa_hi << 32 | (u64)wrb->frag_pa_lo;
FUJITA Tomonorib681ee72010-04-04 21:40:18 +0000626 if (wrb->frag_len) {
Sathya Perla7101e112010-03-22 20:41:12 +0000627 if (unmap_single)
Ivan Vecera2b7bceb2011-02-02 08:05:12 +0000628 dma_unmap_single(dev, dma, wrb->frag_len,
629 DMA_TO_DEVICE);
Sathya Perla7101e112010-03-22 20:41:12 +0000630 else
Ivan Vecera2b7bceb2011-02-02 08:05:12 +0000631 dma_unmap_page(dev, dma, wrb->frag_len, DMA_TO_DEVICE);
Sathya Perla7101e112010-03-22 20:41:12 +0000632 }
633}
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700634
/* Populate the TX ring with work-request descriptors (wrbs) for @skb.
 *
 * Ring layout produced: one header wrb (filled last, once the total byte
 * count is known), then one wrb for the skb linear head (if non-empty),
 * one wrb per page fragment, and an optional zero-length dummy wrb when
 * @dummy_wrb is set.
 *
 * Returns the number of data bytes mapped, or 0 on a DMA mapping
 * failure, in which case all mappings made so far are undone and
 * txq->head is restored so no descriptors are consumed.
 */
static int make_tx_wrbs(struct be_adapter *adapter, struct be_queue_info *txq,
		struct sk_buff *skb, u32 wrb_cnt, bool dummy_wrb)
{
	dma_addr_t busaddr;
	int i, copied = 0;
	struct device *dev = &adapter->pdev->dev;
	struct sk_buff *first_skb = skb;
	struct be_eth_wrb *wrb;
	struct be_eth_hdr_wrb *hdr;
	bool map_single = false;	/* true while the head mapping is first */
	u16 map_head;

	/* Reserve the first slot for the header wrb */
	hdr = queue_head_node(txq);
	queue_head_inc(txq);
	map_head = txq->head;	/* first data wrb: unwind point on error */

	/* Map the linear part of the skb, if any */
	if (skb->len > skb->data_len) {
		int len = skb_headlen(skb);
		busaddr = dma_map_single(dev, skb->data, len, DMA_TO_DEVICE);
		if (dma_mapping_error(dev, busaddr))
			goto dma_err;
		map_single = true;
		wrb = queue_head_node(txq);
		wrb_fill(wrb, busaddr, len);
		be_dws_cpu_to_le(wrb, sizeof(*wrb));
		queue_head_inc(txq);
		copied += len;
	}

	/* One wrb per page fragment */
	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		const struct skb_frag_struct *frag =
			&skb_shinfo(skb)->frags[i];
		busaddr = skb_frag_dma_map(dev, frag, 0,
					skb_frag_size(frag), DMA_TO_DEVICE);
		if (dma_mapping_error(dev, busaddr))
			goto dma_err;
		wrb = queue_head_node(txq);
		wrb_fill(wrb, busaddr, skb_frag_size(frag));
		be_dws_cpu_to_le(wrb, sizeof(*wrb));
		queue_head_inc(txq);
		copied += skb_frag_size(frag);
	}

	if (dummy_wrb) {
		wrb = queue_head_node(txq);
		wrb_fill(wrb, 0, 0);
		be_dws_cpu_to_le(wrb, sizeof(*wrb));
		queue_head_inc(txq);
	}

	/* Now that 'copied' is final, fill the reserved header wrb */
	wrb_fill_hdr(adapter, hdr, first_skb, wrb_cnt, copied);
	be_dws_cpu_to_le(hdr, sizeof(*hdr));

	return copied;
dma_err:
	/* Rewind to the first data wrb and unmap everything mapped so far.
	 * Only the very first wrb may have been dma_map_single()'d, so
	 * map_single is cleared after the first iteration.
	 */
	txq->head = map_head;
	while (copied) {
		wrb = queue_head_node(txq);
		unmap_tx_frag(dev, wrb, map_single);
		map_single = false;
		copied -= wrb->frag_len;
		queue_head_inc(txq);
	}
	return 0;
}
700
/* ndo_start_xmit handler: build TX wrbs for @skb on the queue selected by
 * the skb's queue mapping, then ring the TX doorbell.  On a DMA mapping
 * failure the packet is dropped, but NETDEV_TX_OK is still returned as
 * the ndo_start_xmit contract requires.
 */
static netdev_tx_t be_xmit(struct sk_buff *skb,
			struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_tx_obj *txo = &adapter->tx_obj[skb_get_queue_mapping(skb)];
	struct be_queue_info *txq = &txo->q;
	u32 wrb_cnt = 0, copied = 0;
	u32 start = txq->head;
	bool dummy_wrb, stopped = false;

	/* For vlan tagged pkts, BE
	 * 1) calculates checksum even when CSO is not requested
	 * 2) calculates checksum wrongly for padded pkt less than
	 * 60 bytes long.
	 * As a workaround disable TX vlan offloading in such cases:
	 * insert the tag in software and clear vlan_tci instead.
	 */
	if (unlikely(vlan_tx_tag_present(skb) &&
		     (skb->ip_summed != CHECKSUM_PARTIAL || skb->len <= 60))) {
		/* May need a private copy before modifying a shared skb */
		skb = skb_share_check(skb, GFP_ATOMIC);
		if (unlikely(!skb))
			goto tx_drop;

		skb = __vlan_put_tag(skb, be_get_tx_vlan_tag(adapter, skb));
		if (unlikely(!skb))
			goto tx_drop;

		skb->vlan_tci = 0;
	}

	wrb_cnt = wrb_cnt_for_skb(adapter, skb, &dummy_wrb);

	copied = make_tx_wrbs(adapter, txq, skb, wrb_cnt, dummy_wrb);
	if (copied) {
		/* record the sent skb in the sent_skb table */
		BUG_ON(txo->sent_skb_list[start]);
		txo->sent_skb_list[start] = skb;

		/* Ensure txq has space for the next skb; Else stop the queue
		 * *BEFORE* ringing the tx doorbell, so that we serialize the
		 * tx compls of the current transmit which'll wake up the queue
		 */
		atomic_add(wrb_cnt, &txq->used);
		if ((BE_MAX_TX_FRAG_COUNT + atomic_read(&txq->used)) >=
								txq->len) {
			netif_stop_subqueue(netdev, skb_get_queue_mapping(skb));
			stopped = true;
		}

		be_txq_notify(adapter, txq->id, wrb_cnt);

		be_tx_stats_update(txo, wrb_cnt, copied,
				skb_shinfo(skb)->gso_segs, stopped);
	} else {
		/* make_tx_wrbs() failed (DMA mapping error): it already
		 * unmapped everything; undo the head advance and drop.
		 */
		txq->head = start;
		dev_kfree_skb_any(skb);
	}
tx_drop:
	return NETDEV_TX_OK;
}
760
761static int be_change_mtu(struct net_device *netdev, int new_mtu)
762{
763 struct be_adapter *adapter = netdev_priv(netdev);
764 if (new_mtu < BE_MIN_MTU ||
Ajit Khaparde34a89b82010-02-09 01:32:43 +0000765 new_mtu > (BE_MAX_JUMBO_FRAME_SIZE -
766 (ETH_HLEN + ETH_FCS_LEN))) {
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700767 dev_info(&adapter->pdev->dev,
768 "MTU must be between %d and %d bytes\n",
Ajit Khaparde34a89b82010-02-09 01:32:43 +0000769 BE_MIN_MTU,
770 (BE_MAX_JUMBO_FRAME_SIZE - (ETH_HLEN + ETH_FCS_LEN)));
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700771 return -EINVAL;
772 }
773 dev_info(&adapter->pdev->dev, "MTU changed from %d to %d bytes\n",
774 netdev->mtu, new_mtu);
775 netdev->mtu = new_mtu;
776 return 0;
777}
778
779/*
Ajit Khaparde82903e42010-02-09 01:34:57 +0000780 * A max of 64 (BE_NUM_VLANS_SUPPORTED) vlans can be configured in BE.
781 * If the user configures more, place BE in vlan promiscuous mode.
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700782 */
static int be_vid_config(struct be_adapter *adapter, bool vf, u32 vf_num)
{
	struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf_num];
	u16 vtag[BE_NUM_VLANS_SUPPORTED];
	u16 ntags = 0, i;
	int status = 0;

	/* For a VF, first program that VF's single transparent vlan tag on
	 * the VF's own interface handle (the PF table is still refreshed
	 * below).
	 */
	if (vf) {
		vtag[0] = cpu_to_le16(vf_cfg->vlan_tag);
		status = be_cmd_vlan_config(adapter, vf_cfg->if_handle, vtag,
					1, 1, 0);
	}

	/* No need to further configure vids if in promiscuous mode */
	if (adapter->promiscuous)
		return 0;

	if (adapter->vlans_added <= adapter->max_vlans) {
		/* Construct VLAN Table to give to HW */
		for (i = 0; i < VLAN_N_VID; i++) {
			if (adapter->vlan_tag[i]) {
				vtag[ntags] = cpu_to_le16(i);
				ntags++;
			}
		}
		status = be_cmd_vlan_config(adapter, adapter->if_handle,
					vtag, ntags, 1, 0);
	} else {
		/* More vids than the HW table holds: ask the firmware for
		 * vlan-promiscuous mode instead (last arg = 1).
		 */
		status = be_cmd_vlan_config(adapter, adapter->if_handle,
					NULL, 0, 1, 1);
	}

	return status;
}
817
Jiri Pirko8e586132011-12-08 19:52:37 -0500818static int be_vlan_add_vid(struct net_device *netdev, u16 vid)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700819{
820 struct be_adapter *adapter = netdev_priv(netdev);
Ajit Khaparde80817cb2011-12-30 12:15:12 +0000821 int status = 0;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700822
Ajit Khaparde80817cb2011-12-30 12:15:12 +0000823 if (!be_physfn(adapter)) {
824 status = -EINVAL;
825 goto ret;
826 }
Sarveshwar Bandiba343c72010-03-31 02:56:12 +0000827
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700828 adapter->vlan_tag[vid] = 1;
Ajit Khaparde82903e42010-02-09 01:34:57 +0000829 if (adapter->vlans_added <= (adapter->max_vlans + 1))
Ajit Khaparde80817cb2011-12-30 12:15:12 +0000830 status = be_vid_config(adapter, false, 0);
Jiri Pirko8e586132011-12-08 19:52:37 -0500831
Ajit Khaparde80817cb2011-12-30 12:15:12 +0000832 if (!status)
833 adapter->vlans_added++;
834 else
835 adapter->vlan_tag[vid] = 0;
836ret:
837 return status;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700838}
839
Jiri Pirko8e586132011-12-08 19:52:37 -0500840static int be_vlan_rem_vid(struct net_device *netdev, u16 vid)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700841{
842 struct be_adapter *adapter = netdev_priv(netdev);
Ajit Khaparde80817cb2011-12-30 12:15:12 +0000843 int status = 0;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700844
Ajit Khaparde80817cb2011-12-30 12:15:12 +0000845 if (!be_physfn(adapter)) {
846 status = -EINVAL;
847 goto ret;
848 }
Sarveshwar Bandiba343c72010-03-31 02:56:12 +0000849
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700850 adapter->vlan_tag[vid] = 0;
Ajit Khaparde82903e42010-02-09 01:34:57 +0000851 if (adapter->vlans_added <= adapter->max_vlans)
Ajit Khaparde80817cb2011-12-30 12:15:12 +0000852 status = be_vid_config(adapter, false, 0);
Jiri Pirko8e586132011-12-08 19:52:37 -0500853
Ajit Khaparde80817cb2011-12-30 12:15:12 +0000854 if (!status)
855 adapter->vlans_added--;
856 else
857 adapter->vlan_tag[vid] = 1;
858ret:
859 return status;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700860}
861
Sathya Perlaa54769f2011-10-24 02:45:00 +0000862static void be_set_rx_mode(struct net_device *netdev)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700863{
864 struct be_adapter *adapter = netdev_priv(netdev);
865
866 if (netdev->flags & IFF_PROMISC) {
Sathya Perla5b8821b2011-08-02 19:57:44 +0000867 be_cmd_rx_filter(adapter, IFF_PROMISC, ON);
Sathya Perla24307ee2009-06-18 00:09:25 +0000868 adapter->promiscuous = true;
869 goto done;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700870 }
Sathya Perla24307ee2009-06-18 00:09:25 +0000871
Lucas De Marchi25985ed2011-03-30 22:57:33 -0300872 /* BE was previously in promiscuous mode; disable it */
Sathya Perla24307ee2009-06-18 00:09:25 +0000873 if (adapter->promiscuous) {
874 adapter->promiscuous = false;
Sathya Perla5b8821b2011-08-02 19:57:44 +0000875 be_cmd_rx_filter(adapter, IFF_PROMISC, OFF);
Sathya Perlac0e64ef2011-08-02 19:57:43 +0000876
877 if (adapter->vlans_added)
878 be_vid_config(adapter, false, 0);
Sathya Perla24307ee2009-06-18 00:09:25 +0000879 }
880
Sathya Perlae7b909a2009-11-22 22:01:10 +0000881 /* Enable multicast promisc if num configured exceeds what we support */
Jiri Pirko4cd24ea2010-02-08 04:30:35 +0000882 if (netdev->flags & IFF_ALLMULTI ||
Sathya Perla5b8821b2011-08-02 19:57:44 +0000883 netdev_mc_count(netdev) > BE_MAX_MC) {
884 be_cmd_rx_filter(adapter, IFF_ALLMULTI, ON);
Sathya Perla24307ee2009-06-18 00:09:25 +0000885 goto done;
886 }
887
Sathya Perla5b8821b2011-08-02 19:57:44 +0000888 be_cmd_rx_filter(adapter, IFF_MULTICAST, ON);
Sathya Perla24307ee2009-06-18 00:09:25 +0000889done:
890 return;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700891}
892
/* ndo_set_vf_mac handler: program @mac as the MAC address of VF @vf. */
static int be_set_vf_mac(struct net_device *netdev, int vf, u8 *mac)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf];
	int status;

	if (!sriov_enabled(adapter))
		return -EPERM;

	if (!is_valid_ether_addr(mac) || vf >= adapter->num_vfs)
		return -EINVAL;

	if (lancer_chip(adapter)) {
		/* Lancer programs the MAC via the mac-list command */
		status = be_cmd_set_mac_list(adapter, mac, 1, vf + 1);
	} else {
		/* Replace the VF's pmac entry: delete old, add new.
		 * NOTE(review): the status of be_cmd_pmac_del() is
		 * overwritten by be_cmd_pmac_add() below, so a failed
		 * delete goes unreported — confirm this is intentional.
		 */
		status = be_cmd_pmac_del(adapter, vf_cfg->if_handle,
					 vf_cfg->pmac_id, vf + 1);

		status = be_cmd_pmac_add(adapter, mac, vf_cfg->if_handle,
					&vf_cfg->pmac_id, vf + 1);
	}

	if (status)
		dev_err(&adapter->pdev->dev, "MAC %pM set on VF %d Failed\n",
				mac, vf);
	else
		/* Cache the address so be_get_vf_config() can report it */
		memcpy(vf_cfg->mac_addr, mac, ETH_ALEN);

	return status;
}
923
Ajit Khaparde64600ea2010-07-23 01:50:34 +0000924static int be_get_vf_config(struct net_device *netdev, int vf,
925 struct ifla_vf_info *vi)
926{
927 struct be_adapter *adapter = netdev_priv(netdev);
Sathya Perla11ac75e2011-12-13 00:58:50 +0000928 struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf];
Ajit Khaparde64600ea2010-07-23 01:50:34 +0000929
Sathya Perla11ac75e2011-12-13 00:58:50 +0000930 if (!sriov_enabled(adapter))
Ajit Khaparde64600ea2010-07-23 01:50:34 +0000931 return -EPERM;
932
Sathya Perla11ac75e2011-12-13 00:58:50 +0000933 if (vf >= adapter->num_vfs)
Ajit Khaparde64600ea2010-07-23 01:50:34 +0000934 return -EINVAL;
935
936 vi->vf = vf;
Sathya Perla11ac75e2011-12-13 00:58:50 +0000937 vi->tx_rate = vf_cfg->tx_rate;
938 vi->vlan = vf_cfg->vlan_tag;
Ajit Khaparde64600ea2010-07-23 01:50:34 +0000939 vi->qos = 0;
Sathya Perla11ac75e2011-12-13 00:58:50 +0000940 memcpy(&vi->mac, vf_cfg->mac_addr, ETH_ALEN);
Ajit Khaparde64600ea2010-07-23 01:50:34 +0000941
942 return 0;
943}
944
Ajit Khaparde1da87b72010-07-23 01:51:22 +0000945static int be_set_vf_vlan(struct net_device *netdev,
946 int vf, u16 vlan, u8 qos)
947{
948 struct be_adapter *adapter = netdev_priv(netdev);
949 int status = 0;
950
Sathya Perla11ac75e2011-12-13 00:58:50 +0000951 if (!sriov_enabled(adapter))
Ajit Khaparde1da87b72010-07-23 01:51:22 +0000952 return -EPERM;
953
Sathya Perla11ac75e2011-12-13 00:58:50 +0000954 if (vf >= adapter->num_vfs || vlan > 4095)
Ajit Khaparde1da87b72010-07-23 01:51:22 +0000955 return -EINVAL;
956
957 if (vlan) {
Sathya Perla11ac75e2011-12-13 00:58:50 +0000958 adapter->vf_cfg[vf].vlan_tag = vlan;
Ajit Khaparde1da87b72010-07-23 01:51:22 +0000959 adapter->vlans_added++;
960 } else {
Sathya Perla11ac75e2011-12-13 00:58:50 +0000961 adapter->vf_cfg[vf].vlan_tag = 0;
Ajit Khaparde1da87b72010-07-23 01:51:22 +0000962 adapter->vlans_added--;
963 }
964
965 status = be_vid_config(adapter, true, vf);
966
967 if (status)
968 dev_info(&adapter->pdev->dev,
969 "VLAN %d config on VF %d failed\n", vlan, vf);
970 return status;
971}
972
Ajit Khapardee1d18732010-07-23 01:52:13 +0000973static int be_set_vf_tx_rate(struct net_device *netdev,
974 int vf, int rate)
975{
976 struct be_adapter *adapter = netdev_priv(netdev);
977 int status = 0;
978
Sathya Perla11ac75e2011-12-13 00:58:50 +0000979 if (!sriov_enabled(adapter))
Ajit Khapardee1d18732010-07-23 01:52:13 +0000980 return -EPERM;
981
Ajit Khaparde94f434c2011-12-30 12:15:30 +0000982 if (vf >= adapter->num_vfs)
Ajit Khapardee1d18732010-07-23 01:52:13 +0000983 return -EINVAL;
984
Ajit Khaparde94f434c2011-12-30 12:15:30 +0000985 if (rate < 100 || rate > 10000) {
986 dev_err(&adapter->pdev->dev,
987 "tx rate must be between 100 and 10000 Mbps\n");
988 return -EINVAL;
989 }
Ajit Khapardee1d18732010-07-23 01:52:13 +0000990
Ajit Khaparde856c4012011-02-11 13:32:32 +0000991 status = be_cmd_set_qos(adapter, rate / 10, vf + 1);
Ajit Khapardee1d18732010-07-23 01:52:13 +0000992
993 if (status)
Ajit Khaparde94f434c2011-12-30 12:15:30 +0000994 dev_err(&adapter->pdev->dev,
Ajit Khapardee1d18732010-07-23 01:52:13 +0000995 "tx rate %d on VF %d failed\n", rate, vf);
Ajit Khaparde94f434c2011-12-30 12:15:30 +0000996 else
997 adapter->vf_cfg[vf].tx_rate = rate;
Ajit Khapardee1d18732010-07-23 01:52:13 +0000998 return status;
999}
1000
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001001static void be_eqd_update(struct be_adapter *adapter, struct be_eq_obj *eqo)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001002{
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001003 struct be_rx_stats *stats = rx_stats(&adapter->rx_obj[eqo->idx]);
Sathya Perla4097f662009-03-24 16:40:13 -07001004 ulong now = jiffies;
Sathya Perlaac124ff2011-07-25 19:10:14 +00001005 ulong delta = now - stats->rx_jiffies;
Sathya Perlaab1594e2011-07-25 19:10:15 +00001006 u64 pkts;
1007 unsigned int start, eqd;
Sathya Perlaac124ff2011-07-25 19:10:14 +00001008
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001009 if (!eqo->enable_aic) {
1010 eqd = eqo->eqd;
1011 goto modify_eqd;
1012 }
1013
1014 if (eqo->idx >= adapter->num_rx_qs)
Sathya Perlaac124ff2011-07-25 19:10:14 +00001015 return;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001016
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001017 stats = rx_stats(&adapter->rx_obj[eqo->idx]);
1018
Sathya Perla4097f662009-03-24 16:40:13 -07001019 /* Wrapped around */
Sathya Perla3abcded2010-10-03 22:12:27 -07001020 if (time_before(now, stats->rx_jiffies)) {
1021 stats->rx_jiffies = now;
Sathya Perla4097f662009-03-24 16:40:13 -07001022 return;
1023 }
1024
Sathya Perlaac124ff2011-07-25 19:10:14 +00001025 /* Update once a second */
1026 if (delta < HZ)
Sathya Perla4097f662009-03-24 16:40:13 -07001027 return;
1028
Sathya Perlaab1594e2011-07-25 19:10:15 +00001029 do {
1030 start = u64_stats_fetch_begin_bh(&stats->sync);
1031 pkts = stats->rx_pkts;
1032 } while (u64_stats_fetch_retry_bh(&stats->sync, start));
1033
Eric Dumazet68c3e5a2011-08-09 06:23:07 +00001034 stats->rx_pps = (unsigned long)(pkts - stats->rx_pkts_prev) / (delta / HZ);
Sathya Perlaab1594e2011-07-25 19:10:15 +00001035 stats->rx_pkts_prev = pkts;
Sathya Perla3abcded2010-10-03 22:12:27 -07001036 stats->rx_jiffies = now;
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001037 eqd = (stats->rx_pps / 110000) << 3;
1038 eqd = min(eqd, eqo->max_eqd);
1039 eqd = max(eqd, eqo->min_eqd);
Sathya Perlaac124ff2011-07-25 19:10:14 +00001040 if (eqd < 10)
1041 eqd = 0;
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001042
1043modify_eqd:
1044 if (eqd != eqo->cur_eqd) {
1045 be_cmd_modify_eqd(adapter, eqo->q.id, eqd);
1046 eqo->cur_eqd = eqd;
Sathya Perlaac124ff2011-07-25 19:10:14 +00001047 }
Sathya Perla4097f662009-03-24 16:40:13 -07001048}
1049
Sathya Perla3abcded2010-10-03 22:12:27 -07001050static void be_rx_stats_update(struct be_rx_obj *rxo,
Sathya Perla2e588f82011-03-11 02:49:26 +00001051 struct be_rx_compl_info *rxcp)
Sathya Perla4097f662009-03-24 16:40:13 -07001052{
Sathya Perlaac124ff2011-07-25 19:10:14 +00001053 struct be_rx_stats *stats = rx_stats(rxo);
Sathya Perla4097f662009-03-24 16:40:13 -07001054
Sathya Perlaab1594e2011-07-25 19:10:15 +00001055 u64_stats_update_begin(&stats->sync);
Sathya Perla3abcded2010-10-03 22:12:27 -07001056 stats->rx_compl++;
Sathya Perla2e588f82011-03-11 02:49:26 +00001057 stats->rx_bytes += rxcp->pkt_size;
Sathya Perla3abcded2010-10-03 22:12:27 -07001058 stats->rx_pkts++;
Sathya Perla2e588f82011-03-11 02:49:26 +00001059 if (rxcp->pkt_type == BE_MULTICAST_PACKET)
Sathya Perla3abcded2010-10-03 22:12:27 -07001060 stats->rx_mcast_pkts++;
Sathya Perla2e588f82011-03-11 02:49:26 +00001061 if (rxcp->err)
Sathya Perlaac124ff2011-07-25 19:10:14 +00001062 stats->rx_compl_err++;
Sathya Perlaab1594e2011-07-25 19:10:15 +00001063 u64_stats_update_end(&stats->sync);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001064}
1065
Sathya Perla2e588f82011-03-11 02:49:26 +00001066static inline bool csum_passed(struct be_rx_compl_info *rxcp)
Ajit Khaparde728a9972009-04-13 15:41:22 -07001067{
Padmanabh Ratnakar19fad862011-03-07 03:08:16 +00001068 /* L4 checksum is not reliable for non TCP/UDP packets.
1069 * Also ignore ipcksm for ipv6 pkts */
Sathya Perla2e588f82011-03-11 02:49:26 +00001070 return (rxcp->tcpf || rxcp->udpf) && rxcp->l4_csum &&
1071 (rxcp->ip_csum || rxcp->ipv6);
Ajit Khaparde728a9972009-04-13 15:41:22 -07001072}
1073
/* Consume the RX page-info entry at @frag_idx and return it.
 *
 * The entry flagged last_page_user carries the DMA mapping for the whole
 * (big) receive page; the mapping is released when that entry is
 * consumed.  Decrements the count of posted RX buffers; the caller owns
 * the page reference afterwards.
 */
static struct be_rx_page_info *get_rx_page_info(struct be_rx_obj *rxo,
						u16 frag_idx)
{
	struct be_adapter *adapter = rxo->adapter;
	struct be_rx_page_info *rx_page_info;
	struct be_queue_info *rxq = &rxo->q;

	rx_page_info = &rxo->page_info_tbl[frag_idx];
	BUG_ON(!rx_page_info->page);

	/* Unmap the full big page once its last user is consumed */
	if (rx_page_info->last_page_user) {
		dma_unmap_page(&adapter->pdev->dev,
			       dma_unmap_addr(rx_page_info, bus),
			       adapter->big_page_size, DMA_FROM_DEVICE);
		rx_page_info->last_page_user = false;
	}

	atomic_dec(&rxq->used);
	return rx_page_info;
}
1094
1095/* Throwaway the data in the Rx completion */
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001096static void be_rx_compl_discard(struct be_rx_obj *rxo,
1097 struct be_rx_compl_info *rxcp)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001098{
Sathya Perla3abcded2010-10-03 22:12:27 -07001099 struct be_queue_info *rxq = &rxo->q;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001100 struct be_rx_page_info *page_info;
Sathya Perla2e588f82011-03-11 02:49:26 +00001101 u16 i, num_rcvd = rxcp->num_rcvd;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001102
Padmanabh Ratnakare80d9da2011-03-07 03:07:58 +00001103 for (i = 0; i < num_rcvd; i++) {
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001104 page_info = get_rx_page_info(rxo, rxcp->rxq_idx);
Padmanabh Ratnakare80d9da2011-03-07 03:07:58 +00001105 put_page(page_info->page);
1106 memset(page_info, 0, sizeof(*page_info));
Sathya Perla2e588f82011-03-11 02:49:26 +00001107 index_inc(&rxcp->rxq_idx, rxq->len);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001108 }
1109}
1110
/*
 * skb_fill_rx_data forms a complete skb for an ether frame
 * indicated by rxcp: the first (up to) BE_HDR_LEN bytes are copied into
 * the skb linear area and the rest of the frame is attached as page
 * fragments consumed from the RX queue, coalescing consecutive
 * fragments that live on the same physical page into one skb frag.
 */
static void skb_fill_rx_data(struct be_rx_obj *rxo, struct sk_buff *skb,
			struct be_rx_compl_info *rxcp)
{
	struct be_queue_info *rxq = &rxo->q;
	struct be_rx_page_info *page_info;
	u16 i, j;
	u16 hdr_len, curr_frag_len, remaining;
	u8 *start;

	page_info = get_rx_page_info(rxo, rxcp->rxq_idx);
	start = page_address(page_info->page) + page_info->page_offset;
	prefetch(start);

	/* Copy data in the first descriptor of this completion */
	curr_frag_len = min(rxcp->pkt_size, rx_frag_size);

	/* Copy the header portion into skb_data */
	hdr_len = min(BE_HDR_LEN, curr_frag_len);
	memcpy(skb->data, start, hdr_len);
	skb->len = curr_frag_len;
	if (curr_frag_len <= BE_HDR_LEN) { /* tiny packet */
		/* Complete packet has now been moved to data */
		put_page(page_info->page);
		skb->data_len = 0;
		skb->tail += curr_frag_len;
	} else {
		/* Bytes beyond the copied header remain in the page and
		 * become skb frag 0.
		 */
		skb_shinfo(skb)->nr_frags = 1;
		skb_frag_set_page(skb, 0, page_info->page);
		skb_shinfo(skb)->frags[0].page_offset =
					page_info->page_offset + hdr_len;
		skb_frag_size_set(&skb_shinfo(skb)->frags[0], curr_frag_len - hdr_len);
		skb->data_len = curr_frag_len - hdr_len;
		skb->truesize += rx_frag_size;
		skb->tail += hdr_len;
	}
	page_info->page = NULL;

	/* Fits in a single RX fragment: done */
	if (rxcp->pkt_size <= rx_frag_size) {
		BUG_ON(rxcp->num_rcvd != 1);
		return;
	}

	/* More frags present for this completion */
	index_inc(&rxcp->rxq_idx, rxq->len);
	remaining = rxcp->pkt_size - curr_frag_len;
	for (i = 1, j = 0; i < rxcp->num_rcvd; i++) {
		page_info = get_rx_page_info(rxo, rxcp->rxq_idx);
		curr_frag_len = min(remaining, rx_frag_size);

		/* Coalesce all frags from the same physical page in one slot */
		if (page_info->page_offset == 0) {
			/* Fresh page */
			j++;
			skb_frag_set_page(skb, j, page_info->page);
			skb_shinfo(skb)->frags[j].page_offset =
							page_info->page_offset;
			skb_frag_size_set(&skb_shinfo(skb)->frags[j], 0);
			skb_shinfo(skb)->nr_frags++;
		} else {
			/* Same page as the previous frag: drop the extra
			 * page reference taken when it was posted.
			 */
			put_page(page_info->page);
		}

		skb_frag_size_add(&skb_shinfo(skb)->frags[j], curr_frag_len);
		skb->len += curr_frag_len;
		skb->data_len += curr_frag_len;
		skb->truesize += rx_frag_size;
		remaining -= curr_frag_len;
		index_inc(&rxcp->rxq_idx, rxq->len);
		page_info->page = NULL;
	}
	BUG_ON(j > MAX_SKB_FRAGS);
}
1187
Ajit Khaparde5be93b92009-07-21 12:36:19 -07001188/* Process the RX completion indicated by rxcp when GRO is disabled */
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001189static void be_rx_compl_process(struct be_rx_obj *rxo,
1190 struct be_rx_compl_info *rxcp)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001191{
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001192 struct be_adapter *adapter = rxo->adapter;
Michał Mirosław6332c8d2011-04-07 02:43:48 +00001193 struct net_device *netdev = adapter->netdev;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001194 struct sk_buff *skb;
Sathya Perla89420422010-02-17 01:35:26 +00001195
Eric Dumazetbb349bb2012-01-25 03:56:30 +00001196 skb = netdev_alloc_skb_ip_align(netdev, BE_RX_SKB_ALLOC_SIZE);
Sathya Perlaa058a632010-02-17 01:34:22 +00001197 if (unlikely(!skb)) {
Sathya Perlaac124ff2011-07-25 19:10:14 +00001198 rx_stats(rxo)->rx_drops_no_skbs++;
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001199 be_rx_compl_discard(rxo, rxcp);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001200 return;
1201 }
1202
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001203 skb_fill_rx_data(rxo, skb, rxcp);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001204
Michał Mirosław6332c8d2011-04-07 02:43:48 +00001205 if (likely((netdev->features & NETIF_F_RXCSUM) && csum_passed(rxcp)))
Ajit Khaparde728a9972009-04-13 15:41:22 -07001206 skb->ip_summed = CHECKSUM_UNNECESSARY;
Somnath Koturc6ce2f42010-10-25 01:11:58 +00001207 else
1208 skb_checksum_none_assert(skb);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001209
Michał Mirosław6332c8d2011-04-07 02:43:48 +00001210 skb->protocol = eth_type_trans(skb, netdev);
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001211 if (netdev->features & NETIF_F_RXHASH)
Ajit Khaparde4b972912011-04-06 18:07:43 +00001212 skb->rxhash = rxcp->rss_hash;
1213
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001214
Jiri Pirko343e43c2011-08-25 02:50:51 +00001215 if (rxcp->vlanf)
Ajit Khaparde4c5102f2011-07-12 22:10:01 -07001216 __vlan_hwaccel_put_tag(skb, rxcp->vlan_tag);
1217
1218 netif_receive_skb(skb);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001219}
1220
/* Process the RX completion indicated by rxcp when GRO is enabled:
 * attach the received page fragments to a frag-only skb obtained from
 * napi_get_frags() and feed it to the GRO engine, copying no data.
 */
void be_rx_compl_process_gro(struct be_rx_obj *rxo, struct napi_struct *napi,
			struct be_rx_compl_info *rxcp)
{
	struct be_adapter *adapter = rxo->adapter;
	struct be_rx_page_info *page_info;
	struct sk_buff *skb = NULL;
	struct be_queue_info *rxq = &rxo->q;
	u16 remaining, curr_frag_len;
	u16 i, j;

	skb = napi_get_frags(napi);
	if (!skb) {
		/* No skb available: recycle the posted RX buffers */
		be_rx_compl_discard(rxo, rxcp);
		return;
	}

	remaining = rxcp->pkt_size;
	/* j is u16: it starts at 0xffff so the first j++ wraps to 0 */
	for (i = 0, j = -1; i < rxcp->num_rcvd; i++) {
		page_info = get_rx_page_info(rxo, rxcp->rxq_idx);

		curr_frag_len = min(remaining, rx_frag_size);

		/* Coalesce all frags from the same physical page in one slot */
		if (i == 0 || page_info->page_offset == 0) {
			/* First frag or Fresh page */
			j++;
			skb_frag_set_page(skb, j, page_info->page);
			skb_shinfo(skb)->frags[j].page_offset =
							page_info->page_offset;
			skb_frag_size_set(&skb_shinfo(skb)->frags[j], 0);
		} else {
			/* Continuation on the same page: drop the extra
			 * page reference taken when it was posted.
			 */
			put_page(page_info->page);
		}
		skb_frag_size_add(&skb_shinfo(skb)->frags[j], curr_frag_len);
		skb->truesize += rx_frag_size;
		remaining -= curr_frag_len;
		index_inc(&rxcp->rxq_idx, rxq->len);
		memset(page_info, 0, sizeof(*page_info));
	}
	BUG_ON(j > MAX_SKB_FRAGS);

	skb_shinfo(skb)->nr_frags = j + 1;
	skb->len = rxcp->pkt_size;
	skb->data_len = rxcp->pkt_size;
	/* NOTE(review): checksum assumed verified by HW on this path —
	 * confirm GRO completions are only raised for csum-passed frames.
	 */
	skb->ip_summed = CHECKSUM_UNNECESSARY;
	if (adapter->netdev->features & NETIF_F_RXHASH)
		skb->rxhash = rxcp->rss_hash;

	if (rxcp->vlanf)
		__vlan_hwaccel_put_tag(skb, rxcp->vlan_tag);

	napi_gro_frags(napi);
}
1275
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001276static void be_parse_rx_compl_v1(struct be_eth_rx_compl *compl,
1277 struct be_rx_compl_info *rxcp)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001278{
Sathya Perla2e588f82011-03-11 02:49:26 +00001279 rxcp->pkt_size =
1280 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, pktsize, compl);
1281 rxcp->vlanf = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, vtp, compl);
1282 rxcp->err = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, err, compl);
1283 rxcp->tcpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, tcpf, compl);
Padmanabh Ratnakar9ecb42f2011-03-15 14:57:09 -07001284 rxcp->udpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, udpf, compl);
Sathya Perla2e588f82011-03-11 02:49:26 +00001285 rxcp->ip_csum =
1286 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, ipcksm, compl);
1287 rxcp->l4_csum =
1288 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, l4_cksm, compl);
1289 rxcp->ipv6 =
1290 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, ip_version, compl);
1291 rxcp->rxq_idx =
1292 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, fragndx, compl);
1293 rxcp->num_rcvd =
1294 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, numfrags, compl);
1295 rxcp->pkt_type =
1296 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, cast_enc, compl);
Ajit Khaparde4b972912011-04-06 18:07:43 +00001297 rxcp->rss_hash =
1298 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, rsshash, rxcp);
Sathya Perla15d72182011-03-21 20:49:26 +00001299 if (rxcp->vlanf) {
1300 rxcp->vtm = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, vtm,
David S. Miller3c709f82011-05-11 14:26:15 -04001301 compl);
1302 rxcp->vlan_tag = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, vlan_tag,
1303 compl);
Sathya Perla15d72182011-03-21 20:49:26 +00001304 }
Sathya Perla12004ae2011-08-02 19:57:46 +00001305 rxcp->port = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, port, compl);
Sathya Perla2e588f82011-03-11 02:49:26 +00001306}
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001307
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001308static void be_parse_rx_compl_v0(struct be_eth_rx_compl *compl,
1309 struct be_rx_compl_info *rxcp)
Sathya Perla2e588f82011-03-11 02:49:26 +00001310{
1311 rxcp->pkt_size =
1312 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, pktsize, compl);
1313 rxcp->vlanf = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, vtp, compl);
1314 rxcp->err = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, err, compl);
1315 rxcp->tcpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, tcpf, compl);
Padmanabh Ratnakar9ecb42f2011-03-15 14:57:09 -07001316 rxcp->udpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, udpf, compl);
Sathya Perla2e588f82011-03-11 02:49:26 +00001317 rxcp->ip_csum =
1318 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, ipcksm, compl);
1319 rxcp->l4_csum =
1320 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, l4_cksm, compl);
1321 rxcp->ipv6 =
1322 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, ip_version, compl);
1323 rxcp->rxq_idx =
1324 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, fragndx, compl);
1325 rxcp->num_rcvd =
1326 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, numfrags, compl);
1327 rxcp->pkt_type =
1328 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, cast_enc, compl);
Ajit Khaparde4b972912011-04-06 18:07:43 +00001329 rxcp->rss_hash =
1330 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, rsshash, rxcp);
Sathya Perla15d72182011-03-21 20:49:26 +00001331 if (rxcp->vlanf) {
1332 rxcp->vtm = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, vtm,
David S. Miller3c709f82011-05-11 14:26:15 -04001333 compl);
1334 rxcp->vlan_tag = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, vlan_tag,
1335 compl);
Sathya Perla15d72182011-03-21 20:49:26 +00001336 }
Sathya Perla12004ae2011-08-02 19:57:46 +00001337 rxcp->port = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, port, compl);
Sathya Perla2e588f82011-03-11 02:49:26 +00001338}
1339
/* Fetch the next valid rx completion from the rx CQ, parse it into
 * rxo->rxcp and advance the CQ tail.  Returns NULL when no completion
 * is pending.  The returned pointer is to per-rxo storage, so it is
 * only valid until the next call for the same rxo.
 */
static struct be_rx_compl_info *be_rx_compl_get(struct be_rx_obj *rxo)
{
	struct be_eth_rx_compl *compl = queue_tail_node(&rxo->cq);
	struct be_rx_compl_info *rxcp = &rxo->rxcp;
	struct be_adapter *adapter = rxo->adapter;

	/* For checking the valid bit it is Ok to use either definition as the
	 * valid bit is at the same position in both v0 and v1 Rx compl */
	if (compl->dw[offsetof(struct amap_eth_rx_compl_v1, valid) / 32] == 0)
		return NULL;

	/* read barrier: don't read the rest of the compl before the valid
	 * bit has been observed set */
	rmb();
	be_dws_le_to_cpu(compl, sizeof(*compl));

	/* BE3-native hw produces v1 completions; older skews produce v0 */
	if (adapter->be3_native)
		be_parse_rx_compl_v1(compl, rxcp);
	else
		be_parse_rx_compl_v0(compl, rxcp);

	if (rxcp->vlanf) {
		/* vlanf could be wrongly set in some cards.
		 * ignore if vtm is not set */
		if ((adapter->function_mode & FLEX10_MODE) && !rxcp->vtm)
			rxcp->vlanf = 0;

		/* Lancer already reports the tag in cpu byte-order */
		if (!lancer_chip(adapter))
			rxcp->vlan_tag = swab16(rxcp->vlan_tag);

		/* Drop the vlan indication for the port-vid unless the vid
		 * is explicitly configured on the interface */
		if (adapter->pvid == (rxcp->vlan_tag & VLAN_VID_MASK) &&
		    !adapter->vlan_tag[rxcp->vlan_tag])
			rxcp->vlanf = 0;
	}

	/* As the compl has been parsed, reset it; we wont touch it again */
	compl->dw[offsetof(struct amap_eth_rx_compl_v1, valid) / 32] = 0;

	queue_tail_inc(&rxo->cq);
	return rxcp;
}
1379
Eric Dumazet1829b082011-03-01 05:48:12 +00001380static inline struct page *be_alloc_pages(u32 size, gfp_t gfp)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001381{
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001382 u32 order = get_order(size);
Eric Dumazet1829b082011-03-01 05:48:12 +00001383
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001384 if (order > 0)
Eric Dumazet1829b082011-03-01 05:48:12 +00001385 gfp |= __GFP_COMP;
1386 return alloc_pages(gfp, order);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001387}
1388
/*
 * Allocate a page, split it to fragments of size rx_frag_size and post as
 * receive buffers to BE.
 * Posts at most MAX_RX_POST buffers per call, and stops early when the
 * rx queue is full (page_info->page already set) or allocation fails.
 * @gfp: GFP_KERNEL from process context, GFP_ATOMIC from the rx path.
 */
static void be_post_rx_frags(struct be_rx_obj *rxo, gfp_t gfp)
{
	struct be_adapter *adapter = rxo->adapter;
	struct be_rx_page_info *page_info = NULL, *prev_page_info = NULL;
	struct be_queue_info *rxq = &rxo->q;
	struct page *pagep = NULL;
	struct be_eth_rx_d *rxd;
	u64 page_dmaaddr = 0, frag_dmaaddr;
	u32 posted, page_offset = 0;

	page_info = &rxo->page_info_tbl[rxq->head];
	for (posted = 0; posted < MAX_RX_POST && !page_info->page; posted++) {
		if (!pagep) {
			/* New big page: map it once; each frag carved out of
			 * it gets its own reference (get_page below) */
			pagep = be_alloc_pages(adapter->big_page_size, gfp);
			if (unlikely(!pagep)) {
				rx_stats(rxo)->rx_post_fail++;
				break;
			}
			page_dmaaddr = dma_map_page(&adapter->pdev->dev, pagep,
						    0, adapter->big_page_size,
						    DMA_FROM_DEVICE);
			page_info->page_offset = 0;
		} else {
			/* Another frag in the same big page: take an extra
			 * reference so each frag can be freed independently */
			get_page(pagep);
			page_info->page_offset = page_offset + rx_frag_size;
		}
		page_offset = page_info->page_offset;
		page_info->page = pagep;
		dma_unmap_addr_set(page_info, bus, page_dmaaddr);
		frag_dmaaddr = page_dmaaddr + page_info->page_offset;

		/* Fill the rx descriptor with the frag's DMA address */
		rxd = queue_head_node(rxq);
		rxd->fragpa_lo = cpu_to_le32(frag_dmaaddr & 0xFFFFFFFF);
		rxd->fragpa_hi = cpu_to_le32(upper_32_bits(frag_dmaaddr));

		/* Any space left in the current big page for another frag? */
		if ((page_offset + rx_frag_size + rx_frag_size) >
					adapter->big_page_size) {
			pagep = NULL;
			page_info->last_page_user = true;
		}

		prev_page_info = page_info;
		queue_head_inc(rxq);
		page_info = &rxo->page_info_tbl[rxq->head];
	}
	/* Loop ended with the big page only partially carved up; the last
	 * posted frag is the one that owns the unmap */
	if (pagep)
		prev_page_info->last_page_user = true;

	if (posted) {
		atomic_add(posted, &rxq->used);
		be_rxq_notify(adapter, rxq->id, posted);
	} else if (atomic_read(&rxq->used) == 0) {
		/* Let be_worker replenish when memory is available */
		rxo->rx_post_starved = true;
	}
}
1450
/* Fetch the next valid tx completion from @tx_cq, clear its valid bit
 * and advance the CQ tail.  Returns NULL when nothing is pending.
 */
static struct be_eth_tx_compl *be_tx_compl_get(struct be_queue_info *tx_cq)
{
	struct be_eth_tx_compl *txcp = queue_tail_node(tx_cq);

	if (txcp->dw[offsetof(struct amap_eth_tx_compl, valid) / 32] == 0)
		return NULL;

	/* read barrier: don't read compl contents before the valid bit */
	rmb();
	be_dws_le_to_cpu(txcp, sizeof(*txcp));

	/* reset the valid bit so this slot isn't processed twice */
	txcp->dw[offsetof(struct amap_eth_tx_compl, valid) / 32] = 0;

	queue_tail_inc(tx_cq);
	return txcp;
}
1466
/* Reclaim one completed tx skb: unmap all of its wrbs up to @last_index,
 * free the skb and return the number of wrbs consumed (including the
 * header wrb) so the caller can adjust txq->used.
 */
static u16 be_tx_compl_process(struct be_adapter *adapter,
		struct be_tx_obj *txo, u16 last_index)
{
	struct be_queue_info *txq = &txo->q;
	struct be_eth_wrb *wrb;
	struct sk_buff **sent_skbs = txo->sent_skb_list;
	struct sk_buff *sent_skb;
	u16 cur_index, num_wrbs = 1; /* account for hdr wrb */
	bool unmap_skb_hdr = true;

	sent_skb = sent_skbs[txq->tail];
	BUG_ON(!sent_skb);
	sent_skbs[txq->tail] = NULL;

	/* skip header wrb */
	queue_tail_inc(txq);

	do {
		cur_index = txq->tail;
		wrb = queue_tail_node(txq);
		/* only the first data wrb may cover the skb linear header */
		unmap_tx_frag(&adapter->pdev->dev, wrb,
			      (unmap_skb_hdr && skb_headlen(sent_skb)));
		unmap_skb_hdr = false;

		num_wrbs++;
		queue_tail_inc(txq);
	} while (cur_index != last_index);

	kfree_skb(sent_skb);
	return num_wrbs;
}
1498
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001499/* Return the number of events in the event queue */
1500static inline int events_get(struct be_eq_obj *eqo)
Sathya Perla859b1e42009-08-10 03:43:51 +00001501{
1502 struct be_eq_entry *eqe;
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001503 int num = 0;
Sathya Perla859b1e42009-08-10 03:43:51 +00001504
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001505 do {
1506 eqe = queue_tail_node(&eqo->q);
1507 if (eqe->evt == 0)
1508 break;
1509
1510 rmb();
Sathya Perla859b1e42009-08-10 03:43:51 +00001511 eqe->evt = 0;
1512 num++;
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001513 queue_tail_inc(&eqo->q);
1514 } while (true);
Sathya Perla859b1e42009-08-10 03:43:51 +00001515
1516 return num;
1517}
1518
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001519static int event_handle(struct be_eq_obj *eqo)
Sathya Perla859b1e42009-08-10 03:43:51 +00001520{
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001521 bool rearm = false;
1522 int num = events_get(eqo);
Sathya Perla859b1e42009-08-10 03:43:51 +00001523
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001524 /* Deal with any spurious interrupts that come without events */
1525 if (!num)
1526 rearm = true;
Sathya Perla859b1e42009-08-10 03:43:51 +00001527
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001528 be_eq_notify(eqo->adapter, eqo->q.id, rearm, true, num);
Sathya Perla859b1e42009-08-10 03:43:51 +00001529 if (num)
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001530 napi_schedule(&eqo->napi);
1531
1532 return num;
Sathya Perla859b1e42009-08-10 03:43:51 +00001533}
1534
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001535/* Leaves the EQ is disarmed state */
1536static void be_eq_clean(struct be_eq_obj *eqo)
1537{
1538 int num = events_get(eqo);
1539
1540 be_eq_notify(eqo->adapter, eqo->q.id, false, true, num);
1541}
1542
/* Flush the rx path for one rx object: discard all pending completions,
 * then release every posted-but-unused rx buffer and reset the queue
 * indices.  Called on queue teardown, after rx traffic is stopped.
 */
static void be_rx_cq_clean(struct be_rx_obj *rxo)
{
	struct be_rx_page_info *page_info;
	struct be_queue_info *rxq = &rxo->q;
	struct be_queue_info *rx_cq = &rxo->cq;
	struct be_rx_compl_info *rxcp;
	u16 tail;

	/* First cleanup pending rx completions */
	while ((rxcp = be_rx_compl_get(rxo)) != NULL) {
		be_rx_compl_discard(rxo, rxcp);
		be_cq_notify(rxo->adapter, rx_cq->id, false, 1);
	}

	/* Then free posted rx buffer that were not used */
	/* oldest posted-but-unconsumed buffer sits 'used' slots behind head */
	tail = (rxq->head + rxq->len - atomic_read(&rxq->used)) % rxq->len;
	for (; atomic_read(&rxq->used) > 0; index_inc(&tail, rxq->len)) {
		page_info = get_rx_page_info(rxo, tail);
		put_page(page_info->page);
		memset(page_info, 0, sizeof(*page_info));
	}
	BUG_ON(atomic_read(&rxq->used));
	rxq->tail = rxq->head = 0;
}
1567
/* Drain all tx queues on teardown: wait (bounded) for outstanding tx
 * completions, then forcibly reclaim any wrbs whose completions will
 * never arrive.  Must be called after tx traffic has been stopped.
 */
static void be_tx_compl_clean(struct be_adapter *adapter)
{
	struct be_tx_obj *txo;
	struct be_queue_info *txq;
	struct be_eth_tx_compl *txcp;
	u16 end_idx, cmpl = 0, timeo = 0, num_wrbs = 0;
	struct sk_buff *sent_skb;
	bool dummy_wrb;
	int i, pending_txqs;

	/* Wait for a max of 200ms for all the tx-completions to arrive. */
	do {
		pending_txqs = adapter->num_tx_qs;

		for_all_tx_queues(adapter, txo, i) {
			txq = &txo->q;
			while ((txcp = be_tx_compl_get(&txo->cq))) {
				end_idx =
					AMAP_GET_BITS(struct amap_eth_tx_compl,
						      wrb_index, txcp);
				num_wrbs += be_tx_compl_process(adapter, txo,
								end_idx);
				cmpl++;
			}
			if (cmpl) {
				be_cq_notify(adapter, txo->cq.id, false, cmpl);
				atomic_sub(num_wrbs, &txq->used);
				/* reset per-txq accumulators for next txq */
				cmpl = 0;
				num_wrbs = 0;
			}
			if (atomic_read(&txq->used) == 0)
				pending_txqs--;
		}

		if (pending_txqs == 0 || ++timeo > 200)
			break;

		mdelay(1);
	} while (true);

	for_all_tx_queues(adapter, txo, i) {
		txq = &txo->q;
		if (atomic_read(&txq->used))
			dev_err(&adapter->pdev->dev, "%d pending tx-compls\n",
				atomic_read(&txq->used));

		/* free posted tx for which compls will never arrive */
		while (atomic_read(&txq->used)) {
			sent_skb = txo->sent_skb_list[txq->tail];
			end_idx = txq->tail;
			/* compute the last wrb index of this skb so the
			 * normal reclaim path can be reused */
			num_wrbs = wrb_cnt_for_skb(adapter, sent_skb,
						   &dummy_wrb);
			index_adv(&end_idx, num_wrbs - 1, txq->len);
			num_wrbs = be_tx_compl_process(adapter, txo, end_idx);
			atomic_sub(num_wrbs, &txq->used);
		}
	}
}
1626
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001627static void be_evt_queues_destroy(struct be_adapter *adapter)
1628{
1629 struct be_eq_obj *eqo;
1630 int i;
1631
1632 for_all_evt_queues(adapter, eqo, i) {
1633 be_eq_clean(eqo);
1634 if (eqo->q.created)
1635 be_cmd_q_destroy(adapter, &eqo->q, QTYPE_EQ);
1636 be_queue_free(adapter, &eqo->q);
1637 }
1638}
1639
1640static int be_evt_queues_create(struct be_adapter *adapter)
1641{
1642 struct be_queue_info *eq;
1643 struct be_eq_obj *eqo;
1644 int i, rc;
1645
1646 adapter->num_evt_qs = num_irqs(adapter);
1647
1648 for_all_evt_queues(adapter, eqo, i) {
1649 eqo->adapter = adapter;
1650 eqo->tx_budget = BE_TX_BUDGET;
1651 eqo->idx = i;
1652 eqo->max_eqd = BE_MAX_EQD;
1653 eqo->enable_aic = true;
1654
1655 eq = &eqo->q;
1656 rc = be_queue_alloc(adapter, eq, EVNT_Q_LEN,
1657 sizeof(struct be_eq_entry));
1658 if (rc)
1659 return rc;
1660
1661 rc = be_cmd_eq_create(adapter, eq, eqo->cur_eqd);
1662 if (rc)
1663 return rc;
1664 }
Sathya Perla1cfafab2012-02-23 18:50:15 +00001665 return 0;
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001666}
1667
Sathya Perla5fb379e2009-06-18 00:02:59 +00001668static void be_mcc_queues_destroy(struct be_adapter *adapter)
1669{
1670 struct be_queue_info *q;
Sathya Perla5fb379e2009-06-18 00:02:59 +00001671
Sathya Perla8788fdc2009-07-27 22:52:03 +00001672 q = &adapter->mcc_obj.q;
Sathya Perla5fb379e2009-06-18 00:02:59 +00001673 if (q->created)
Sathya Perla8788fdc2009-07-27 22:52:03 +00001674 be_cmd_q_destroy(adapter, q, QTYPE_MCCQ);
Sathya Perla5fb379e2009-06-18 00:02:59 +00001675 be_queue_free(adapter, q);
1676
Sathya Perla8788fdc2009-07-27 22:52:03 +00001677 q = &adapter->mcc_obj.cq;
Sathya Perla5fb379e2009-06-18 00:02:59 +00001678 if (q->created)
Sathya Perla8788fdc2009-07-27 22:52:03 +00001679 be_cmd_q_destroy(adapter, q, QTYPE_CQ);
Sathya Perla5fb379e2009-06-18 00:02:59 +00001680 be_queue_free(adapter, q);
1681}
1682
/* Must be called only after TX qs are created as MCC shares TX EQ.
 * Creates the MCC completion queue first, then the MCC queue on top of
 * it; unwinds in reverse order via the goto chain on any failure.
 * Returns 0 on success, -1 on failure.
 */
static int be_mcc_queues_create(struct be_adapter *adapter)
{
	struct be_queue_info *q, *cq;

	cq = &adapter->mcc_obj.cq;
	if (be_queue_alloc(adapter, cq, MCC_CQ_LEN,
			sizeof(struct be_mcc_compl)))
		goto err;

	/* Use the default EQ for MCC completions */
	if (be_cmd_cq_create(adapter, cq, &mcc_eqo(adapter)->q, true, 0))
		goto mcc_cq_free;

	q = &adapter->mcc_obj.q;
	if (be_queue_alloc(adapter, q, MCC_Q_LEN, sizeof(struct be_mcc_wrb)))
		goto mcc_cq_destroy;

	if (be_cmd_mccq_create(adapter, q, cq))
		goto mcc_q_free;

	return 0;

mcc_q_free:
	be_queue_free(adapter, q);
mcc_cq_destroy:
	be_cmd_q_destroy(adapter, cq, QTYPE_CQ);
mcc_cq_free:
	be_queue_free(adapter, cq);
err:
	return -1;
}
1715
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001716static void be_tx_queues_destroy(struct be_adapter *adapter)
1717{
1718 struct be_queue_info *q;
Sathya Perla3c8def92011-06-12 20:01:58 +00001719 struct be_tx_obj *txo;
1720 u8 i;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001721
Sathya Perla3c8def92011-06-12 20:01:58 +00001722 for_all_tx_queues(adapter, txo, i) {
1723 q = &txo->q;
1724 if (q->created)
1725 be_cmd_q_destroy(adapter, q, QTYPE_TXQ);
1726 be_queue_free(adapter, q);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001727
Sathya Perla3c8def92011-06-12 20:01:58 +00001728 q = &txo->cq;
1729 if (q->created)
1730 be_cmd_q_destroy(adapter, q, QTYPE_CQ);
1731 be_queue_free(adapter, q);
1732 }
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001733}
1734
Sathya Perladafc0fe2011-10-24 02:45:02 +00001735static int be_num_txqs_want(struct be_adapter *adapter)
1736{
Sathya Perla11ac75e2011-12-13 00:58:50 +00001737 if (sriov_enabled(adapter) || be_is_mc(adapter) ||
Sathya Perladafc0fe2011-10-24 02:45:02 +00001738 lancer_chip(adapter) || !be_physfn(adapter) ||
1739 adapter->generation == BE_GEN2)
1740 return 1;
1741 else
1742 return MAX_TX_QS;
1743}
1744
/* Decide the number of tx queues, publish it to the net stack, and
 * allocate/create a completion queue for each tx queue.  Returns 0 on
 * success or the first failing status.
 */
static int be_tx_cqs_create(struct be_adapter *adapter)
{
	struct be_queue_info *cq, *eq;
	int status;
	struct be_tx_obj *txo;
	u8 i;

	adapter->num_tx_qs = be_num_txqs_want(adapter);
	if (adapter->num_tx_qs != MAX_TX_QS) {
		/* netif_set_real_num_tx_queues() requires rtnl */
		rtnl_lock();
		netif_set_real_num_tx_queues(adapter->netdev,
			adapter->num_tx_qs);
		rtnl_unlock();
	}

	for_all_tx_queues(adapter, txo, i) {
		cq = &txo->cq;
		status = be_queue_alloc(adapter, cq, TX_CQ_LEN,
					sizeof(struct be_eth_tx_compl));
		if (status)
			return status;

		/* If num_evt_qs is less than num_tx_qs, then more than
		 * one txq share an eq
		 */
		eq = &adapter->eq_obj[i % adapter->num_evt_qs].q;
		status = be_cmd_cq_create(adapter, cq, eq, false, 3);
		if (status)
			return status;
	}
	return 0;
}
1777
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001778static int be_tx_qs_create(struct be_adapter *adapter)
1779{
1780 struct be_tx_obj *txo;
1781 int i, status;
1782
1783 for_all_tx_queues(adapter, txo, i) {
1784 status = be_queue_alloc(adapter, &txo->q, TX_Q_LEN,
1785 sizeof(struct be_eth_wrb));
1786 if (status)
1787 return status;
1788
1789 status = be_cmd_txq_create(adapter, &txo->q, &txo->cq);
1790 if (status)
1791 return status;
1792 }
1793
1794 return 0;
1795}
1796
1797static void be_rx_cqs_destroy(struct be_adapter *adapter)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001798{
1799 struct be_queue_info *q;
Sathya Perla3abcded2010-10-03 22:12:27 -07001800 struct be_rx_obj *rxo;
1801 int i;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001802
Sathya Perla3abcded2010-10-03 22:12:27 -07001803 for_all_rx_queues(adapter, rxo, i) {
Sathya Perla3abcded2010-10-03 22:12:27 -07001804 q = &rxo->cq;
1805 if (q->created)
1806 be_cmd_q_destroy(adapter, q, QTYPE_CQ);
1807 be_queue_free(adapter, q);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001808 }
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001809}
1810
/* Decide the number of rx queues and allocate/create a completion queue
 * for each.  Returns 0 on success or the first failing status.
 */
static int be_rx_cqs_create(struct be_adapter *adapter)
{
	struct be_queue_info *eq, *cq;
	struct be_rx_obj *rxo;
	int rc, i;

	/* We'll create as many RSS rings as there are irqs.
	 * But when there's only one irq there's no use creating RSS rings
	 */
	/* the "+ 1" accounts for the default (non-RSS) rx queue */
	adapter->num_rx_qs = (num_irqs(adapter) > 1) ?
				num_irqs(adapter) + 1 : 1;

	adapter->big_page_size = (1 << get_order(rx_frag_size)) * PAGE_SIZE;
	for_all_rx_queues(adapter, rxo, i) {
		rxo->adapter = adapter;
		cq = &rxo->cq;
		rc = be_queue_alloc(adapter, cq, RX_CQ_LEN,
				sizeof(struct be_eth_rx_compl));
		if (rc)
			return rc;

		/* rx queues may share an eq when there are more of them
		 * than event queues */
		eq = &adapter->eq_obj[i % adapter->num_evt_qs].q;
		rc = be_cmd_cq_create(adapter, cq, eq, false, 3);
		if (rc)
			return rc;
	}

	if (adapter->num_rx_qs != MAX_RX_QS)
		dev_info(&adapter->pdev->dev,
			"Created only %d receive queues", adapter->num_rx_qs);

	return 0;
}
1844
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001845static irqreturn_t be_intx(int irq, void *dev)
1846{
1847 struct be_adapter *adapter = dev;
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001848 int num_evts;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001849
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001850 /* With INTx only one EQ is used */
1851 num_evts = event_handle(&adapter->eq_obj[0]);
1852 if (num_evts)
1853 return IRQ_HANDLED;
1854 else
1855 return IRQ_NONE;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001856}
1857
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001858static irqreturn_t be_msix(int irq, void *dev)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001859{
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001860 struct be_eq_obj *eqo = dev;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001861
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001862 event_handle(eqo);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001863 return IRQ_HANDLED;
1864}
1865
Sathya Perla2e588f82011-03-11 02:49:26 +00001866static inline bool do_gro(struct be_rx_compl_info *rxcp)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001867{
Sathya Perla2e588f82011-03-11 02:49:26 +00001868 return (rxcp->tcpf && !rxcp->err) ? true : false;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001869}
1870
/* NAPI rx poll body for one rx object: process up to @budget completions,
 * dispatching each to the GRO or regular rx path, then notify the CQ and
 * replenish rx buffers if the queue is running low.  Returns the number
 * of completions processed.
 */
static int be_process_rx(struct be_rx_obj *rxo, struct napi_struct *napi,
			int budget)
{
	struct be_adapter *adapter = rxo->adapter;
	struct be_queue_info *rx_cq = &rxo->cq;
	struct be_rx_compl_info *rxcp;
	u32 work_done;

	for (work_done = 0; work_done < budget; work_done++) {
		rxcp = be_rx_compl_get(rxo);
		if (!rxcp)
			break;

		/* Is it a flush compl that has no data */
		if (unlikely(rxcp->num_rcvd == 0))
			goto loop_continue;

		/* Discard compl with partial DMA Lancer B0 */
		if (unlikely(!rxcp->pkt_size)) {
			be_rx_compl_discard(rxo, rxcp);
			goto loop_continue;
		}

		/* On BE drop pkts that arrive due to imperfect filtering in
		 * promiscuous mode on some skews
		 */
		if (unlikely(rxcp->port != adapter->port_num &&
				!lancer_chip(adapter))) {
			be_rx_compl_discard(rxo, rxcp);
			goto loop_continue;
		}

		if (do_gro(rxcp))
			be_rx_compl_process_gro(rxo, napi, rxcp);
		else
			be_rx_compl_process(rxo, rxcp);
loop_continue:
		be_rx_stats_update(rxo, rxcp);
	}

	if (work_done) {
		be_cq_notify(adapter, rx_cq->id, true, work_done);

		/* refill rx descriptors before the queue runs dry */
		if (atomic_read(&rxo->q.used) < RX_FRAGS_REFILL_WM)
			be_post_rx_frags(rxo, GFP_ATOMIC);
	}

	return work_done;
}
1920
/* Reap up to @budget TX completions from @txo's completion queue.
 * Frees the wrbs consumed by each completed skb, rings the CQ doorbell
 * to re-arm it, and wakes netdev sub-queue @idx if it had been stopped
 * for lack of wrbs and enough of the TX queue has drained.
 * Returns true when fewer than @budget completions were found, i.e.
 * TX work is done for this poll round.
 */
static bool be_process_tx(struct be_adapter *adapter, struct be_tx_obj *txo,
			int budget, int idx)
{
	struct be_eth_tx_compl *txcp;
	int num_wrbs = 0, work_done;

	for (work_done = 0; work_done < budget; work_done++) {
		txcp = be_tx_compl_get(&txo->cq);
		if (!txcp)
			break;
		num_wrbs += be_tx_compl_process(adapter, txo,
				AMAP_GET_BITS(struct amap_eth_tx_compl,
					wrb_index, txcp));
	}

	if (work_done) {
		be_cq_notify(adapter, txo->cq.id, true, work_done);
		atomic_sub(num_wrbs, &txo->q.used);

		/* As Tx wrbs have been freed up, wake up netdev queue
		 * if it was stopped due to lack of tx wrbs. */
		if (__netif_subqueue_stopped(adapter->netdev, idx) &&
			atomic_read(&txo->q.used) < txo->q.len / 2) {
			netif_wake_subqueue(adapter->netdev, idx);
		}

		/* u64 stats are protected by a seqcount-style sync block */
		u64_stats_update_begin(&tx_stats(txo)->sync_compl);
		tx_stats(txo)->tx_compl += work_done;
		u64_stats_update_end(&tx_stats(txo)->sync_compl);
	}
	return (work_done < budget); /* Done */
}
Sathya Perla3c8def92011-06-12 20:01:58 +00001953
/* NAPI poll handler; one instance per event queue (EQ).
 * Services every TX and RX queue mapped to this EQ, lets the EQ that
 * owns the MCC queue also process MCC completions, and then either
 * completes NAPI and re-arms the EQ (all work done) or stays in
 * polling mode.  Returns the amount of work done, capped at @budget.
 */
int be_poll(struct napi_struct *napi, int budget)
{
	struct be_eq_obj *eqo = container_of(napi, struct be_eq_obj, napi);
	struct be_adapter *adapter = eqo->adapter;
	int max_work = 0, work, i;
	bool tx_done;

	/* Process all TXQs serviced by this EQ */
	for (i = eqo->idx; i < adapter->num_tx_qs; i += adapter->num_evt_qs) {
		tx_done = be_process_tx(adapter, &adapter->tx_obj[i],
					eqo->tx_budget, i);
		if (!tx_done)
			max_work = budget;	/* force continued polling */
	}

	/* This loop will iterate twice for EQ0 in which
	 * completions of the last RXQ (default one) are also processed
	 * For other EQs the loop iterates only once
	 */
	for (i = eqo->idx; i < adapter->num_rx_qs; i += adapter->num_evt_qs) {
		work = be_process_rx(&adapter->rx_obj[i], napi, budget);
		max_work = max(work, max_work);
	}

	if (is_mcc_eqo(eqo))
		be_process_mcc(adapter);

	if (max_work < budget) {
		napi_complete(napi);
		/* re-arm the EQ so the next event raises an interrupt */
		be_eq_notify(adapter, eqo->q.id, true, false, 0);
	} else {
		/* As we'll continue in polling mode, count and clear events */
		be_eq_notify(adapter, eqo->q.id, false, false, events_get(eqo));
	}
	return max_work;
}
1990
/* Detect an unrecoverable error (UE) on the adapter and dump details.
 * On Lancer chips the SLIPORT status/error registers are read through
 * the doorbell BAR; on other chips the UE status words are read from
 * PCI config space and masked with the corresponding "mask" words so
 * only unmasked bits are considered.  On error the adapter is marked
 * dead (ue_detected/eeh_err) and every set UE status bit is logged.
 */
void be_detect_dump_ue(struct be_adapter *adapter)
{
	u32 ue_lo = 0, ue_hi = 0, ue_lo_mask = 0, ue_hi_mask = 0;
	u32 sliport_status = 0, sliport_err1 = 0, sliport_err2 = 0;
	u32 i;

	/* Nothing to do if an error was already detected or EEH is active */
	if (adapter->eeh_err || adapter->ue_detected)
		return;

	if (lancer_chip(adapter)) {
		sliport_status = ioread32(adapter->db + SLIPORT_STATUS_OFFSET);
		if (sliport_status & SLIPORT_STATUS_ERR_MASK) {
			sliport_err1 = ioread32(adapter->db +
					SLIPORT_ERROR1_OFFSET);
			sliport_err2 = ioread32(adapter->db +
					SLIPORT_ERROR2_OFFSET);
		}
	} else {
		pci_read_config_dword(adapter->pdev,
				PCICFG_UE_STATUS_LOW, &ue_lo);
		pci_read_config_dword(adapter->pdev,
				PCICFG_UE_STATUS_HIGH, &ue_hi);
		pci_read_config_dword(adapter->pdev,
				PCICFG_UE_STATUS_LOW_MASK, &ue_lo_mask);
		pci_read_config_dword(adapter->pdev,
				PCICFG_UE_STATUS_HI_MASK, &ue_hi_mask);

		/* Only unmasked bits indicate a real error */
		ue_lo = (ue_lo & (~ue_lo_mask));
		ue_hi = (ue_hi & (~ue_hi_mask));
	}

	if (ue_lo || ue_hi ||
		sliport_status & SLIPORT_STATUS_ERR_MASK) {
		adapter->ue_detected = true;
		adapter->eeh_err = true;
		dev_err(&adapter->pdev->dev,
			"Unrecoverable error in the card\n");
	}

	/* Walk the status words bit by bit and name each error found */
	if (ue_lo) {
		for (i = 0; ue_lo; ue_lo >>= 1, i++) {
			if (ue_lo & 1)
				dev_err(&adapter->pdev->dev,
				"UE: %s bit set\n", ue_status_low_desc[i]);
		}
	}
	if (ue_hi) {
		for (i = 0; ue_hi; ue_hi >>= 1, i++) {
			if (ue_hi & 1)
				dev_err(&adapter->pdev->dev,
				"UE: %s bit set\n", ue_status_hi_desc[i]);
		}
	}

	if (sliport_status & SLIPORT_STATUS_ERR_MASK) {
		dev_err(&adapter->pdev->dev,
			"sliport status 0x%x\n", sliport_status);
		dev_err(&adapter->pdev->dev,
			"sliport error1 0x%x\n", sliport_err1);
		dev_err(&adapter->pdev->dev,
			"sliport error2 0x%x\n", sliport_err2);
	}
}
2054
Sathya Perla8d56ff12009-11-22 22:02:26 +00002055static void be_msix_disable(struct be_adapter *adapter)
2056{
Sathya Perlaac6a0c42011-03-21 20:49:25 +00002057 if (msix_enabled(adapter)) {
Sathya Perla8d56ff12009-11-22 22:02:26 +00002058 pci_disable_msix(adapter->pdev);
Sathya Perlaac6a0c42011-03-21 20:49:25 +00002059 adapter->num_msix_vec = 0;
Sathya Perla3abcded2010-10-03 22:12:27 -07002060 }
2061}
2062
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002063static uint be_num_rss_want(struct be_adapter *adapter)
2064{
2065 if ((adapter->function_caps & BE_FUNCTION_CAPS_RSS) &&
2066 adapter->num_vfs == 0 && be_physfn(adapter) &&
2067 !be_is_mc(adapter))
2068 return (adapter->be3_native) ? BE3_MAX_RSS_QS : BE2_MAX_RSS_QS;
2069 else
2070 return 0;
2071}
2072
/* Try to enable MSI-X with one vector per desired RSS queue (at least
 * one vector overall).  pci_enable_msix() returns a positive value when
 * fewer vectors are available; in that case retry once with that count.
 * On success adapter->num_msix_vec is set; on failure it stays 0 and
 * the driver falls back to INTx.
 */
static void be_msix_enable(struct be_adapter *adapter)
{
#define BE_MIN_MSIX_VECTORS		1
	int i, status, num_vec;

	/* If RSS queues are not used, need a vec for default RX Q */
	num_vec = min(be_num_rss_want(adapter), num_online_cpus());
	num_vec = max(num_vec, BE_MIN_MSIX_VECTORS);

	for (i = 0; i < num_vec; i++)
		adapter->msix_entries[i].entry = i;

	status = pci_enable_msix(adapter->pdev, adapter->msix_entries, num_vec);
	if (status == 0) {
		goto done;
	} else if (status >= BE_MIN_MSIX_VECTORS) {
		/* positive return = number of vectors actually available;
		 * retry with the smaller count */
		num_vec = status;
		if (pci_enable_msix(adapter->pdev, adapter->msix_entries,
				num_vec) == 0)
			goto done;
	}
	return;
done:
	adapter->num_msix_vec = num_vec;
	return;
}
2099
/* Enable SR-IOV when requested via the num_vfs module parameter.
 * Clamps the request to the device's advertised VF limit
 * (PCI_SRIOV_TOTAL_VF), enables the VFs in the PCI core, and allocates
 * per-VF configuration state.  Returns 0 on success or when SR-IOV is
 * not requested/compiled in, -ENOMEM if per-VF state allocation fails.
 */
static int be_sriov_enable(struct be_adapter *adapter)
{
	be_check_sriov_fn_type(adapter);

#ifdef CONFIG_PCI_IOV
	if (be_physfn(adapter) && num_vfs) {
		int status, pos;
		u16 dev_vfs;

		/* Query how many VFs the device actually supports */
		pos = pci_find_ext_capability(adapter->pdev,
						PCI_EXT_CAP_ID_SRIOV);
		pci_read_config_word(adapter->pdev,
				pos + PCI_SRIOV_TOTAL_VF, &dev_vfs);

		adapter->num_vfs = min_t(u16, num_vfs, dev_vfs);
		if (adapter->num_vfs != num_vfs)
			dev_info(&adapter->pdev->dev,
					"Device supports %d VFs and not %d\n",
					adapter->num_vfs, num_vfs);

		status = pci_enable_sriov(adapter->pdev, adapter->num_vfs);
		if (status)
			adapter->num_vfs = 0;

		if (adapter->num_vfs) {
			/* Allocate for the clamped VF count, not the raw
			 * (possibly larger) module-parameter request */
			adapter->vf_cfg = kcalloc(adapter->num_vfs,
						  sizeof(struct be_vf_cfg),
						  GFP_KERNEL);
			if (!adapter->vf_cfg)
				return -ENOMEM;
		}
	}
#endif
	return 0;
}
2135
/* Disable SR-IOV and free the per-VF config state, if enabled. */
static void be_sriov_disable(struct be_adapter *adapter)
{
#ifdef CONFIG_PCI_IOV
	if (!sriov_enabled(adapter))
		return;

	pci_disable_sriov(adapter->pdev);
	kfree(adapter->vf_cfg);
	adapter->num_vfs = 0;
#endif
}
2146
Sathya Perlafe6d2a32010-11-21 23:25:50 +00002147static inline int be_msix_vec_get(struct be_adapter *adapter,
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002148 struct be_eq_obj *eqo)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002149{
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002150 return adapter->msix_entries[eqo->idx].vector;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002151}
2152
/* Request one MSI-X IRQ per event queue, naming each "<netdev>-q<i>".
 * On failure, free the IRQs already requested (walking back in reverse)
 * and disable MSI-X so the caller can fall back to INTx.
 * Returns 0 on success or the request_irq() error code.
 */
static int be_msix_register(struct be_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	struct be_eq_obj *eqo;
	int status, i, vec;

	for_all_evt_queues(adapter, eqo, i) {
		sprintf(eqo->desc, "%s-q%d", netdev->name, i);
		vec = be_msix_vec_get(adapter, eqo);
		status = request_irq(vec, be_msix, 0, eqo->desc, eqo);
		if (status)
			goto err_msix;
	}

	return 0;
err_msix:
	/* Unwind the vectors requested before the failure */
	for (i--, eqo = &adapter->eq_obj[i]; i >= 0; i--, eqo--)
		free_irq(be_msix_vec_get(adapter, eqo), eqo);
	dev_warn(&adapter->pdev->dev, "MSIX Request IRQ failed - err %d\n",
		status);
	be_msix_disable(adapter);
	return status;
}
2176
/* Register the adapter's interrupt handler(s): MSI-X when enabled,
 * otherwise legacy shared INTx.  VFs may not fall back to INTx.
 * Sets adapter->isr_registered on success and returns 0.
 */
static int be_irq_register(struct be_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	int status;

	if (msix_enabled(adapter)) {
		status = be_msix_register(adapter);
		if (status == 0)
			goto done;
		/* INTx is not supported for VF */
		if (!be_physfn(adapter))
			return status;
	}

	/* INTx */
	netdev->irq = adapter->pdev->irq;
	status = request_irq(netdev->irq, be_intx, IRQF_SHARED, netdev->name,
			adapter);
	if (status) {
		dev_err(&adapter->pdev->dev,
			"INTx request IRQ failed - err %d\n", status);
		return status;
	}
done:
	adapter->isr_registered = true;
	return 0;
}
2204
2205static void be_irq_unregister(struct be_adapter *adapter)
2206{
2207 struct net_device *netdev = adapter->netdev;
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002208 struct be_eq_obj *eqo;
Sathya Perla3abcded2010-10-03 22:12:27 -07002209 int i;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002210
2211 if (!adapter->isr_registered)
2212 return;
2213
2214 /* INTx */
Sathya Perlaac6a0c42011-03-21 20:49:25 +00002215 if (!msix_enabled(adapter)) {
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002216 free_irq(netdev->irq, adapter);
2217 goto done;
2218 }
2219
2220 /* MSIx */
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002221 for_all_evt_queues(adapter, eqo, i)
2222 free_irq(be_msix_vec_get(adapter, eqo), eqo);
Sathya Perla3abcded2010-10-03 22:12:27 -07002223
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002224done:
2225 adapter->isr_registered = false;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002226}
2227
/* Destroy every RX queue: issue the FW destroy command, give in-flight
 * DMA and the flush completion a grace period to drain, reap residual
 * completions, then free the queue memory.
 */
static void be_rx_qs_destroy(struct be_adapter *adapter)
{
	struct be_queue_info *q;
	struct be_rx_obj *rxo;
	int i;

	for_all_rx_queues(adapter, rxo, i) {
		q = &rxo->q;
		if (q->created) {
			be_cmd_rxq_destroy(adapter, q);
			/* After the rxq is invalidated, wait for a grace time
			 * of 1ms for all dma to end and the flush compl to
			 * arrive
			 */
			mdelay(1);
			be_rx_cq_clean(rxo);
		}
		be_queue_free(adapter, q);
	}
}
2248
/* ndo_stop handler: quiesce the adapter.
 * Disables MCC async events and (on non-Lancer chips) the host
 * interrupt, stops NAPI and synchronizes/cleans each event queue,
 * unregisters IRQs, drains outstanding TX completions so all tx skbs
 * are freed, and finally tears down the RX queues.  Always returns 0.
 */
static int be_close(struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_eq_obj *eqo;
	int i;

	be_async_mcc_disable(adapter);

	if (!lancer_chip(adapter))
		be_intr_set(adapter, false);

	for_all_evt_queues(adapter, eqo, i) {
		napi_disable(&eqo->napi);
		/* make sure no poll is running on this EQ's vector */
		if (msix_enabled(adapter))
			synchronize_irq(be_msix_vec_get(adapter, eqo));
		else
			synchronize_irq(netdev->irq);
		be_eq_clean(eqo);
	}

	be_irq_unregister(adapter);

	/* Wait for all pending tx completions to arrive so that
	 * all tx skbs are freed.
	 */
	be_tx_compl_clean(adapter);

	be_rx_qs_destroy(adapter);
	return 0;
}
2279
/* Allocate and create all RX queues.  The firmware requires the default
 * (non-RSS) RXQ to be created first; the remaining queues are created
 * as RSS queues.  When multiple RX queues exist, the 128-entry RSS
 * indirection table is filled round-robin with the RSS queue ids and
 * programmed into the FW.  Finishes by posting the initial RX buffers.
 * Returns 0 on success or the first error code encountered.
 */
static int be_rx_qs_create(struct be_adapter *adapter)
{
	struct be_rx_obj *rxo;
	int rc, i, j;
	u8 rsstable[128];	/* RSS indirection table, fixed 128 entries */

	for_all_rx_queues(adapter, rxo, i) {
		rc = be_queue_alloc(adapter, &rxo->q, RX_Q_LEN,
				sizeof(struct be_eth_rx_d));
		if (rc)
			return rc;
	}

	/* The FW would like the default RXQ to be created first */
	rxo = default_rxo(adapter);
	rc = be_cmd_rxq_create(adapter, &rxo->q, rxo->cq.id, rx_frag_size,
			adapter->if_handle, false, &rxo->rss_id);
	if (rc)
		return rc;

	for_all_rss_queues(adapter, rxo, i) {
		rc = be_cmd_rxq_create(adapter, &rxo->q, rxo->cq.id,
				rx_frag_size, adapter->if_handle,
				true, &rxo->rss_id);
		if (rc)
			return rc;
	}

	if (be_multi_rxq(adapter)) {
		/* Spread the table entries across the RSS queues */
		for (j = 0; j < 128; j += adapter->num_rx_qs - 1) {
			for_all_rss_queues(adapter, rxo, i) {
				if ((j + i) >= 128)
					break;
				rsstable[j + i] = rxo->rss_id;
			}
		}
		rc = be_cmd_rss_config(adapter, rsstable, 128);
		if (rc)
			return rc;
	}

	/* First time posting */
	for_all_rx_queues(adapter, rxo, i)
		be_post_rx_frags(rxo, GFP_KERNEL);
	return 0;
}
2326
/* ndo_open handler: bring the interface up.
 * Creates the RX queues, registers IRQs, enables the host interrupt
 * (non-Lancer), arms all RX/TX completion queues, enables MCC async
 * events, enables NAPI and arms each event queue, then queries and
 * reports the current link state.  On failure the partially opened
 * device is closed again and -EIO is returned.
 */
static int be_open(struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_eq_obj *eqo;
	struct be_rx_obj *rxo;
	struct be_tx_obj *txo;
	u8 link_status;
	int status, i;

	status = be_rx_qs_create(adapter);
	if (status)
		goto err;

	be_irq_register(adapter);

	if (!lancer_chip(adapter))
		be_intr_set(adapter, true);

	for_all_rx_queues(adapter, rxo, i)
		be_cq_notify(adapter, rxo->cq.id, true, 0);

	for_all_tx_queues(adapter, txo, i)
		be_cq_notify(adapter, txo->cq.id, true, 0);

	be_async_mcc_enable(adapter);

	for_all_evt_queues(adapter, eqo, i) {
		napi_enable(&eqo->napi);
		be_eq_notify(adapter, eqo->q.id, true, false, 0);
	}

	status = be_cmd_link_status_query(adapter, NULL, NULL,
					&link_status, 0);
	if (!status)
		be_link_status_update(adapter, link_status);

	return 0;
err:
	be_close(adapter->netdev);
	return -EIO;
}
2368
Ajit Khaparde71d8d1b2009-12-03 06:16:59 +00002369static int be_setup_wol(struct be_adapter *adapter, bool enable)
2370{
2371 struct be_dma_mem cmd;
2372 int status = 0;
2373 u8 mac[ETH_ALEN];
2374
2375 memset(mac, 0, ETH_ALEN);
2376
2377 cmd.size = sizeof(struct be_cmd_req_acpi_wol_magic_config);
Ivan Vecera2b7bceb2011-02-02 08:05:12 +00002378 cmd.va = dma_alloc_coherent(&adapter->pdev->dev, cmd.size, &cmd.dma,
2379 GFP_KERNEL);
Ajit Khaparde71d8d1b2009-12-03 06:16:59 +00002380 if (cmd.va == NULL)
2381 return -1;
2382 memset(cmd.va, 0, cmd.size);
2383
2384 if (enable) {
2385 status = pci_write_config_dword(adapter->pdev,
2386 PCICFG_PM_CONTROL_OFFSET, PCICFG_PM_CONTROL_MASK);
2387 if (status) {
2388 dev_err(&adapter->pdev->dev,
Frans Pop2381a552010-03-24 07:57:36 +00002389 "Could not enable Wake-on-lan\n");
Ivan Vecera2b7bceb2011-02-02 08:05:12 +00002390 dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va,
2391 cmd.dma);
Ajit Khaparde71d8d1b2009-12-03 06:16:59 +00002392 return status;
2393 }
2394 status = be_cmd_enable_magic_wol(adapter,
2395 adapter->netdev->dev_addr, &cmd);
2396 pci_enable_wake(adapter->pdev, PCI_D3hot, 1);
2397 pci_enable_wake(adapter->pdev, PCI_D3cold, 1);
2398 } else {
2399 status = be_cmd_enable_magic_wol(adapter, mac, &cmd);
2400 pci_enable_wake(adapter->pdev, PCI_D3hot, 0);
2401 pci_enable_wake(adapter->pdev, PCI_D3cold, 0);
2402 }
2403
Ivan Vecera2b7bceb2011-02-02 08:05:12 +00002404 dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va, cmd.dma);
Ajit Khaparde71d8d1b2009-12-03 06:16:59 +00002405 return status;
2406}
2407
/*
 * Generate a seed MAC address from the PF MAC Address using jhash.
 * MAC Address for VFs are assigned incrementally starting from the seed.
 * These addresses are programmed in the ASIC by the PF and the VF driver
 * queries for the MAC address during its probe.
 * Returns the status of the last failing FW command (0 when all VFs
 * were programmed successfully).
 */
static inline int be_vf_eth_addr_config(struct be_adapter *adapter)
{
	u32 vf;
	int status = 0;
	u8 mac[ETH_ALEN];
	struct be_vf_cfg *vf_cfg;

	be_vf_eth_addr_generate(adapter, mac);

	for_all_vfs(adapter, vf_cfg, vf) {
		/* Lancer programs the MAC via the mac-list cmd; other chips
		 * add a pmac entry on the VF's interface */
		if (lancer_chip(adapter)) {
			status = be_cmd_set_mac_list(adapter, mac, 1, vf + 1);
		} else {
			status = be_cmd_pmac_add(adapter, mac,
					vf_cfg->if_handle,
					&vf_cfg->pmac_id, vf + 1);
		}

		if (status)
			dev_err(&adapter->pdev->dev,
			"Mac address assignment failed for VF %d\n", vf);
		else
			memcpy(vf_cfg->mac_addr, mac, ETH_ALEN);

		mac[5] += 1;	/* next VF gets seed+1, seed+2, ... */
	}
	return status;
}
2442
/* Undo per-VF provisioning done by be_vf_setup(): remove the MAC
 * programmed for each VF (mac-list on Lancer, pmac entry otherwise)
 * and destroy each VF's interface.
 */
static void be_vf_clear(struct be_adapter *adapter)
{
	struct be_vf_cfg *vf_cfg;
	u32 vf;

	for_all_vfs(adapter, vf_cfg, vf) {
		if (lancer_chip(adapter))
			be_cmd_set_mac_list(adapter, NULL, 0, vf + 1);
		else
			be_cmd_pmac_del(adapter, vf_cfg->if_handle,
					vf_cfg->pmac_id, vf + 1);

		be_cmd_if_destroy(adapter, vf_cfg->if_handle, vf + 1);
	}
}
2458
/* Tear down everything be_setup() created, in reverse order:
 * stop the periodic worker, clear VF state, destroy the interface,
 * destroy MCC/RX/TX/event queues, tell FW we are done issuing
 * commands, and release MSI-X vectors.  Always returns 0.
 */
static int be_clear(struct be_adapter *adapter)
{
	if (adapter->flags & BE_FLAGS_WORKER_SCHEDULED) {
		cancel_delayed_work_sync(&adapter->work);
		adapter->flags &= ~BE_FLAGS_WORKER_SCHEDULED;
	}

	if (sriov_enabled(adapter))
		be_vf_clear(adapter);

	be_cmd_if_destroy(adapter, adapter->if_handle, 0);

	be_mcc_queues_destroy(adapter);
	be_rx_cqs_destroy(adapter);
	be_tx_queues_destroy(adapter);
	be_evt_queues_destroy(adapter);

	/* tell fw we're done with firing cmds */
	be_cmd_fw_clean(adapter);

	be_msix_disable(adapter);
	return 0;
}
2482
Sathya Perla30128032011-11-10 19:17:57 +00002483static void be_vf_setup_init(struct be_adapter *adapter)
2484{
Sathya Perla11ac75e2011-12-13 00:58:50 +00002485 struct be_vf_cfg *vf_cfg;
Sathya Perla30128032011-11-10 19:17:57 +00002486 int vf;
2487
Sathya Perla11ac75e2011-12-13 00:58:50 +00002488 for_all_vfs(adapter, vf_cfg, vf) {
2489 vf_cfg->if_handle = -1;
2490 vf_cfg->pmac_id = -1;
Sathya Perla30128032011-11-10 19:17:57 +00002491 }
2492}
2493
/* Provision each enabled VF: create its interface (untagged +
 * broadcast + multicast), program its MAC address, and cache its link
 * speed for TX-rate bookkeeping.
 * Returns 0 on success or the first FW error encountered.
 */
static int be_vf_setup(struct be_adapter *adapter)
{
	struct be_vf_cfg *vf_cfg;
	u32 cap_flags, en_flags, vf;
	u16 lnk_speed;
	int status;

	be_vf_setup_init(adapter);

	cap_flags = en_flags = BE_IF_FLAGS_UNTAGGED | BE_IF_FLAGS_BROADCAST |
				BE_IF_FLAGS_MULTICAST;
	for_all_vfs(adapter, vf_cfg, vf) {
		status = be_cmd_if_create(adapter, cap_flags, en_flags, NULL,
					&vf_cfg->if_handle, NULL, vf + 1);
		if (status)
			goto err;
	}

	status = be_vf_eth_addr_config(adapter);
	if (status)
		goto err;

	for_all_vfs(adapter, vf_cfg, vf) {
		status = be_cmd_link_status_query(adapter, NULL, &lnk_speed,
						NULL, vf + 1);
		if (status)
			goto err;
		/* NOTE(review): lnk_speed appears to be in 10 Mbps units,
		 * hence the *10 — confirm against be_cmd_link_status_query */
		vf_cfg->tx_rate = lnk_speed * 10;
	}
	return 0;
err:
	return status;
}
2527
Sathya Perla30128032011-11-10 19:17:57 +00002528static void be_setup_init(struct be_adapter *adapter)
2529{
2530 adapter->vlan_prio_bmap = 0xff;
2531 adapter->link_speed = -1;
2532 adapter->if_handle = -1;
2533 adapter->be3_native = false;
2534 adapter->promiscuous = false;
2535 adapter->eq_next_idx = 0;
2536}
2537
/* Obtain this function's MAC address via the FW mac-list.
 * If the FW reports an active pmac id, query the MAC bound to that id
 * and adopt the id as adapter->pmac_id; otherwise add @mac as a new
 * pmac entry on our interface.  @mac is both input (candidate) and
 * output (filled by the FW queries).  Returns 0 on success.
 */
static int be_add_mac_from_list(struct be_adapter *adapter, u8 *mac)
{
	u32 pmac_id;
	int status;
	bool pmac_id_active;

	status = be_cmd_get_mac_from_list(adapter, 0, &pmac_id_active,
						&pmac_id, mac);
	if (status != 0)
		goto do_none;

	if (pmac_id_active) {
		status = be_cmd_mac_addr_query(adapter, mac,
				MAC_ADDRESS_TYPE_NETWORK,
				false, adapter->if_handle, pmac_id);

		if (!status)
			adapter->pmac_id = pmac_id;
	} else {
		status = be_cmd_pmac_add(adapter, mac,
				adapter->if_handle, &adapter->pmac_id, 0);
	}
do_none:
	return status;
}
2563
Sathya Perla5fb379e2009-06-18 00:02:59 +00002564static int be_setup(struct be_adapter *adapter)
2565{
Sathya Perla5fb379e2009-06-18 00:02:59 +00002566 struct net_device *netdev = adapter->netdev;
Sathya Perlaf9449ab2011-10-24 02:45:01 +00002567 u32 cap_flags, en_flags;
Sathya Perlaa54769f2011-10-24 02:45:00 +00002568 u32 tx_fc, rx_fc;
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002569 int status;
Sarveshwar Bandiba343c72010-03-31 02:56:12 +00002570 u8 mac[ETH_ALEN];
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002571
Sathya Perla30128032011-11-10 19:17:57 +00002572 be_setup_init(adapter);
Sathya Perlaf9449ab2011-10-24 02:45:01 +00002573
Sathya Perla2dc1deb2011-07-19 19:52:33 +00002574 be_cmd_req_native_mode(adapter);
2575
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002576 be_msix_enable(adapter);
2577
2578 status = be_evt_queues_create(adapter);
2579 if (status)
Sathya Perlaa54769f2011-10-24 02:45:00 +00002580 goto err;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002581
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002582 status = be_tx_cqs_create(adapter);
2583 if (status)
2584 goto err;
2585
2586 status = be_rx_cqs_create(adapter);
2587 if (status)
Sathya Perlaa54769f2011-10-24 02:45:00 +00002588 goto err;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002589
Sathya Perla5fb379e2009-06-18 00:02:59 +00002590 status = be_mcc_queues_create(adapter);
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002591 if (status)
Sathya Perlaa54769f2011-10-24 02:45:00 +00002592 goto err;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002593
Sathya Perlaf9449ab2011-10-24 02:45:01 +00002594 memset(mac, 0, ETH_ALEN);
2595 status = be_cmd_mac_addr_query(adapter, mac, MAC_ADDRESS_TYPE_NETWORK,
Padmanabh Ratnakar590c3912011-11-25 05:47:26 +00002596 true /*permanent */, 0, 0);
Sathya Perlaf9449ab2011-10-24 02:45:01 +00002597 if (status)
2598 return status;
2599 memcpy(adapter->netdev->dev_addr, mac, ETH_ALEN);
2600 memcpy(adapter->netdev->perm_addr, mac, ETH_ALEN);
2601
2602 en_flags = BE_IF_FLAGS_UNTAGGED | BE_IF_FLAGS_BROADCAST |
2603 BE_IF_FLAGS_MULTICAST | BE_IF_FLAGS_PASS_L3L4_ERRORS;
2604 cap_flags = en_flags | BE_IF_FLAGS_MCAST_PROMISCUOUS |
Padmanabh Ratnakar5d5adb92011-11-16 02:03:32 +00002605 BE_IF_FLAGS_VLAN_PROMISCUOUS | BE_IF_FLAGS_PROMISCUOUS;
2606
Sathya Perlaf9449ab2011-10-24 02:45:01 +00002607 if (adapter->function_caps & BE_FUNCTION_CAPS_RSS) {
2608 cap_flags |= BE_IF_FLAGS_RSS;
2609 en_flags |= BE_IF_FLAGS_RSS;
2610 }
2611 status = be_cmd_if_create(adapter, cap_flags, en_flags,
2612 netdev->dev_addr, &adapter->if_handle,
2613 &adapter->pmac_id, 0);
2614 if (status != 0)
2615 goto err;
2616
Padmanabh Ratnakar590c3912011-11-25 05:47:26 +00002617 /* The VF's permanent mac queried from card is incorrect.
2618 * For BEx: Query the mac configued by the PF using if_handle
2619 * For Lancer: Get and use mac_list to obtain mac address.
2620 */
2621 if (!be_physfn(adapter)) {
2622 if (lancer_chip(adapter))
Padmanabh Ratnakare5e1ee82012-02-03 09:50:17 +00002623 status = be_add_mac_from_list(adapter, mac);
Padmanabh Ratnakar590c3912011-11-25 05:47:26 +00002624 else
2625 status = be_cmd_mac_addr_query(adapter, mac,
2626 MAC_ADDRESS_TYPE_NETWORK, false,
2627 adapter->if_handle, 0);
Sathya Perlaf9449ab2011-10-24 02:45:01 +00002628 if (!status) {
2629 memcpy(adapter->netdev->dev_addr, mac, ETH_ALEN);
2630 memcpy(adapter->netdev->perm_addr, mac, ETH_ALEN);
2631 }
2632 }
Ajit Khaparde0dffc832009-11-29 17:57:46 +00002633
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002634 status = be_tx_qs_create(adapter);
2635 if (status)
2636 goto err;
2637
Sathya Perla04b71172011-09-27 13:30:27 -04002638 be_cmd_get_fw_ver(adapter, adapter->fw_ver, NULL);
Somnath Kotur5a56eb12011-09-30 07:24:28 +00002639
Sathya Perlaa54769f2011-10-24 02:45:00 +00002640 status = be_vid_config(adapter, false, 0);
2641 if (status)
2642 goto err;
2643
2644 be_set_rx_mode(adapter->netdev);
2645
2646 status = be_cmd_get_flow_control(adapter, &tx_fc, &rx_fc);
Padmanabh Ratnakar590c3912011-11-25 05:47:26 +00002647 /* For Lancer: It is legal for this cmd to fail on VF */
2648 if (status && (be_physfn(adapter) || !lancer_chip(adapter)))
Sathya Perlaa54769f2011-10-24 02:45:00 +00002649 goto err;
Padmanabh Ratnakar590c3912011-11-25 05:47:26 +00002650
Sathya Perlaa54769f2011-10-24 02:45:00 +00002651 if (rx_fc != adapter->rx_fc || tx_fc != adapter->tx_fc) {
2652 status = be_cmd_set_flow_control(adapter, adapter->tx_fc,
2653 adapter->rx_fc);
Padmanabh Ratnakar590c3912011-11-25 05:47:26 +00002654 /* For Lancer: It is legal for this cmd to fail on VF */
2655 if (status && (be_physfn(adapter) || !lancer_chip(adapter)))
Sathya Perlaa54769f2011-10-24 02:45:00 +00002656 goto err;
2657 }
2658
Somnath Kotur5a56eb12011-09-30 07:24:28 +00002659 pcie_set_readrq(adapter->pdev, 4096);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002660
Sathya Perla11ac75e2011-12-13 00:58:50 +00002661 if (sriov_enabled(adapter)) {
Sathya Perlaf9449ab2011-10-24 02:45:01 +00002662 status = be_vf_setup(adapter);
2663 if (status)
2664 goto err;
2665 }
2666
Sathya Perla191eb752012-02-23 18:50:13 +00002667 schedule_delayed_work(&adapter->work, msecs_to_jiffies(1000));
2668 adapter->flags |= BE_FLAGS_WORKER_SCHEDULED;
2669
Sathya Perlaf9449ab2011-10-24 02:45:01 +00002670 return 0;
Sathya Perlaa54769f2011-10-24 02:45:00 +00002671err:
2672 be_clear(adapter);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002673 return status;
2674}
2675
Ivan Vecera66268732011-12-08 01:31:21 +00002676#ifdef CONFIG_NET_POLL_CONTROLLER
2677static void be_netpoll(struct net_device *netdev)
2678{
2679 struct be_adapter *adapter = netdev_priv(netdev);
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002680 struct be_eq_obj *eqo;
Ivan Vecera66268732011-12-08 01:31:21 +00002681 int i;
2682
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002683 for_all_evt_queues(adapter, eqo, i)
2684 event_handle(eqo);
2685
2686 return;
Ivan Vecera66268732011-12-08 01:31:21 +00002687}
2688#endif
2689
Ajit Khaparde84517482009-09-04 03:12:16 +00002690#define FW_FILE_HDR_SIGN "ServerEngines Corp. "
Sarveshwar Bandifa9a6fe2009-11-20 14:23:47 -08002691static bool be_flash_redboot(struct be_adapter *adapter,
Ajit Khaparde3f0d4562010-02-09 01:30:35 +00002692 const u8 *p, u32 img_start, int image_size,
2693 int hdr_size)
Sarveshwar Bandifa9a6fe2009-11-20 14:23:47 -08002694{
2695 u32 crc_offset;
2696 u8 flashed_crc[4];
2697 int status;
Ajit Khaparde3f0d4562010-02-09 01:30:35 +00002698
2699 crc_offset = hdr_size + img_start + image_size - 4;
2700
Sarveshwar Bandifa9a6fe2009-11-20 14:23:47 -08002701 p += crc_offset;
Ajit Khaparde3f0d4562010-02-09 01:30:35 +00002702
2703 status = be_cmd_get_flash_crc(adapter, flashed_crc,
Ajit Khapardef510fc62010-03-31 01:47:45 +00002704 (image_size - 4));
Sarveshwar Bandifa9a6fe2009-11-20 14:23:47 -08002705 if (status) {
2706 dev_err(&adapter->pdev->dev,
2707 "could not get crc from flash, not flashing redboot\n");
2708 return false;
2709 }
2710
2711 /*update redboot only if crc does not match*/
2712 if (!memcmp(flashed_crc, p, 4))
2713 return false;
2714 else
2715 return true;
Sarveshwar Bandifa9a6fe2009-11-20 14:23:47 -08002716}
2717
Sathya Perla306f1342011-08-02 19:57:45 +00002718static bool phy_flashing_required(struct be_adapter *adapter)
2719{
2720 int status = 0;
2721 struct be_phy_info phy_info;
2722
2723 status = be_cmd_get_phy_info(adapter, &phy_info);
2724 if (status)
2725 return false;
2726 if ((phy_info.phy_type == TN_8022) &&
2727 (phy_info.interface_type == PHY_TYPE_BASET_10GB)) {
2728 return true;
2729 }
2730 return false;
2731}
2732
/* Flash every applicable component of the UFI image onto the adapter.
 *
 * @fw:            firmware file; component table layout depends on chip gen
 * @flash_cmd:     pre-allocated DMA buffer used to stage 32KB write chunks
 * @num_of_images: number of image_hdr entries preceding the data (gen3 only)
 *
 * A component is skipped when:
 *  - it is NCSI firmware and the running FW version is older than 3.102.148.0
 *  - it is PHY firmware but the attached PHY does not require flashing
 *  - it is redboot and the on-flash CRC already matches the file's copy
 * Each component is streamed in 32KB chunks: intermediate chunks use a SAVE
 * opcode, the final chunk uses a FLASH (commit) opcode. Returns 0 on success,
 * -1 on a bounds or write failure.
 */
static int be_flash_data(struct be_adapter *adapter,
			const struct firmware *fw,
			struct be_dma_mem *flash_cmd, int num_of_images)

{
	int status = 0, i, filehdr_size = 0;
	u32 total_bytes = 0, flash_op;
	int num_bytes;
	const u8 *p = fw->data;
	struct be_cmd_write_flashrom *req = flash_cmd->va;
	const struct flash_comp *pflashcomp;
	int num_comp;

	/* Per-generation tables: {offset in file, component type, max size} */
	static const struct flash_comp gen3_flash_types[10] = {
		{ FLASH_iSCSI_PRIMARY_IMAGE_START_g3, IMG_TYPE_ISCSI_ACTIVE,
			FLASH_IMAGE_MAX_SIZE_g3},
		{ FLASH_REDBOOT_START_g3, IMG_TYPE_REDBOOT,
			FLASH_REDBOOT_IMAGE_MAX_SIZE_g3},
		{ FLASH_iSCSI_BIOS_START_g3, IMG_TYPE_BIOS,
			FLASH_BIOS_IMAGE_MAX_SIZE_g3},
		{ FLASH_PXE_BIOS_START_g3, IMG_TYPE_PXE_BIOS,
			FLASH_BIOS_IMAGE_MAX_SIZE_g3},
		{ FLASH_FCoE_BIOS_START_g3, IMG_TYPE_FCOE_BIOS,
			FLASH_BIOS_IMAGE_MAX_SIZE_g3},
		{ FLASH_iSCSI_BACKUP_IMAGE_START_g3, IMG_TYPE_ISCSI_BACKUP,
			FLASH_IMAGE_MAX_SIZE_g3},
		{ FLASH_FCoE_PRIMARY_IMAGE_START_g3, IMG_TYPE_FCOE_FW_ACTIVE,
			FLASH_IMAGE_MAX_SIZE_g3},
		{ FLASH_FCoE_BACKUP_IMAGE_START_g3, IMG_TYPE_FCOE_FW_BACKUP,
			FLASH_IMAGE_MAX_SIZE_g3},
		{ FLASH_NCSI_START_g3, IMG_TYPE_NCSI_FW,
			FLASH_NCSI_IMAGE_MAX_SIZE_g3},
		{ FLASH_PHY_FW_START_g3, IMG_TYPE_PHY_FW,
			FLASH_PHY_FW_IMAGE_MAX_SIZE_g3}
	};
	static const struct flash_comp gen2_flash_types[8] = {
		{ FLASH_iSCSI_PRIMARY_IMAGE_START_g2, IMG_TYPE_ISCSI_ACTIVE,
			FLASH_IMAGE_MAX_SIZE_g2},
		{ FLASH_REDBOOT_START_g2, IMG_TYPE_REDBOOT,
			FLASH_REDBOOT_IMAGE_MAX_SIZE_g2},
		{ FLASH_iSCSI_BIOS_START_g2, IMG_TYPE_BIOS,
			FLASH_BIOS_IMAGE_MAX_SIZE_g2},
		{ FLASH_PXE_BIOS_START_g2, IMG_TYPE_PXE_BIOS,
			FLASH_BIOS_IMAGE_MAX_SIZE_g2},
		{ FLASH_FCoE_BIOS_START_g2, IMG_TYPE_FCOE_BIOS,
			FLASH_BIOS_IMAGE_MAX_SIZE_g2},
		{ FLASH_iSCSI_BACKUP_IMAGE_START_g2, IMG_TYPE_ISCSI_BACKUP,
			FLASH_IMAGE_MAX_SIZE_g2},
		{ FLASH_FCoE_PRIMARY_IMAGE_START_g2, IMG_TYPE_FCOE_FW_ACTIVE,
			FLASH_IMAGE_MAX_SIZE_g2},
		{ FLASH_FCoE_BACKUP_IMAGE_START_g2, IMG_TYPE_FCOE_FW_BACKUP,
			FLASH_IMAGE_MAX_SIZE_g2}
	};

	if (adapter->generation == BE_GEN3) {
		pflashcomp = gen3_flash_types;
		filehdr_size = sizeof(struct flash_file_hdr_g3);
		num_comp = ARRAY_SIZE(gen3_flash_types);
	} else {
		pflashcomp = gen2_flash_types;
		filehdr_size = sizeof(struct flash_file_hdr_g2);
		num_comp = ARRAY_SIZE(gen2_flash_types);
	}
	for (i = 0; i < num_comp; i++) {
		/* NCSI FW is only flashed when running FW >= 3.102.148.0 */
		if ((pflashcomp[i].optype == IMG_TYPE_NCSI_FW) &&
				memcmp(adapter->fw_ver, "3.102.148.0", 11) < 0)
			continue;
		if (pflashcomp[i].optype == IMG_TYPE_PHY_FW) {
			if (!phy_flashing_required(adapter))
				continue;
		}
		/* redboot is flashed only when the on-flash CRC is stale */
		if ((pflashcomp[i].optype == IMG_TYPE_REDBOOT) &&
			(!be_flash_redboot(adapter, fw->data,
			pflashcomp[i].offset, pflashcomp[i].size, filehdr_size +
			(num_of_images * sizeof(struct image_hdr)))))
			continue;
		/* point p at this component's data within the file */
		p = fw->data;
		p += filehdr_size + pflashcomp[i].offset
			+ (num_of_images * sizeof(struct image_hdr));
		if (p + pflashcomp[i].size > fw->data + fw->size)
			return -1;
		total_bytes = pflashcomp[i].size;
		while (total_bytes) {
			if (total_bytes > 32*1024)
				num_bytes = 32*1024;
			else
				num_bytes = total_bytes;
			total_bytes -= num_bytes;
			/* last chunk commits (FLASH); earlier chunks SAVE */
			if (!total_bytes) {
				if (pflashcomp[i].optype == IMG_TYPE_PHY_FW)
					flash_op = FLASHROM_OPER_PHY_FLASH;
				else
					flash_op = FLASHROM_OPER_FLASH;
			} else {
				if (pflashcomp[i].optype == IMG_TYPE_PHY_FW)
					flash_op = FLASHROM_OPER_PHY_SAVE;
				else
					flash_op = FLASHROM_OPER_SAVE;
			}
			memcpy(req->params.data_buf, p, num_bytes);
			p += num_bytes;
			status = be_cmd_write_flashrom(adapter, flash_cmd,
				pflashcomp[i].optype, flash_op, num_bytes);
			if (status) {
				/* FW may legitimately reject PHY flashing;
				 * skip the component rather than failing */
				if ((status == ILLEGAL_IOCTL_REQ) &&
					(pflashcomp[i].optype ==
						IMG_TYPE_PHY_FW))
					break;
				dev_err(&adapter->pdev->dev,
					"cmd to write to flash rom failed.\n");
				return -1;
			}
		}
	}
	return 0;
}
2849
Ajit Khaparde3f0d4562010-02-09 01:30:35 +00002850static int get_ufigen_type(struct flash_file_hdr_g2 *fhdr)
2851{
2852 if (fhdr == NULL)
2853 return 0;
2854 if (fhdr->build[0] == '3')
2855 return BE_GEN3;
2856 else if (fhdr->build[0] == '2')
2857 return BE_GEN2;
2858 else
2859 return 0;
2860}
2861
/* Download a firmware image to a Lancer chip by streaming it in 32KB
 * chunks to the "/prg" object via WRITE_OBJECT commands, then committing
 * with a zero-length write. The image length must be 4-byte aligned.
 * Returns 0 on success or a negative errno / command status on failure.
 */
static int lancer_fw_download(struct be_adapter *adapter,
				const struct firmware *fw)
{
#define LANCER_FW_DOWNLOAD_CHUNK      (32 * 1024)
#define LANCER_FW_DOWNLOAD_LOCATION "/prg"
	struct be_dma_mem flash_cmd;
	const u8 *data_ptr = NULL;
	u8 *dest_image_ptr = NULL;
	size_t image_size = 0;
	u32 chunk_size = 0;
	u32 data_written = 0;
	u32 offset = 0;
	int status = 0;
	u8 add_status = 0;

	if (!IS_ALIGNED(fw->size, sizeof(u32))) {
		dev_err(&adapter->pdev->dev,
			"FW Image not properly aligned. "
			"Length must be 4 byte aligned.\n");
		status = -EINVAL;
		goto lancer_fw_exit;
	}

	/* one DMA buffer holds the command header plus one 32KB chunk */
	flash_cmd.size = sizeof(struct lancer_cmd_req_write_object)
				+ LANCER_FW_DOWNLOAD_CHUNK;
	flash_cmd.va = dma_alloc_coherent(&adapter->pdev->dev, flash_cmd.size,
						&flash_cmd.dma, GFP_KERNEL);
	if (!flash_cmd.va) {
		status = -ENOMEM;
		dev_err(&adapter->pdev->dev,
			"Memory allocation failure while flashing\n");
		goto lancer_fw_exit;
	}

	/* chunk payload sits immediately after the command header */
	dest_image_ptr = flash_cmd.va +
				sizeof(struct lancer_cmd_req_write_object);
	image_size = fw->size;
	data_ptr = fw->data;

	while (image_size) {
		chunk_size = min_t(u32, image_size, LANCER_FW_DOWNLOAD_CHUNK);

		/* Copy the image chunk content. */
		memcpy(dest_image_ptr, data_ptr, chunk_size);

		status = lancer_cmd_write_object(adapter, &flash_cmd,
				chunk_size, offset, LANCER_FW_DOWNLOAD_LOCATION,
				&data_written, &add_status);

		if (status)
			break;

		/* advance by what the card actually accepted, which may be
		 * less than chunk_size */
		offset += data_written;
		data_ptr += data_written;
		image_size -= data_written;
	}

	if (!status) {
		/* Commit the FW written: a zero-length write finalizes it */
		status = lancer_cmd_write_object(adapter, &flash_cmd,
					0, offset, LANCER_FW_DOWNLOAD_LOCATION,
					&data_written, &add_status);
	}

	dma_free_coherent(&adapter->pdev->dev, flash_cmd.size, flash_cmd.va,
				flash_cmd.dma);
	if (status) {
		dev_err(&adapter->pdev->dev,
			"Firmware load error. "
			"Status code: 0x%x Additional Status: 0x%x\n",
			status, add_status);
		goto lancer_fw_exit;
	}

	dev_info(&adapter->pdev->dev, "Firmware flashed successfully\n");
lancer_fw_exit:
	return status;
}
2940
/* Flash a UFI firmware file onto a BE2/BE3 (non-Lancer) adapter.
 * Validates that the file's generation (from its header build string)
 * matches the adapter generation, then hands each matching image off to
 * be_flash_data(). For gen3 files, only image id 1 is flashed.
 * Returns 0 on success, -ENOMEM/-1 on failure.
 */
static int be_fw_download(struct be_adapter *adapter, const struct firmware* fw)
{
	struct flash_file_hdr_g2 *fhdr;
	struct flash_file_hdr_g3 *fhdr3;
	struct image_hdr *img_hdr_ptr = NULL;
	struct be_dma_mem flash_cmd;
	const u8 *p;
	int status = 0, i = 0, num_imgs = 0;

	p = fw->data;
	/* gen2 header is a prefix of gen3's; safe to inspect either way */
	fhdr = (struct flash_file_hdr_g2 *) p;

	/* staging buffer: command header + one 32KB data chunk */
	flash_cmd.size = sizeof(struct be_cmd_write_flashrom) + 32*1024;
	flash_cmd.va = dma_alloc_coherent(&adapter->pdev->dev, flash_cmd.size,
					  &flash_cmd.dma, GFP_KERNEL);
	if (!flash_cmd.va) {
		status = -ENOMEM;
		dev_err(&adapter->pdev->dev,
			"Memory allocation failure while flashing\n");
		goto be_fw_exit;
	}

	if ((adapter->generation == BE_GEN3) &&
			(get_ufigen_type(fhdr) == BE_GEN3)) {
		fhdr3 = (struct flash_file_hdr_g3 *) fw->data;
		num_imgs = le32_to_cpu(fhdr3->num_imgs);
		for (i = 0; i < num_imgs; i++) {
			img_hdr_ptr = (struct image_hdr *) (fw->data +
					(sizeof(struct flash_file_hdr_g3) +
					 i * sizeof(struct image_hdr)));
			if (le32_to_cpu(img_hdr_ptr->imageid) == 1)
				status = be_flash_data(adapter, fw, &flash_cmd,
							num_imgs);
		}
	} else if ((adapter->generation == BE_GEN2) &&
			(get_ufigen_type(fhdr) == BE_GEN2)) {
		status = be_flash_data(adapter, fw, &flash_cmd, 0);
	} else {
		dev_err(&adapter->pdev->dev,
			"UFI and Interface are not compatible for flashing\n");
		status = -1;
	}

	dma_free_coherent(&adapter->pdev->dev, flash_cmd.size, flash_cmd.va,
			  flash_cmd.dma);
	if (status) {
		dev_err(&adapter->pdev->dev, "Firmware load error\n");
		goto be_fw_exit;
	}

	dev_info(&adapter->pdev->dev, "Firmware flashed successfully\n");

be_fw_exit:
	return status;
}
2996
2997int be_load_fw(struct be_adapter *adapter, u8 *fw_file)
2998{
2999 const struct firmware *fw;
3000 int status;
3001
3002 if (!netif_running(adapter->netdev)) {
3003 dev_err(&adapter->pdev->dev,
3004 "Firmware load not allowed (interface is down)\n");
3005 return -1;
3006 }
3007
3008 status = request_firmware(&fw, fw_file, &adapter->pdev->dev);
3009 if (status)
3010 goto fw_exit;
3011
3012 dev_info(&adapter->pdev->dev, "Flashing firmware file %s\n", fw_file);
3013
3014 if (lancer_chip(adapter))
3015 status = lancer_fw_download(adapter, fw);
3016 else
3017 status = be_fw_download(adapter, fw);
3018
Ajit Khaparde84517482009-09-04 03:12:16 +00003019fw_exit:
3020 release_firmware(fw);
3021 return status;
3022}
3023
/* net_device callbacks wired into the networking core; includes the
 * SR-IOV ndo_set_vf_* hooks and an optional netpoll controller.
 */
static const struct net_device_ops be_netdev_ops = {
	.ndo_open		= be_open,
	.ndo_stop		= be_close,
	.ndo_start_xmit		= be_xmit,
	.ndo_set_rx_mode	= be_set_rx_mode,
	.ndo_set_mac_address	= be_mac_addr_set,
	.ndo_change_mtu		= be_change_mtu,
	.ndo_get_stats64	= be_get_stats64,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_vlan_rx_add_vid	= be_vlan_add_vid,
	.ndo_vlan_rx_kill_vid	= be_vlan_rem_vid,
	.ndo_set_vf_mac		= be_set_vf_mac,
	.ndo_set_vf_vlan	= be_set_vf_vlan,
	.ndo_set_vf_tx_rate	= be_set_vf_tx_rate,
	.ndo_get_vf_config	= be_get_vf_config,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	= be_netpoll,
#endif
};
3043
3044static void be_netdev_init(struct net_device *netdev)
3045{
3046 struct be_adapter *adapter = netdev_priv(netdev);
Sathya Perla10ef9ab2012-02-09 18:05:27 +00003047 struct be_eq_obj *eqo;
Sathya Perla3abcded2010-10-03 22:12:27 -07003048 int i;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003049
Michał Mirosław6332c8d2011-04-07 02:43:48 +00003050 netdev->hw_features |= NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6 |
Michał Mirosław8b8ddc62011-04-08 02:38:47 +00003051 NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_RXCSUM |
3052 NETIF_F_HW_VLAN_TX;
3053 if (be_multi_rxq(adapter))
3054 netdev->hw_features |= NETIF_F_RXHASH;
Michał Mirosław6332c8d2011-04-07 02:43:48 +00003055
3056 netdev->features |= netdev->hw_features |
Michał Mirosław8b8ddc62011-04-08 02:38:47 +00003057 NETIF_F_HW_VLAN_RX | NETIF_F_HW_VLAN_FILTER;
Ajit Khaparde4b972912011-04-06 18:07:43 +00003058
Padmanabh Ratnakareb8a50d2011-06-11 15:58:46 -07003059 netdev->vlan_features |= NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6 |
Michał Mirosław79032642010-11-30 06:38:00 +00003060 NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;
Ajit Khaparde51c59872009-11-29 17:54:54 +00003061
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003062 netdev->flags |= IFF_MULTICAST;
3063
Ajit Khapardec190e3c2009-09-04 03:12:29 +00003064 netif_set_gso_max_size(netdev, 65535);
3065
Sathya Perla10ef9ab2012-02-09 18:05:27 +00003066 netdev->netdev_ops = &be_netdev_ops;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003067
3068 SET_ETHTOOL_OPS(netdev, &be_ethtool_ops);
3069
Sathya Perla10ef9ab2012-02-09 18:05:27 +00003070 for_all_evt_queues(adapter, eqo, i)
3071 netif_napi_add(netdev, &eqo->napi, be_poll, BE_NAPI_WEIGHT);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003072}
3073
3074static void be_unmap_pci_bars(struct be_adapter *adapter)
3075{
Sathya Perla8788fdc2009-07-27 22:52:03 +00003076 if (adapter->csr)
3077 iounmap(adapter->csr);
3078 if (adapter->db)
3079 iounmap(adapter->db);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003080}
3081
3082static int be_map_pci_bars(struct be_adapter *adapter)
3083{
3084 u8 __iomem *addr;
Sathya Perladb3ea782011-08-22 19:41:52 +00003085 int db_reg;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003086
Sathya Perlafe6d2a32010-11-21 23:25:50 +00003087 if (lancer_chip(adapter)) {
3088 addr = ioremap_nocache(pci_resource_start(adapter->pdev, 0),
3089 pci_resource_len(adapter->pdev, 0));
3090 if (addr == NULL)
3091 return -ENOMEM;
3092 adapter->db = addr;
3093 return 0;
3094 }
3095
Sarveshwar Bandiba343c72010-03-31 02:56:12 +00003096 if (be_physfn(adapter)) {
3097 addr = ioremap_nocache(pci_resource_start(adapter->pdev, 2),
3098 pci_resource_len(adapter->pdev, 2));
3099 if (addr == NULL)
3100 return -ENOMEM;
3101 adapter->csr = addr;
3102 }
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003103
Sarveshwar Bandiba343c72010-03-31 02:56:12 +00003104 if (adapter->generation == BE_GEN2) {
Sarveshwar Bandiba343c72010-03-31 02:56:12 +00003105 db_reg = 4;
3106 } else {
Sarveshwar Bandiba343c72010-03-31 02:56:12 +00003107 if (be_physfn(adapter))
3108 db_reg = 4;
3109 else
3110 db_reg = 0;
3111 }
3112 addr = ioremap_nocache(pci_resource_start(adapter->pdev, db_reg),
3113 pci_resource_len(adapter->pdev, db_reg));
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003114 if (addr == NULL)
3115 goto pci_map_err;
Sathya Perla8788fdc2009-07-27 22:52:03 +00003116 adapter->db = addr;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003117
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003118 return 0;
3119pci_map_err:
3120 be_unmap_pci_bars(adapter);
3121 return -ENOMEM;
3122}
3123
3124
3125static void be_ctrl_cleanup(struct be_adapter *adapter)
3126{
Sathya Perla8788fdc2009-07-27 22:52:03 +00003127 struct be_dma_mem *mem = &adapter->mbox_mem_alloced;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003128
3129 be_unmap_pci_bars(adapter);
3130
3131 if (mem->va)
Ivan Vecera2b7bceb2011-02-02 08:05:12 +00003132 dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
3133 mem->dma);
Sathya Perlae7b909a2009-11-22 22:01:10 +00003134
Sathya Perla5b8821b2011-08-02 19:57:44 +00003135 mem = &adapter->rx_filter;
Sathya Perlae7b909a2009-11-22 22:01:10 +00003136 if (mem->va)
Ivan Vecera2b7bceb2011-02-02 08:05:12 +00003137 dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
3138 mem->dma);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003139}
3140
/* Initialize the control path: map PCI BARs, allocate the (16-byte aligned)
 * mailbox and the rx-filter command DMA buffers, and set up the locks used
 * to serialize mailbox/MCC access. Uses a goto ladder to unwind partial
 * setup on failure. Returns 0 or a negative errno.
 */
static int be_ctrl_init(struct be_adapter *adapter)
{
	struct be_dma_mem *mbox_mem_alloc = &adapter->mbox_mem_alloced;
	struct be_dma_mem *mbox_mem_align = &adapter->mbox_mem;
	struct be_dma_mem *rx_filter = &adapter->rx_filter;
	int status;

	status = be_map_pci_bars(adapter);
	if (status)
		goto done;

	/* over-allocate by 16 so the mailbox can be aligned to 16 bytes */
	mbox_mem_alloc->size = sizeof(struct be_mcc_mailbox) + 16;
	mbox_mem_alloc->va = dma_alloc_coherent(&adapter->pdev->dev,
						mbox_mem_alloc->size,
						&mbox_mem_alloc->dma,
						GFP_KERNEL);
	if (!mbox_mem_alloc->va) {
		status = -ENOMEM;
		goto unmap_pci_bars;
	}
	/* mbox_mem is an aligned view into mbox_mem_alloced; only the
	 * latter is ever freed */
	mbox_mem_align->size = sizeof(struct be_mcc_mailbox);
	mbox_mem_align->va = PTR_ALIGN(mbox_mem_alloc->va, 16);
	mbox_mem_align->dma = PTR_ALIGN(mbox_mem_alloc->dma, 16);
	memset(mbox_mem_align->va, 0, sizeof(struct be_mcc_mailbox));

	rx_filter->size = sizeof(struct be_cmd_req_rx_filter);
	rx_filter->va = dma_alloc_coherent(&adapter->pdev->dev, rx_filter->size,
					&rx_filter->dma, GFP_KERNEL);
	if (rx_filter->va == NULL) {
		status = -ENOMEM;
		goto free_mbox;
	}
	memset(rx_filter->va, 0, rx_filter->size);

	mutex_init(&adapter->mbox_lock);
	spin_lock_init(&adapter->mcc_lock);
	spin_lock_init(&adapter->mcc_cq_lock);

	init_completion(&adapter->flash_compl);
	/* saved config space is restored on EEH/error recovery */
	pci_save_state(adapter->pdev);
	return 0;

free_mbox:
	dma_free_coherent(&adapter->pdev->dev, mbox_mem_alloc->size,
			  mbox_mem_alloc->va, mbox_mem_alloc->dma);

unmap_pci_bars:
	be_unmap_pci_bars(adapter);

done:
	return status;
}
3193
3194static void be_stats_cleanup(struct be_adapter *adapter)
3195{
Sathya Perla3abcded2010-10-03 22:12:27 -07003196 struct be_dma_mem *cmd = &adapter->stats_cmd;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003197
3198 if (cmd->va)
Ivan Vecera2b7bceb2011-02-02 08:05:12 +00003199 dma_free_coherent(&adapter->pdev->dev, cmd->size,
3200 cmd->va, cmd->dma);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003201}
3202
3203static int be_stats_init(struct be_adapter *adapter)
3204{
Sathya Perla3abcded2010-10-03 22:12:27 -07003205 struct be_dma_mem *cmd = &adapter->stats_cmd;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003206
Selvin Xavier005d5692011-05-16 07:36:35 +00003207 if (adapter->generation == BE_GEN2) {
Ajit Khaparde89a88ab2011-05-16 07:36:18 +00003208 cmd->size = sizeof(struct be_cmd_req_get_stats_v0);
Selvin Xavier005d5692011-05-16 07:36:35 +00003209 } else {
3210 if (lancer_chip(adapter))
3211 cmd->size = sizeof(struct lancer_cmd_req_pport_stats);
3212 else
3213 cmd->size = sizeof(struct be_cmd_req_get_stats_v1);
3214 }
Ivan Vecera2b7bceb2011-02-02 08:05:12 +00003215 cmd->va = dma_alloc_coherent(&adapter->pdev->dev, cmd->size, &cmd->dma,
3216 GFP_KERNEL);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003217 if (cmd->va == NULL)
3218 return -1;
David S. Millerd291b9a2010-01-28 21:36:21 -08003219 memset(cmd->va, 0, cmd->size);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003220 return 0;
3221}
3222
/* PCI remove callback: tear everything down in the reverse order of probe.
 * The netdev is unregistered first so no new traffic/ioctls arrive while
 * queues, stats buffers, control structures and SR-IOV are dismantled;
 * free_netdev() (which frees the adapter itself) must come last.
 */
static void __devexit be_remove(struct pci_dev *pdev)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);

	/* probe may have failed before drvdata was set */
	if (!adapter)
		return;

	unregister_netdev(adapter->netdev);

	be_clear(adapter);

	be_stats_cleanup(adapter);

	be_ctrl_cleanup(adapter);

	be_sriov_disable(adapter);

	pci_set_drvdata(pdev, NULL);
	pci_release_regions(pdev);
	pci_disable_device(pdev);

	free_netdev(adapter->netdev);
}
3246
Sathya Perla2243e2e2009-11-22 22:02:03 +00003247static int be_get_config(struct be_adapter *adapter)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003248{
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003249 int status;
Sathya Perla43a04fdc2009-10-14 20:21:17 +00003250
Sathya Perla3abcded2010-10-03 22:12:27 -07003251 status = be_cmd_query_fw_cfg(adapter, &adapter->port_num,
3252 &adapter->function_mode, &adapter->function_caps);
Sathya Perla2243e2e2009-11-22 22:02:03 +00003253 if (status)
3254 return status;
3255
Sathya Perla752961a2011-10-24 02:45:03 +00003256 if (adapter->function_mode & FLEX10_MODE)
Ajit Khaparde82903e42010-02-09 01:34:57 +00003257 adapter->max_vlans = BE_NUM_VLANS_SUPPORTED/4;
3258 else
3259 adapter->max_vlans = BE_NUM_VLANS_SUPPORTED;
3260
Ajit Khaparde9e1453c2011-02-20 11:42:22 +00003261 status = be_cmd_get_cntl_attributes(adapter);
3262 if (status)
3263 return status;
3264
Sathya Perla2243e2e2009-11-22 22:02:03 +00003265 return 0;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003266}
3267
/* Classify the adapter generation from the PCI device id.
 * BE/OC ID1 -> gen2; BE/OC ID2 and OC ID5 -> gen3. OC ID3/ID4 (Lancer)
 * additionally validate the SLI interface register and record the SLI
 * family. Unknown ids leave generation at 0.
 * Returns 0, or -EINVAL when the SLI_INTF register contents are invalid.
 */
static int be_dev_family_check(struct be_adapter *adapter)
{
	struct pci_dev *pdev = adapter->pdev;
	u32 sli_intf = 0, if_type;

	switch (pdev->device) {
	case BE_DEVICE_ID1:
	case OC_DEVICE_ID1:
		adapter->generation = BE_GEN2;
		break;
	case BE_DEVICE_ID2:
	case OC_DEVICE_ID2:
	case OC_DEVICE_ID5:
		adapter->generation = BE_GEN3;
		break;
	case OC_DEVICE_ID3:
	case OC_DEVICE_ID4:
		pci_read_config_dword(pdev, SLI_INTF_REG_OFFSET, &sli_intf);
		if_type = (sli_intf & SLI_INTF_IF_TYPE_MASK) >>
						SLI_INTF_IF_TYPE_SHIFT;

		/* the register must carry the VALID signature and
		 * interface type 2 for a usable Lancer function */
		if (((sli_intf & SLI_INTF_VALID_MASK) != SLI_INTF_VALID) ||
			if_type != 0x02) {
			dev_err(&pdev->dev, "SLI_INTF reg val is not valid\n");
			return -EINVAL;
		}
		adapter->sli_family = ((sli_intf & SLI_INTF_FAMILY_MASK) >>
					 SLI_INTF_FAMILY_SHIFT);
		adapter->generation = BE_GEN3;
		break;
	default:
		adapter->generation = 0;
	}
	return 0;
}
3303
Padmanabh Ratnakar37eed1c2011-03-07 03:08:36 +00003304static int lancer_wait_ready(struct be_adapter *adapter)
3305{
Padmanabh Ratnakard8110f62011-11-25 05:48:23 +00003306#define SLIPORT_READY_TIMEOUT 30
Padmanabh Ratnakar37eed1c2011-03-07 03:08:36 +00003307 u32 sliport_status;
3308 int status = 0, i;
3309
3310 for (i = 0; i < SLIPORT_READY_TIMEOUT; i++) {
3311 sliport_status = ioread32(adapter->db + SLIPORT_STATUS_OFFSET);
3312 if (sliport_status & SLIPORT_STATUS_RDY_MASK)
3313 break;
3314
Padmanabh Ratnakard8110f62011-11-25 05:48:23 +00003315 msleep(1000);
Padmanabh Ratnakar37eed1c2011-03-07 03:08:36 +00003316 }
3317
3318 if (i == SLIPORT_READY_TIMEOUT)
3319 status = -1;
3320
3321 return status;
3322}
3323
/* Wait for the Lancer function to become ready and, if the firmware
 * reports an error that is flagged as recoverable-by-reset (RN bit),
 * trigger a physical port reset and wait again. Returns 0 when the port
 * ends up ready and error-free, -1 otherwise.
 */
static int lancer_test_and_set_rdy_state(struct be_adapter *adapter)
{
	int status;
	u32 sliport_status, err, reset_needed;
	status = lancer_wait_ready(adapter);
	if (!status) {
		sliport_status = ioread32(adapter->db + SLIPORT_STATUS_OFFSET);
		err = sliport_status & SLIPORT_STATUS_ERR_MASK;
		reset_needed = sliport_status & SLIPORT_STATUS_RN_MASK;
		if (err && reset_needed) {
			/* initiate a port reset via the control register */
			iowrite32(SLI_PORT_CONTROL_IP_MASK,
				  adapter->db + SLIPORT_CONTROL_OFFSET);

			/* check adapter has corrected the error */
			status = lancer_wait_ready(adapter);
			sliport_status = ioread32(adapter->db +
						  SLIPORT_STATUS_OFFSET);
			sliport_status &= (SLIPORT_STATUS_ERR_MASK |
						SLIPORT_STATUS_RN_MASK);
			/* fail if still not ready, or error/reset flags
			 * remain asserted after the reset */
			if (status || sliport_status)
				status = -1;
		} else if (err || reset_needed) {
			/* error without a reset path (or vice versa) is
			 * unrecoverable here */
			status = -1;
		}
	}
	return status;
}
3351
/* Called from the periodic worker on Lancer: if the SLIPORT status shows a
 * firmware error, attempt in-place recovery by resetting the function,
 * tearing down and re-creating all adapter resources, and re-opening the
 * interface if it was running. Skipped while an EEH or UE condition is
 * already being handled.
 */
static void lancer_test_and_recover_fn_err(struct be_adapter *adapter)
{
	int status;
	u32 sliport_status;

	if (adapter->eeh_err || adapter->ue_detected)
		return;

	sliport_status = ioread32(adapter->db + SLIPORT_STATUS_OFFSET);

	if (sliport_status & SLIPORT_STATUS_ERR_MASK) {
		dev_err(&adapter->pdev->dev,
			"Adapter in error state."
			"Trying to recover.\n");

		status = lancer_test_and_set_rdy_state(adapter);
		if (status)
			goto err;

		/* keep the stack away while resources are rebuilt */
		netif_device_detach(adapter->netdev);

		if (netif_running(adapter->netdev))
			be_close(adapter->netdev);

		be_clear(adapter);

		/* clear the stale timeout flag so be_setup's FW cmds run */
		adapter->fw_timeout = false;

		status = be_setup(adapter);
		if (status)
			goto err;

		if (netif_running(adapter->netdev)) {
			status = be_open(adapter->netdev);
			if (status)
				goto err;
		}

		netif_device_attach(adapter->netdev);

		dev_err(&adapter->pdev->dev,
			"Adapter error recovery succeeded\n");
	}
	return;
err:
	dev_err(&adapter->pdev->dev,
		"Adapter error recovery failed\n");
}
3400
3401static void be_worker(struct work_struct *work)
3402{
3403 struct be_adapter *adapter =
3404 container_of(work, struct be_adapter, work.work);
3405 struct be_rx_obj *rxo;
Sathya Perla10ef9ab2012-02-09 18:05:27 +00003406 struct be_eq_obj *eqo;
Padmanabh Ratnakard8110f62011-11-25 05:48:23 +00003407 int i;
3408
3409 if (lancer_chip(adapter))
3410 lancer_test_and_recover_fn_err(adapter);
3411
3412 be_detect_dump_ue(adapter);
3413
3414 /* when interrupts are not yet enabled, just reap any pending
3415 * mcc completions */
3416 if (!netif_running(adapter->netdev)) {
Sathya Perla10ef9ab2012-02-09 18:05:27 +00003417 be_process_mcc(adapter);
Padmanabh Ratnakard8110f62011-11-25 05:48:23 +00003418 goto reschedule;
3419 }
3420
3421 if (!adapter->stats_cmd_sent) {
3422 if (lancer_chip(adapter))
3423 lancer_cmd_get_pport_stats(adapter,
3424 &adapter->stats_cmd);
3425 else
3426 be_cmd_get_stats(adapter, &adapter->stats_cmd);
3427 }
3428
3429 for_all_rx_queues(adapter, rxo, i) {
Padmanabh Ratnakard8110f62011-11-25 05:48:23 +00003430 if (rxo->rx_post_starved) {
3431 rxo->rx_post_starved = false;
3432 be_post_rx_frags(rxo, GFP_KERNEL);
3433 }
3434 }
3435
Sathya Perla10ef9ab2012-02-09 18:05:27 +00003436 for_all_evt_queues(adapter, eqo, i)
3437 be_eqd_update(adapter, eqo);
3438
Padmanabh Ratnakard8110f62011-11-25 05:48:23 +00003439reschedule:
3440 adapter->work_counter++;
3441 schedule_delayed_work(&adapter->work, msecs_to_jiffies(1000));
3442}
3443
/* PCI probe: bring up one adapter instance.
 * Enables the PCI device, allocates the netdev, maps BARs, syncs with FW,
 * creates queues and registers the net device.  On any failure the goto
 * ladder at the bottom unwinds exactly the steps already completed.
 */
static int __devinit be_probe(struct pci_dev *pdev,
			const struct pci_device_id *pdev_id)
{
	int status = 0;
	struct be_adapter *adapter;
	struct net_device *netdev;

	status = pci_enable_device(pdev);
	if (status)
		goto do_none;

	status = pci_request_regions(pdev, DRV_NAME);
	if (status)
		goto disable_dev;
	pci_set_master(pdev);

	/* One netdev with MAX_TX_QS TX queues; the adapter struct lives in
	 * the netdev private area
	 */
	netdev = alloc_etherdev_mq(sizeof(struct be_adapter), MAX_TX_QS);
	if (netdev == NULL) {
		status = -ENOMEM;
		goto rel_reg;
	}
	adapter = netdev_priv(netdev);
	adapter->pdev = pdev;
	pci_set_drvdata(pdev, adapter);

	/* Identify chip generation (BE2/BE3/Lancer) from the PCI id */
	status = be_dev_family_check(adapter);
	if (status)
		goto free_netdev;

	adapter->netdev = netdev;
	SET_NETDEV_DEV(netdev, &pdev->dev);

	/* Prefer 64-bit DMA; fall back to 32-bit if unavailable */
	status = dma_set_mask(&pdev->dev, DMA_BIT_MASK(64));
	if (!status) {
		netdev->features |= NETIF_F_HIGHDMA;
	} else {
		status = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
		if (status) {
			dev_err(&pdev->dev, "Could not set PCI DMA Mask\n");
			goto free_netdev;
		}
	}

	status = be_sriov_enable(adapter);
	if (status)
		goto free_netdev;

	/* Map doorbell/CSR BARs and set up the mailbox used for FW cmds */
	status = be_ctrl_init(adapter);
	if (status)
		goto disable_sriov;

	if (lancer_chip(adapter)) {
		/* Lancer: wait for SLI port readiness; if not ready, force a
		 * port reset and retry before giving up on the adapter
		 */
		status = lancer_wait_ready(adapter);
		if (!status) {
			iowrite32(SLI_PORT_CONTROL_IP_MASK,
					adapter->db + SLIPORT_CONTROL_OFFSET);
			status = lancer_test_and_set_rdy_state(adapter);
		}
		if (status) {
			dev_err(&pdev->dev, "Adapter in non recoverable error\n");
			goto ctrl_clean;
		}
	}

	/* sync up with fw's ready state */
	if (be_physfn(adapter)) {
		status = be_cmd_POST(adapter);
		if (status)
			goto ctrl_clean;
	}

	/* tell fw we're ready to fire cmds */
	status = be_cmd_fw_init(adapter);
	if (status)
		goto ctrl_clean;

	status = be_cmd_reset_function(adapter);
	if (status)
		goto ctrl_clean;

	/* The INTR bit may be set in the card when probed by a kdump kernel
	 * after a crash.
	 */
	if (!lancer_chip(adapter))
		be_intr_set(adapter, false);

	status = be_stats_init(adapter);
	if (status)
		goto ctrl_clean;

	/* Query FW for config: port count, function caps, MAC etc. */
	status = be_get_config(adapter);
	if (status)
		goto stats_clean;

	INIT_DELAYED_WORK(&adapter->work, be_worker);
	adapter->rx_fc = adapter->tx_fc = true; /* flow control on by default */

	/* Create queues/interrupts and apply the initial configuration */
	status = be_setup(adapter);
	if (status)
		goto msix_disable;

	be_netdev_init(netdev);
	status = register_netdev(netdev);
	if (status != 0)
		goto unsetup;

	dev_info(&pdev->dev, "%s: %s port %d\n", netdev->name, nic_name(pdev),
		adapter->port_num);

	return 0;

unsetup:
	be_clear(adapter);
msix_disable:
	be_msix_disable(adapter);
stats_clean:
	be_stats_cleanup(adapter);
ctrl_clean:
	be_ctrl_cleanup(adapter);
disable_sriov:
	be_sriov_disable(adapter);
free_netdev:
	free_netdev(netdev);
	pci_set_drvdata(pdev, NULL);
rel_reg:
	pci_release_regions(pdev);
disable_dev:
	pci_disable_device(pdev);
do_none:
	dev_err(&pdev->dev, "%s initialization failed\n", nic_name(pdev));
	return status;
}
3576
/* PM suspend: arm wake-on-lan if configured, stop the interface, tear
 * down queues/interrupts and put the PCI device into the requested
 * low-power state.  Undone by be_resume().
 */
static int be_suspend(struct pci_dev *pdev, pm_message_t state)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);
	struct net_device *netdev = adapter->netdev;

	/* Arm WoL in FW before the function is quiesced */
	if (adapter->wol)
		be_setup_wol(adapter, true);

	netif_device_detach(netdev);
	if (netif_running(netdev)) {
		rtnl_lock();
		be_close(netdev);
		rtnl_unlock();
	}
	be_clear(adapter);

	pci_save_state(pdev);
	pci_disable_device(pdev);
	pci_set_power_state(pdev, pci_choose_state(pdev, state));
	return 0;
}
3598
3599static int be_resume(struct pci_dev *pdev)
3600{
3601 int status = 0;
3602 struct be_adapter *adapter = pci_get_drvdata(pdev);
3603 struct net_device *netdev = adapter->netdev;
3604
3605 netif_device_detach(netdev);
3606
3607 status = pci_enable_device(pdev);
3608 if (status)
3609 return status;
3610
3611 pci_set_power_state(pdev, 0);
3612 pci_restore_state(pdev);
3613
Sathya Perla2243e2e2009-11-22 22:02:03 +00003614 /* tell fw we're ready to fire cmds */
3615 status = be_cmd_fw_init(adapter);
3616 if (status)
3617 return status;
3618
Sarveshwar Bandi9b0365f2009-08-12 21:01:29 +00003619 be_setup(adapter);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003620 if (netif_running(netdev)) {
3621 rtnl_lock();
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003622 be_open(netdev);
3623 rtnl_unlock();
3624 }
3625 netif_device_attach(netdev);
Ajit Khaparde71d8d1b2009-12-03 06:16:59 +00003626
3627 if (adapter->wol)
3628 be_setup_wol(adapter, false);
Ajit Khapardea4ca0552011-02-11 13:38:03 +00003629
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003630 return 0;
3631}
3632
/*
 * An FLR will stop BE from DMAing any data.
 * Shutdown hook: quiesce the adapter (stop the worker, arm WoL if
 * enabled, reset the PCI function) so no DMA is in flight across
 * reboot/kexec.
 */
static void be_shutdown(struct pci_dev *pdev)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);

	/* adapter may be NULL if be_probe() failed part-way */
	if (!adapter)
		return;

	/* Stop be_worker before touching hw state it may be using */
	cancel_delayed_work_sync(&adapter->work);

	netif_device_detach(adapter->netdev);

	if (adapter->wol)
		be_setup_wol(adapter, true);

	/* Function reset guarantees the device stops DMAing */
	be_cmd_reset_function(adapter);

	pci_disable_device(pdev);
}
3654
/* EEH callback: a PCI channel error was detected.  Quiesce the driver
 * and tell the EEH core whether a slot reset should be attempted or the
 * device should be disconnected.
 */
static pci_ers_result_t be_eeh_err_detected(struct pci_dev *pdev,
				pci_channel_state_t state)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);
	struct net_device *netdev = adapter->netdev;

	dev_err(&adapter->pdev->dev, "EEH error detected\n");

	/* Blocks further hw access (checked by cmd paths and be_worker) */
	adapter->eeh_err = true;

	netif_device_detach(netdev);

	if (netif_running(netdev)) {
		rtnl_lock();
		be_close(netdev);
		rtnl_unlock();
	}
	be_clear(adapter);

	/* Permanent failure: no reset will help; don't touch the device */
	if (state == pci_channel_io_perm_failure)
		return PCI_ERS_RESULT_DISCONNECT;

	pci_disable_device(pdev);

	return PCI_ERS_RESULT_NEED_RESET;
}
3681
/* EEH callback: the slot has been reset.  Re-enable the device and
 * verify the FW is ready before reporting the slot as recovered.
 */
static pci_ers_result_t be_eeh_reset(struct pci_dev *pdev)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);
	int status;

	dev_info(&adapter->pdev->dev, "EEH reset\n");
	/* Clear the sticky error flags so hw access is allowed again */
	adapter->eeh_err = false;
	adapter->ue_detected = false;
	adapter->fw_timeout = false;

	status = pci_enable_device(pdev);
	if (status)
		return PCI_ERS_RESULT_DISCONNECT;

	pci_set_master(pdev);
	pci_set_power_state(pdev, 0);
	pci_restore_state(pdev);

	/* Check if card is ok and fw is ready */
	status = be_cmd_POST(adapter);
	if (status)
		return PCI_ERS_RESULT_DISCONNECT;

	return PCI_ERS_RESULT_RECOVERED;
}
3707
/* EEH callback: traffic may flow again.  Rebuild adapter state (queues,
 * filters), reopen the interface if it was running, and re-attach the
 * net device.  Failures are only logged - the device stays detached.
 */
static void be_eeh_resume(struct pci_dev *pdev)
{
	int status = 0;
	struct be_adapter *adapter = pci_get_drvdata(pdev);
	struct net_device *netdev = adapter->netdev;

	dev_info(&adapter->pdev->dev, "EEH resume\n");

	pci_save_state(pdev);

	/* tell fw we're ready to fire cmds */
	status = be_cmd_fw_init(adapter);
	if (status)
		goto err;

	status = be_setup(adapter);
	if (status)
		goto err;

	if (netif_running(netdev)) {
		status = be_open(netdev);
		if (status)
			goto err;
	}
	netif_device_attach(netdev);
	return;
err:
	dev_err(&adapter->pdev->dev, "EEH resume failed\n");
}
3737
/* PCI error-recovery (EEH) callbacks, wired into be_driver below */
static struct pci_error_handlers be_eeh_handlers = {
	.error_detected = be_eeh_err_detected,
	.slot_reset = be_eeh_reset,
	.resume = be_eeh_resume,
};
3743
/* PCI driver descriptor: probe/remove, PM and error-recovery hooks */
static struct pci_driver be_driver = {
	.name = DRV_NAME,
	.id_table = be_dev_ids,
	.probe = be_probe,
	.remove = be_remove,
	.suspend = be_suspend,
	.resume = be_resume,
	.shutdown = be_shutdown,
	.err_handler = &be_eeh_handlers
};
3754
3755static int __init be_init_module(void)
3756{
Joe Perches8e95a202009-12-03 07:58:21 +00003757 if (rx_frag_size != 8192 && rx_frag_size != 4096 &&
3758 rx_frag_size != 2048) {
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003759 printk(KERN_WARNING DRV_NAME
3760 " : Module param rx_frag_size must be 2048/4096/8192."
3761 " Using 2048\n");
3762 rx_frag_size = 2048;
3763 }
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003764
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003765 return pci_register_driver(&be_driver);
3766}
3767module_init(be_init_module);
3768
/* Module exit point: unregister the PCI driver (triggers be_remove for
 * every bound device)
 */
static void __exit be_exit_module(void)
{
	pci_unregister_driver(&be_driver);
}
module_exit(be_exit_module);