/*
 * Copyright (C) 2005 - 2011 Emulex
 * All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version 2
 * as published by the Free Software Foundation. The full GNU General
 * Public License is included in this distribution in the file called COPYING.
 *
 * Contact Information:
 * linux-drivers@emulex.com
 *
 * Emulex
 * 3333 Susan Street
 * Costa Mesa, CA 92626
 */

#include <linux/prefetch.h>
#include "be.h"
#include "be_cmds.h"
#include <asm/div64.h>

MODULE_VERSION(DRV_VER);
MODULE_DEVICE_TABLE(pci, be_dev_ids);
MODULE_DESCRIPTION(DRV_DESC " " DRV_VER);
MODULE_AUTHOR("ServerEngines Corporation");
MODULE_LICENSE("GPL");

static ushort rx_frag_size = 2048;
static unsigned int num_vfs;
module_param(rx_frag_size, ushort, S_IRUGO);
module_param(num_vfs, uint, S_IRUGO);
MODULE_PARM_DESC(rx_frag_size, "Size of a fragment that holds rcvd data.");
MODULE_PARM_DESC(num_vfs, "Number of PCI VFs to initialize");

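/* Both parameters are read-only once the module is loaded, e.g.
 * (hypothetical values):
 *	modprobe be2net rx_frag_size=4096 num_vfs=4
 */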
static DEFINE_PCI_DEVICE_TABLE(be_dev_ids) = {
	{ PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID1) },
	{ PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID2) },
	{ PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID1) },
	{ PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID2) },
	{ PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID3)},
	{ PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID4)},
	{ 0 }
};
MODULE_DEVICE_TABLE(pci, be_dev_ids);
/* UE Status Low CSR */
static const char * const ue_status_low_desc[] = {
	"CEV",
	"CTX",
	"DBUF",
	"ERX",
	"Host",
	"MPU",
	"NDMA",
	"PTC ",
	"RDMA ",
	"RXF ",
	"RXIPS ",
	"RXULP0 ",
	"RXULP1 ",
	"RXULP2 ",
	"TIM ",
	"TPOST ",
	"TPRE ",
	"TXIPS ",
	"TXULP0 ",
	"TXULP1 ",
	"UC ",
	"WDMA ",
	"TXULP2 ",
	"HOST1 ",
	"P0_OB_LINK ",
	"P1_OB_LINK ",
	"HOST_GPIO ",
	"MBOX ",
	"AXGMAC0",
	"AXGMAC1",
	"JTAG",
	"MPU_INTPEND"
};
/* UE Status High CSR */
static const char * const ue_status_hi_desc[] = {
	"LPCMEMHOST",
	"MGMT_MAC",
	"PCS0ONLINE",
	"MPU_IRAM",
	"PCS1ONLINE",
	"PCTL0",
	"PCTL1",
	"PMEM",
	"RR",
	"TXPB",
	"RXPP",
	"XAUI",
	"TXP",
	"ARM",
	"IPC",
	"HOST2",
	"HOST3",
	"HOST4",
	"HOST5",
	"HOST6",
	"HOST7",
	"HOST8",
	"HOST9",
	"NETC",
	"Unknown",
	"Unknown",
	"Unknown",
	"Unknown",
	"Unknown",
	"Unknown",
	"Unknown",
	"Unknown"
};

static void be_queue_free(struct be_adapter *adapter, struct be_queue_info *q)
{
	struct be_dma_mem *mem = &q->dma_mem;
	if (mem->va)
		dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
				  mem->dma);
}

static int be_queue_alloc(struct be_adapter *adapter, struct be_queue_info *q,
		u16 len, u16 entry_size)
{
	struct be_dma_mem *mem = &q->dma_mem;

	memset(q, 0, sizeof(*q));
	q->len = len;
	q->entry_size = entry_size;
	mem->size = len * entry_size;
	mem->va = dma_alloc_coherent(&adapter->pdev->dev, mem->size, &mem->dma,
				     GFP_KERNEL);
	if (!mem->va)
		return -1;
	memset(mem->va, 0, mem->size);
	return 0;
}

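/* Toggle the host-interrupt enable bit in the membar-mapped PCI config
 * CSR; a no-op while EEH error recovery is in progress or when the bit
 * is already in the requested state.
 */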
static void be_intr_set(struct be_adapter *adapter, bool enable)
{
	u8 __iomem *addr = adapter->pcicfg + PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET;
	u32 reg = ioread32(addr);
	u32 enabled = reg & MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;

	if (adapter->eeh_err)
		return;

	if (!enabled && enable)
		reg |= MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
	else if (enabled && !enable)
		reg &= ~MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
	else
		return;

	iowrite32(reg, addr);
}

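/* The notify routines below ring a queue's doorbell: the queue id goes in
 * the low bits and the count of newly posted (or processed) entries above
 * it. The wmb() orders the descriptor writes ahead of the doorbell write
 * so the device never sees a stale ring.
 */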
static void be_rxq_notify(struct be_adapter *adapter, u16 qid, u16 posted)
{
	u32 val = 0;
	val |= qid & DB_RQ_RING_ID_MASK;
	val |= posted << DB_RQ_NUM_POSTED_SHIFT;

	wmb();
	iowrite32(val, adapter->db + DB_RQ_OFFSET);
}

static void be_txq_notify(struct be_adapter *adapter, u16 qid, u16 posted)
{
	u32 val = 0;
	val |= qid & DB_TXULP_RING_ID_MASK;
	val |= (posted & DB_TXULP_NUM_POSTED_MASK) << DB_TXULP_NUM_POSTED_SHIFT;

	wmb();
	iowrite32(val, adapter->db + DB_TXULP1_OFFSET);
}

static void be_eq_notify(struct be_adapter *adapter, u16 qid,
		bool arm, bool clear_int, u16 num_popped)
{
	u32 val = 0;
	val |= qid & DB_EQ_RING_ID_MASK;
	val |= ((qid & DB_EQ_RING_ID_EXT_MASK) <<
			DB_EQ_RING_ID_EXT_MASK_SHIFT);

	if (adapter->eeh_err)
		return;

	if (arm)
		val |= 1 << DB_EQ_REARM_SHIFT;
	if (clear_int)
		val |= 1 << DB_EQ_CLR_SHIFT;
	val |= 1 << DB_EQ_EVNT_SHIFT;
	val |= num_popped << DB_EQ_NUM_POPPED_SHIFT;
	iowrite32(val, adapter->db + DB_EQ_OFFSET);
}

void be_cq_notify(struct be_adapter *adapter, u16 qid, bool arm, u16 num_popped)
{
	u32 val = 0;
	val |= qid & DB_CQ_RING_ID_MASK;
	val |= ((qid & DB_CQ_RING_ID_EXT_MASK) <<
			DB_CQ_RING_ID_EXT_MASK_SHIFT);

	if (adapter->eeh_err)
		return;

	if (arm)
		val |= 1 << DB_CQ_REARM_SHIFT;
	val |= num_popped << DB_CQ_NUM_POPPED_SHIFT;
	iowrite32(val, adapter->db + DB_CQ_OFFSET);
}

static int be_mac_addr_set(struct net_device *netdev, void *p)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct sockaddr *addr = p;
	int status = 0;

	if (!is_valid_ether_addr(addr->sa_data))
		return -EADDRNOTAVAIL;

	/* MAC addr configuration will be done in hardware for VFs
	 * by their corresponding PFs. Just copy to netdev addr here
	 */
	if (!be_physfn(adapter))
		goto netdev_addr;

	status = be_cmd_pmac_del(adapter, adapter->if_handle,
				adapter->pmac_id, 0);
	if (status)
		return status;

	status = be_cmd_pmac_add(adapter, (u8 *)addr->sa_data,
			adapter->if_handle, &adapter->pmac_id, 0);
netdev_addr:
	if (!status)
		memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);

	return status;
}

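/* BE2 (v0), BE3 (v1) and Lancer firmware report hardware stats in
 * different layouts; the populate_*() helpers below copy each layout into
 * the common adapter->drv_stats so the rest of the driver stays
 * format-agnostic.
 */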
static void populate_be2_stats(struct be_adapter *adapter)
{
	struct be_hw_stats_v0 *hw_stats = hw_stats_from_cmd(adapter);
	struct be_pmem_stats *pmem_sts = &hw_stats->pmem;
	struct be_rxf_stats_v0 *rxf_stats = &hw_stats->rxf;
	struct be_port_rxf_stats_v0 *port_stats =
			&rxf_stats->port[adapter->port_num];
	struct be_drv_stats *drvs = &adapter->drv_stats;

	be_dws_le_to_cpu(hw_stats, sizeof(*hw_stats));
	drvs->rx_pause_frames = port_stats->rx_pause_frames;
	drvs->rx_crc_errors = port_stats->rx_crc_errors;
	drvs->rx_control_frames = port_stats->rx_control_frames;
	drvs->rx_in_range_errors = port_stats->rx_in_range_errors;
	drvs->rx_frame_too_long = port_stats->rx_frame_too_long;
	drvs->rx_dropped_runt = port_stats->rx_dropped_runt;
	drvs->rx_ip_checksum_errs = port_stats->rx_ip_checksum_errs;
	drvs->rx_tcp_checksum_errs = port_stats->rx_tcp_checksum_errs;
	drvs->rx_udp_checksum_errs = port_stats->rx_udp_checksum_errs;
	drvs->rxpp_fifo_overflow_drop = port_stats->rx_fifo_overflow;
	drvs->rx_dropped_tcp_length = port_stats->rx_dropped_tcp_length;
	drvs->rx_dropped_too_small = port_stats->rx_dropped_too_small;
	drvs->rx_dropped_too_short = port_stats->rx_dropped_too_short;
	drvs->rx_out_range_errors = port_stats->rx_out_range_errors;
	drvs->rx_input_fifo_overflow_drop = port_stats->rx_input_fifo_overflow;
	drvs->rx_dropped_header_too_small =
		port_stats->rx_dropped_header_too_small;
	drvs->rx_address_match_errors = port_stats->rx_address_match_errors;
	drvs->rx_alignment_symbol_errors =
		port_stats->rx_alignment_symbol_errors;

	drvs->tx_pauseframes = port_stats->tx_pauseframes;
	drvs->tx_controlframes = port_stats->tx_controlframes;

	if (adapter->port_num)
		drvs->jabber_events = rxf_stats->port1_jabber_events;
	else
		drvs->jabber_events = rxf_stats->port0_jabber_events;
	drvs->rx_drops_no_pbuf = rxf_stats->rx_drops_no_pbuf;
	drvs->rx_drops_no_txpb = rxf_stats->rx_drops_no_txpb;
	drvs->rx_drops_no_erx_descr = rxf_stats->rx_drops_no_erx_descr;
	drvs->rx_drops_invalid_ring = rxf_stats->rx_drops_invalid_ring;
	drvs->forwarded_packets = rxf_stats->forwarded_packets;
	drvs->rx_drops_mtu = rxf_stats->rx_drops_mtu;
	drvs->rx_drops_no_tpre_descr = rxf_stats->rx_drops_no_tpre_descr;
	drvs->rx_drops_too_many_frags = rxf_stats->rx_drops_too_many_frags;
	adapter->drv_stats.eth_red_drops = pmem_sts->eth_red_drops;
}

static void populate_be3_stats(struct be_adapter *adapter)
{
	struct be_hw_stats_v1 *hw_stats = hw_stats_from_cmd(adapter);
	struct be_pmem_stats *pmem_sts = &hw_stats->pmem;
	struct be_rxf_stats_v1 *rxf_stats = &hw_stats->rxf;
	struct be_port_rxf_stats_v1 *port_stats =
			&rxf_stats->port[adapter->port_num];
	struct be_drv_stats *drvs = &adapter->drv_stats;

	be_dws_le_to_cpu(hw_stats, sizeof(*hw_stats));
	drvs->rx_pause_frames = port_stats->rx_pause_frames;
	drvs->rx_crc_errors = port_stats->rx_crc_errors;
	drvs->rx_control_frames = port_stats->rx_control_frames;
	drvs->rx_in_range_errors = port_stats->rx_in_range_errors;
	drvs->rx_frame_too_long = port_stats->rx_frame_too_long;
	drvs->rx_dropped_runt = port_stats->rx_dropped_runt;
	drvs->rx_ip_checksum_errs = port_stats->rx_ip_checksum_errs;
	drvs->rx_tcp_checksum_errs = port_stats->rx_tcp_checksum_errs;
	drvs->rx_udp_checksum_errs = port_stats->rx_udp_checksum_errs;
	drvs->rx_dropped_tcp_length = port_stats->rx_dropped_tcp_length;
	drvs->rx_dropped_too_small = port_stats->rx_dropped_too_small;
	drvs->rx_dropped_too_short = port_stats->rx_dropped_too_short;
	drvs->rx_out_range_errors = port_stats->rx_out_range_errors;
	drvs->rx_dropped_header_too_small =
		port_stats->rx_dropped_header_too_small;
	drvs->rx_input_fifo_overflow_drop =
		port_stats->rx_input_fifo_overflow_drop;
	drvs->rx_address_match_errors = port_stats->rx_address_match_errors;
	drvs->rx_alignment_symbol_errors =
		port_stats->rx_alignment_symbol_errors;
	drvs->rxpp_fifo_overflow_drop = port_stats->rxpp_fifo_overflow_drop;
	drvs->tx_pauseframes = port_stats->tx_pauseframes;
	drvs->tx_controlframes = port_stats->tx_controlframes;
	drvs->jabber_events = port_stats->jabber_events;
	drvs->rx_drops_no_pbuf = rxf_stats->rx_drops_no_pbuf;
	drvs->rx_drops_no_txpb = rxf_stats->rx_drops_no_txpb;
	drvs->rx_drops_no_erx_descr = rxf_stats->rx_drops_no_erx_descr;
	drvs->rx_drops_invalid_ring = rxf_stats->rx_drops_invalid_ring;
	drvs->forwarded_packets = rxf_stats->forwarded_packets;
	drvs->rx_drops_mtu = rxf_stats->rx_drops_mtu;
	drvs->rx_drops_no_tpre_descr = rxf_stats->rx_drops_no_tpre_descr;
	drvs->rx_drops_too_many_frags = rxf_stats->rx_drops_too_many_frags;
	adapter->drv_stats.eth_red_drops = pmem_sts->eth_red_drops;
}

static void populate_lancer_stats(struct be_adapter *adapter)
{
	struct be_drv_stats *drvs = &adapter->drv_stats;
	struct lancer_pport_stats *pport_stats =
					pport_stats_from_cmd(adapter);

	be_dws_le_to_cpu(pport_stats, sizeof(*pport_stats));
	drvs->rx_pause_frames = pport_stats->rx_pause_frames_lo;
	drvs->rx_crc_errors = pport_stats->rx_crc_errors_lo;
	drvs->rx_control_frames = pport_stats->rx_control_frames_lo;
	drvs->rx_in_range_errors = pport_stats->rx_in_range_errors;
	drvs->rx_frame_too_long = pport_stats->rx_frames_too_long_lo;
	drvs->rx_dropped_runt = pport_stats->rx_dropped_runt;
	drvs->rx_ip_checksum_errs = pport_stats->rx_ip_checksum_errors;
	drvs->rx_tcp_checksum_errs = pport_stats->rx_tcp_checksum_errors;
	drvs->rx_udp_checksum_errs = pport_stats->rx_udp_checksum_errors;
	drvs->rx_dropped_tcp_length =
				pport_stats->rx_dropped_invalid_tcp_length;
	drvs->rx_dropped_too_small = pport_stats->rx_dropped_too_small;
	drvs->rx_dropped_too_short = pport_stats->rx_dropped_too_short;
	drvs->rx_out_range_errors = pport_stats->rx_out_of_range_errors;
	drvs->rx_dropped_header_too_small =
				pport_stats->rx_dropped_header_too_small;
	drvs->rx_input_fifo_overflow_drop = pport_stats->rx_fifo_overflow;
	drvs->rx_address_match_errors = pport_stats->rx_address_match_errors;
	drvs->rx_alignment_symbol_errors = pport_stats->rx_symbol_errors_lo;
	drvs->rxpp_fifo_overflow_drop = pport_stats->rx_fifo_overflow;
	drvs->tx_pauseframes = pport_stats->tx_pause_frames_lo;
	drvs->tx_controlframes = pport_stats->tx_control_frames_lo;
	drvs->jabber_events = pport_stats->rx_jabbers;
	drvs->rx_drops_invalid_ring = pport_stats->rx_drops_invalid_queue;
	drvs->forwarded_packets = pport_stats->num_forwards_lo;
	drvs->rx_drops_mtu = pport_stats->rx_drops_mtu_lo;
	drvs->rx_drops_too_many_frags =
				pport_stats->rx_drops_too_many_frags_lo;
}

void be_parse_stats(struct be_adapter *adapter)
{
	struct be_erx_stats_v1 *erx = be_erx_stats_from_cmd(adapter);
	struct be_rx_obj *rxo;
	int i;

	if (adapter->generation == BE_GEN3) {
		if (lancer_chip(adapter))
			populate_lancer_stats(adapter);
		else
			populate_be3_stats(adapter);
	} else {
		populate_be2_stats(adapter);
	}

	/* as erx_v1 is longer than v0, ok to use v1 defn for v0 access */
	for_all_rx_queues(adapter, rxo, i)
		rx_stats(rxo)->rx_drops_no_frags =
			erx->rx_drops_no_fragments[rxo->q.id];
}

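/* ndo_get_stats64: totals are summed across all RX/TX queues; each
 * queue's packet/byte counters are read under its u64_stats seqcount so
 * the 64-bit values stay consistent on 32-bit hosts.
 */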
static struct rtnl_link_stats64 *be_get_stats64(struct net_device *netdev,
					struct rtnl_link_stats64 *stats)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_drv_stats *drvs = &adapter->drv_stats;
	struct be_rx_obj *rxo;
	struct be_tx_obj *txo;
	u64 pkts, bytes;
	unsigned int start;
	int i;

	for_all_rx_queues(adapter, rxo, i) {
		const struct be_rx_stats *rx_stats = rx_stats(rxo);
		do {
			start = u64_stats_fetch_begin_bh(&rx_stats->sync);
			pkts = rx_stats(rxo)->rx_pkts;
			bytes = rx_stats(rxo)->rx_bytes;
		} while (u64_stats_fetch_retry_bh(&rx_stats->sync, start));
		stats->rx_packets += pkts;
		stats->rx_bytes += bytes;
		stats->multicast += rx_stats(rxo)->rx_mcast_pkts;
		stats->rx_dropped += rx_stats(rxo)->rx_drops_no_skbs +
					rx_stats(rxo)->rx_drops_no_frags;
	}

	for_all_tx_queues(adapter, txo, i) {
		const struct be_tx_stats *tx_stats = tx_stats(txo);
		do {
			start = u64_stats_fetch_begin_bh(&tx_stats->sync);
			pkts = tx_stats(txo)->tx_pkts;
			bytes = tx_stats(txo)->tx_bytes;
		} while (u64_stats_fetch_retry_bh(&tx_stats->sync, start));
		stats->tx_packets += pkts;
		stats->tx_bytes += bytes;
	}

	/* bad pkts received */
	stats->rx_errors = drvs->rx_crc_errors +
		drvs->rx_alignment_symbol_errors +
		drvs->rx_in_range_errors +
		drvs->rx_out_range_errors +
		drvs->rx_frame_too_long +
		drvs->rx_dropped_too_small +
		drvs->rx_dropped_too_short +
		drvs->rx_dropped_header_too_small +
		drvs->rx_dropped_tcp_length +
		drvs->rx_dropped_runt;

	/* detailed rx errors */
	stats->rx_length_errors = drvs->rx_in_range_errors +
		drvs->rx_out_range_errors +
		drvs->rx_frame_too_long;

	stats->rx_crc_errors = drvs->rx_crc_errors;

	/* frame alignment errors */
	stats->rx_frame_errors = drvs->rx_alignment_symbol_errors;

	/* receiver fifo overrun */
	/* drops_no_pbuf is not per i/f, it's per BE card */
	stats->rx_fifo_errors = drvs->rxpp_fifo_overflow_drop +
				drvs->rx_input_fifo_overflow_drop +
				drvs->rx_drops_no_pbuf;
	return stats;
}

void be_link_status_update(struct be_adapter *adapter, bool link_up)
{
	struct net_device *netdev = adapter->netdev;

	/* If link came up or went down */
	if (adapter->link_up != link_up) {
		adapter->link_speed = -1;
		if (link_up) {
			netif_carrier_on(netdev);
			printk(KERN_INFO "%s: Link up\n", netdev->name);
		} else {
			netif_carrier_off(netdev);
			printk(KERN_INFO "%s: Link down\n", netdev->name);
		}
		adapter->link_up = link_up;
	}
}

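/* Update per-queue TX counters for a transmit request; the
 * u64_stats_update_begin/end pair lets be_get_stats64() read the 64-bit
 * counters locklessly on 32-bit hosts.
 */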
static void be_tx_stats_update(struct be_tx_obj *txo,
			u32 wrb_cnt, u32 copied, u32 gso_segs, bool stopped)
{
	struct be_tx_stats *stats = tx_stats(txo);

	u64_stats_update_begin(&stats->sync);
	stats->tx_reqs++;
	stats->tx_wrbs += wrb_cnt;
	stats->tx_bytes += copied;
	stats->tx_pkts += (gso_segs ? gso_segs : 1);
	if (stopped)
		stats->tx_stops++;
	u64_stats_update_end(&stats->sync);
}

/* Determine number of WRB entries needed to xmit data in an skb */
static u32 wrb_cnt_for_skb(struct be_adapter *adapter, struct sk_buff *skb,
			bool *dummy)
{
	int cnt = (skb->len > skb->data_len);

	cnt += skb_shinfo(skb)->nr_frags;

	/* to account for hdr wrb */
	cnt++;
	if (lancer_chip(adapter) || !(cnt & 1)) {
		*dummy = false;
	} else {
		/* add a dummy to make it an even num */
		cnt++;
		*dummy = true;
	}
	BUG_ON(cnt > BE_MAX_TX_FRAG_COUNT);
	return cnt;
}

static inline void wrb_fill(struct be_eth_wrb *wrb, u64 addr, int len)
{
	wrb->frag_pa_hi = upper_32_bits(addr);
	wrb->frag_pa_lo = addr & 0xFFFFFFFF;
	wrb->frag_len = len & ETH_WRB_FRAG_LEN_MASK;
}

static void wrb_fill_hdr(struct be_adapter *adapter, struct be_eth_hdr_wrb *hdr,
		struct sk_buff *skb, u32 wrb_cnt, u32 len)
{
	u8 vlan_prio = 0;
	u16 vlan_tag = 0;

	memset(hdr, 0, sizeof(*hdr));

	AMAP_SET_BITS(struct amap_eth_hdr_wrb, crc, hdr, 1);

	if (skb_is_gso(skb)) {
		AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso, hdr, 1);
		AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso_mss,
			hdr, skb_shinfo(skb)->gso_size);
		if (skb_is_gso_v6(skb) && !lancer_chip(adapter))
			AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso6, hdr, 1);
		if (lancer_chip(adapter) && adapter->sli_family ==
							LANCER_A0_SLI_FAMILY) {
			AMAP_SET_BITS(struct amap_eth_hdr_wrb, ipcs, hdr, 1);
			if (is_tcp_pkt(skb))
				AMAP_SET_BITS(struct amap_eth_hdr_wrb,
								tcpcs, hdr, 1);
			else if (is_udp_pkt(skb))
				AMAP_SET_BITS(struct amap_eth_hdr_wrb,
								udpcs, hdr, 1);
		}
	} else if (skb->ip_summed == CHECKSUM_PARTIAL) {
		if (is_tcp_pkt(skb))
			AMAP_SET_BITS(struct amap_eth_hdr_wrb, tcpcs, hdr, 1);
		else if (is_udp_pkt(skb))
			AMAP_SET_BITS(struct amap_eth_hdr_wrb, udpcs, hdr, 1);
	}

	if (vlan_tx_tag_present(skb)) {
		AMAP_SET_BITS(struct amap_eth_hdr_wrb, vlan, hdr, 1);
		vlan_tag = vlan_tx_tag_get(skb);
		vlan_prio = (vlan_tag & VLAN_PRIO_MASK) >> VLAN_PRIO_SHIFT;
		/* If vlan priority provided by OS is NOT in available bmap */
		if (!(adapter->vlan_prio_bmap & (1 << vlan_prio)))
			vlan_tag = (vlan_tag & ~VLAN_PRIO_MASK) |
					adapter->recommended_prio;
		AMAP_SET_BITS(struct amap_eth_hdr_wrb, vlan_tag, hdr, vlan_tag);
	}

	AMAP_SET_BITS(struct amap_eth_hdr_wrb, event, hdr, 1);
	AMAP_SET_BITS(struct amap_eth_hdr_wrb, complete, hdr, 1);
	AMAP_SET_BITS(struct amap_eth_hdr_wrb, num_wrb, hdr, wrb_cnt);
	AMAP_SET_BITS(struct amap_eth_hdr_wrb, len, hdr, len);
}

static void unmap_tx_frag(struct device *dev, struct be_eth_wrb *wrb,
		bool unmap_single)
{
	dma_addr_t dma;

	be_dws_le_to_cpu(wrb, sizeof(*wrb));

	dma = (u64)wrb->frag_pa_hi << 32 | (u64)wrb->frag_pa_lo;
	if (wrb->frag_len) {
		if (unmap_single)
			dma_unmap_single(dev, dma, wrb->frag_len,
					 DMA_TO_DEVICE);
		else
			dma_unmap_page(dev, dma, wrb->frag_len, DMA_TO_DEVICE);
	}
}

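/* Builds the WRB chain for one skb: a header WRB, a fragment WRB for the
 * linear part (if any), one per page fragment, and an optional dummy WRB
 * to keep the chain length even on chips that require it. Returns the
 * number of payload bytes mapped, or 0 on a DMA mapping failure after
 * unwinding any mappings already made.
 */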
static int make_tx_wrbs(struct be_adapter *adapter, struct be_queue_info *txq,
		struct sk_buff *skb, u32 wrb_cnt, bool dummy_wrb)
{
	dma_addr_t busaddr;
	int i, copied = 0;
	struct device *dev = &adapter->pdev->dev;
	struct sk_buff *first_skb = skb;
	struct be_eth_wrb *wrb;
	struct be_eth_hdr_wrb *hdr;
	bool map_single = false;
	u16 map_head;

	hdr = queue_head_node(txq);
	queue_head_inc(txq);
	map_head = txq->head;

	if (skb->len > skb->data_len) {
		int len = skb_headlen(skb);
		busaddr = dma_map_single(dev, skb->data, len, DMA_TO_DEVICE);
		if (dma_mapping_error(dev, busaddr))
			goto dma_err;
		map_single = true;
		wrb = queue_head_node(txq);
		wrb_fill(wrb, busaddr, len);
		be_dws_cpu_to_le(wrb, sizeof(*wrb));
		queue_head_inc(txq);
		copied += len;
	}

	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		struct skb_frag_struct *frag =
			&skb_shinfo(skb)->frags[i];
		busaddr = dma_map_page(dev, frag->page, frag->page_offset,
				       frag->size, DMA_TO_DEVICE);
		if (dma_mapping_error(dev, busaddr))
			goto dma_err;
		wrb = queue_head_node(txq);
		wrb_fill(wrb, busaddr, frag->size);
		be_dws_cpu_to_le(wrb, sizeof(*wrb));
		queue_head_inc(txq);
		copied += frag->size;
	}

	if (dummy_wrb) {
		wrb = queue_head_node(txq);
		wrb_fill(wrb, 0, 0);
		be_dws_cpu_to_le(wrb, sizeof(*wrb));
		queue_head_inc(txq);
	}

	wrb_fill_hdr(adapter, hdr, first_skb, wrb_cnt, copied);
	be_dws_cpu_to_le(hdr, sizeof(*hdr));

	return copied;
dma_err:
	txq->head = map_head;
	while (copied) {
		wrb = queue_head_node(txq);
		unmap_tx_frag(dev, wrb, map_single);
		map_single = false;
		copied -= wrb->frag_len;
		queue_head_inc(txq);
	}
	return 0;
}

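/* ndo_start_xmit: map the skb into TX WRBs, record it for completion
 * processing, stop the subqueue *before* ringing the doorbell if the next
 * packet might not fit, then notify the hardware.
 */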
static netdev_tx_t be_xmit(struct sk_buff *skb,
			struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_tx_obj *txo = &adapter->tx_obj[skb_get_queue_mapping(skb)];
	struct be_queue_info *txq = &txo->q;
	u32 wrb_cnt = 0, copied = 0;
	u32 start = txq->head;
	bool dummy_wrb, stopped = false;

	wrb_cnt = wrb_cnt_for_skb(adapter, skb, &dummy_wrb);

	copied = make_tx_wrbs(adapter, txq, skb, wrb_cnt, dummy_wrb);
	if (copied) {
		/* record the sent skb in the sent_skb table */
		BUG_ON(txo->sent_skb_list[start]);
		txo->sent_skb_list[start] = skb;

		/* Ensure txq has space for the next skb; Else stop the queue
		 * *BEFORE* ringing the tx doorbell, so that we serialize the
		 * tx compls of the current transmit which'll wake up the queue
		 */
		atomic_add(wrb_cnt, &txq->used);
		if ((BE_MAX_TX_FRAG_COUNT + atomic_read(&txq->used)) >=
								txq->len) {
			netif_stop_subqueue(netdev, skb_get_queue_mapping(skb));
			stopped = true;
		}

		be_txq_notify(adapter, txq->id, wrb_cnt);

		be_tx_stats_update(txo, wrb_cnt, copied,
				skb_shinfo(skb)->gso_segs, stopped);
	} else {
		txq->head = start;
		dev_kfree_skb_any(skb);
	}
	return NETDEV_TX_OK;
}

static int be_change_mtu(struct net_device *netdev, int new_mtu)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	if (new_mtu < BE_MIN_MTU ||
			new_mtu > (BE_MAX_JUMBO_FRAME_SIZE -
					(ETH_HLEN + ETH_FCS_LEN))) {
		dev_info(&adapter->pdev->dev,
			"MTU must be between %d and %d bytes\n",
			BE_MIN_MTU,
			(BE_MAX_JUMBO_FRAME_SIZE - (ETH_HLEN + ETH_FCS_LEN)));
		return -EINVAL;
	}
	dev_info(&adapter->pdev->dev, "MTU changed from %d to %d bytes\n",
			netdev->mtu, new_mtu);
	netdev->mtu = new_mtu;
	return 0;
}

/*
 * A max of 64 (BE_NUM_VLANS_SUPPORTED) vlans can be configured in BE.
 * If the user configures more, place BE in vlan promiscuous mode.
 */
static int be_vid_config(struct be_adapter *adapter, bool vf, u32 vf_num)
{
	u16 vtag[BE_NUM_VLANS_SUPPORTED];
	u16 ntags = 0, i;
	int status = 0;
	u32 if_handle;

	if (vf) {
		if_handle = adapter->vf_cfg[vf_num].vf_if_handle;
		vtag[0] = cpu_to_le16(adapter->vf_cfg[vf_num].vf_vlan_tag);
		status = be_cmd_vlan_config(adapter, if_handle, vtag, 1, 1, 0);
	}

	if (adapter->vlans_added <= adapter->max_vlans) {
		/* Construct VLAN Table to give to HW */
		for (i = 0; i < VLAN_N_VID; i++) {
			if (adapter->vlan_tag[i]) {
				vtag[ntags] = cpu_to_le16(i);
				ntags++;
			}
		}
		status = be_cmd_vlan_config(adapter, adapter->if_handle,
					vtag, ntags, 1, 0);
	} else {
		status = be_cmd_vlan_config(adapter, adapter->if_handle,
					NULL, 0, 1, 1);
	}

	return status;
}

static void be_vlan_add_vid(struct net_device *netdev, u16 vid)
{
	struct be_adapter *adapter = netdev_priv(netdev);

	adapter->vlans_added++;
	if (!be_physfn(adapter))
		return;

	adapter->vlan_tag[vid] = 1;
	if (adapter->vlans_added <= (adapter->max_vlans + 1))
		be_vid_config(adapter, false, 0);
}

static void be_vlan_rem_vid(struct net_device *netdev, u16 vid)
{
	struct be_adapter *adapter = netdev_priv(netdev);

	adapter->vlans_added--;

	if (!be_physfn(adapter))
		return;

	adapter->vlan_tag[vid] = 0;
	if (adapter->vlans_added <= adapter->max_vlans)
		be_vid_config(adapter, false, 0);
}

static void be_set_multicast_list(struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);

	if (netdev->flags & IFF_PROMISC) {
		be_cmd_promiscuous_config(adapter, true);
		adapter->promiscuous = true;
		goto done;
	}

	/* BE was previously in promiscuous mode; disable it */
	if (adapter->promiscuous) {
		adapter->promiscuous = false;
		be_cmd_promiscuous_config(adapter, false);
	}

	/* Enable multicast promisc if num configured exceeds what we support */
	if (netdev->flags & IFF_ALLMULTI ||
	    netdev_mc_count(netdev) > BE_MAX_MC) {
		be_cmd_multicast_set(adapter, adapter->if_handle, NULL,
				&adapter->mc_cmd_mem);
		goto done;
	}

	be_cmd_multicast_set(adapter, adapter->if_handle, netdev,
			&adapter->mc_cmd_mem);
done:
	return;
}

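/* SR-IOV ndo callbacks: the PF owns VF configuration, so each handler
 * checks that SR-IOV is enabled and the VF index is in range before
 * issuing the firmware command on the VF's behalf.
 */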
static int be_set_vf_mac(struct net_device *netdev, int vf, u8 *mac)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	int status;

	if (!adapter->sriov_enabled)
		return -EPERM;

	if (!is_valid_ether_addr(mac) || (vf >= num_vfs))
		return -EINVAL;

	if (adapter->vf_cfg[vf].vf_pmac_id != BE_INVALID_PMAC_ID)
		status = be_cmd_pmac_del(adapter,
					adapter->vf_cfg[vf].vf_if_handle,
					adapter->vf_cfg[vf].vf_pmac_id, vf + 1);

	status = be_cmd_pmac_add(adapter, mac,
				adapter->vf_cfg[vf].vf_if_handle,
				&adapter->vf_cfg[vf].vf_pmac_id, vf + 1);

	if (status)
		dev_err(&adapter->pdev->dev, "MAC %pM set on VF %d Failed\n",
				mac, vf);
	else
		memcpy(adapter->vf_cfg[vf].vf_mac_addr, mac, ETH_ALEN);

	return status;
}

static int be_get_vf_config(struct net_device *netdev, int vf,
			struct ifla_vf_info *vi)
{
	struct be_adapter *adapter = netdev_priv(netdev);

	if (!adapter->sriov_enabled)
		return -EPERM;

	if (vf >= num_vfs)
		return -EINVAL;

	vi->vf = vf;
	vi->tx_rate = adapter->vf_cfg[vf].vf_tx_rate;
	vi->vlan = adapter->vf_cfg[vf].vf_vlan_tag;
	vi->qos = 0;
	memcpy(&vi->mac, adapter->vf_cfg[vf].vf_mac_addr, ETH_ALEN);

	return 0;
}

static int be_set_vf_vlan(struct net_device *netdev,
			int vf, u16 vlan, u8 qos)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	int status = 0;

	if (!adapter->sriov_enabled)
		return -EPERM;

	if ((vf >= num_vfs) || (vlan > 4095))
		return -EINVAL;

	if (vlan) {
		adapter->vf_cfg[vf].vf_vlan_tag = vlan;
		adapter->vlans_added++;
	} else {
		adapter->vf_cfg[vf].vf_vlan_tag = 0;
		adapter->vlans_added--;
	}

	status = be_vid_config(adapter, true, vf);

	if (status)
		dev_info(&adapter->pdev->dev,
				"VLAN %d config on VF %d failed\n", vlan, vf);
	return status;
}

static int be_set_vf_tx_rate(struct net_device *netdev,
			int vf, int rate)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	int status = 0;

	if (!adapter->sriov_enabled)
		return -EPERM;

	if ((vf >= num_vfs) || (rate < 0))
		return -EINVAL;

	if (rate > 10000)
		rate = 10000;

	adapter->vf_cfg[vf].vf_tx_rate = rate;
	status = be_cmd_set_qos(adapter, rate / 10, vf + 1);

	if (status)
		dev_info(&adapter->pdev->dev,
				"tx rate %d on VF %d failed\n", rate, vf);
	return status;
}

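/* Adaptive interrupt coalescing: once a second, compute the RX packet
 * rate and map it to an EQ delay clamped to [min_eqd, max_eqd]; low
 * rates disable the delay entirely in favor of latency.
 */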
static void be_rx_eqd_update(struct be_adapter *adapter, struct be_rx_obj *rxo)
{
	struct be_eq_obj *rx_eq = &rxo->rx_eq;
	struct be_rx_stats *stats = rx_stats(rxo);
	ulong now = jiffies;
	ulong delta = now - stats->rx_jiffies;
	u64 pkts;
	unsigned int start, eqd;

	if (!rx_eq->enable_aic)
		return;

	/* Wrapped around */
	if (time_before(now, stats->rx_jiffies)) {
		stats->rx_jiffies = now;
		return;
	}

	/* Update once a second */
	if (delta < HZ)
		return;

	do {
		start = u64_stats_fetch_begin_bh(&stats->sync);
		pkts = stats->rx_pkts;
	} while (u64_stats_fetch_retry_bh(&stats->sync, start));

	stats->rx_pps = (pkts - stats->rx_pkts_prev) / (delta / HZ);
	stats->rx_pkts_prev = pkts;
	stats->rx_jiffies = now;
	eqd = stats->rx_pps / 110000;
	eqd = eqd << 3;
	if (eqd > rx_eq->max_eqd)
		eqd = rx_eq->max_eqd;
	if (eqd < rx_eq->min_eqd)
		eqd = rx_eq->min_eqd;
	if (eqd < 10)
		eqd = 0;
	if (eqd != rx_eq->cur_eqd) {
		be_cmd_modify_eqd(adapter, rx_eq->q.id, eqd);
		rx_eq->cur_eqd = eqd;
	}
}

static void be_rx_stats_update(struct be_rx_obj *rxo,
		struct be_rx_compl_info *rxcp)
{
	struct be_rx_stats *stats = rx_stats(rxo);

	u64_stats_update_begin(&stats->sync);
	stats->rx_compl++;
	stats->rx_bytes += rxcp->pkt_size;
	stats->rx_pkts++;
	if (rxcp->pkt_type == BE_MULTICAST_PACKET)
		stats->rx_mcast_pkts++;
	if (rxcp->err)
		stats->rx_compl_err++;
	u64_stats_update_end(&stats->sync);
}

static inline bool csum_passed(struct be_rx_compl_info *rxcp)
{
	/* L4 checksum is not reliable for non TCP/UDP packets.
	 * Also ignore ipcksm for ipv6 pkts */
	return (rxcp->tcpf || rxcp->udpf) && rxcp->l4_csum &&
				(rxcp->ip_csum || rxcp->ipv6);
}

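/* RX buffers are fragments carved out of larger DMA-mapped pages, so
 * several fragments share one mapping; the page is unmapped only when
 * its last fragment (last_page_user) is consumed.
 */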
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700978static struct be_rx_page_info *
Sathya Perla3abcded2010-10-03 22:12:27 -0700979get_rx_page_info(struct be_adapter *adapter,
980 struct be_rx_obj *rxo,
981 u16 frag_idx)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700982{
983 struct be_rx_page_info *rx_page_info;
Sathya Perla3abcded2010-10-03 22:12:27 -0700984 struct be_queue_info *rxq = &rxo->q;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700985
Sathya Perla3abcded2010-10-03 22:12:27 -0700986 rx_page_info = &rxo->page_info_tbl[frag_idx];
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700987 BUG_ON(!rx_page_info->page);
988
Ajit Khaparde205859a2010-02-09 01:34:21 +0000989 if (rx_page_info->last_page_user) {
Ivan Vecera2b7bceb2011-02-02 08:05:12 +0000990 dma_unmap_page(&adapter->pdev->dev,
991 dma_unmap_addr(rx_page_info, bus),
992 adapter->big_page_size, DMA_FROM_DEVICE);
Ajit Khaparde205859a2010-02-09 01:34:21 +0000993 rx_page_info->last_page_user = false;
994 }
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700995
996 atomic_dec(&rxq->used);
997 return rx_page_info;
998}
999
1000/* Throwaway the data in the Rx completion */
1001static void be_rx_compl_discard(struct be_adapter *adapter,
Sathya Perla3abcded2010-10-03 22:12:27 -07001002 struct be_rx_obj *rxo,
Sathya Perla2e588f82011-03-11 02:49:26 +00001003 struct be_rx_compl_info *rxcp)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001004{
Sathya Perla3abcded2010-10-03 22:12:27 -07001005 struct be_queue_info *rxq = &rxo->q;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001006 struct be_rx_page_info *page_info;
Sathya Perla2e588f82011-03-11 02:49:26 +00001007 u16 i, num_rcvd = rxcp->num_rcvd;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001008
Padmanabh Ratnakare80d9da2011-03-07 03:07:58 +00001009 for (i = 0; i < num_rcvd; i++) {
Sathya Perla2e588f82011-03-11 02:49:26 +00001010 page_info = get_rx_page_info(adapter, rxo, rxcp->rxq_idx);
Padmanabh Ratnakare80d9da2011-03-07 03:07:58 +00001011 put_page(page_info->page);
1012 memset(page_info, 0, sizeof(*page_info));
Sathya Perla2e588f82011-03-11 02:49:26 +00001013 index_inc(&rxcp->rxq_idx, rxq->len);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001014 }
1015}
1016
1017/*
1018 * skb_fill_rx_data forms a complete skb for an ether frame
1019 * indicated by rxcp.
1020 */
Sathya Perla3abcded2010-10-03 22:12:27 -07001021static void skb_fill_rx_data(struct be_adapter *adapter, struct be_rx_obj *rxo,
Sathya Perla2e588f82011-03-11 02:49:26 +00001022 struct sk_buff *skb, struct be_rx_compl_info *rxcp)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001023{
Sathya Perla3abcded2010-10-03 22:12:27 -07001024 struct be_queue_info *rxq = &rxo->q;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001025 struct be_rx_page_info *page_info;
Sathya Perla2e588f82011-03-11 02:49:26 +00001026 u16 i, j;
1027 u16 hdr_len, curr_frag_len, remaining;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001028 u8 *start;
1029
Sathya Perla2e588f82011-03-11 02:49:26 +00001030 page_info = get_rx_page_info(adapter, rxo, rxcp->rxq_idx);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001031 start = page_address(page_info->page) + page_info->page_offset;
1032 prefetch(start);
1033
1034 /* Copy data in the first descriptor of this completion */
Sathya Perla2e588f82011-03-11 02:49:26 +00001035 curr_frag_len = min(rxcp->pkt_size, rx_frag_size);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001036
1037 /* Copy the header portion into skb_data */
Sathya Perla2e588f82011-03-11 02:49:26 +00001038 hdr_len = min(BE_HDR_LEN, curr_frag_len);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001039 memcpy(skb->data, start, hdr_len);
1040 skb->len = curr_frag_len;
1041 if (curr_frag_len <= BE_HDR_LEN) { /* tiny packet */
1042 /* Complete packet has now been moved to data */
1043 put_page(page_info->page);
1044 skb->data_len = 0;
1045 skb->tail += curr_frag_len;
1046 } else {
1047 skb_shinfo(skb)->nr_frags = 1;
1048 skb_shinfo(skb)->frags[0].page = page_info->page;
1049 skb_shinfo(skb)->frags[0].page_offset =
1050 page_info->page_offset + hdr_len;
1051 skb_shinfo(skb)->frags[0].size = curr_frag_len - hdr_len;
1052 skb->data_len = curr_frag_len - hdr_len;
1053 skb->tail += hdr_len;
1054 }
Ajit Khaparde205859a2010-02-09 01:34:21 +00001055 page_info->page = NULL;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001056
Sathya Perla2e588f82011-03-11 02:49:26 +00001057 if (rxcp->pkt_size <= rx_frag_size) {
1058 BUG_ON(rxcp->num_rcvd != 1);
1059 return;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001060 }
1061
1062 /* More frags present for this completion */
Sathya Perla2e588f82011-03-11 02:49:26 +00001063 index_inc(&rxcp->rxq_idx, rxq->len);
1064 remaining = rxcp->pkt_size - curr_frag_len;
1065 for (i = 1, j = 0; i < rxcp->num_rcvd; i++) {
1066 page_info = get_rx_page_info(adapter, rxo, rxcp->rxq_idx);
1067 curr_frag_len = min(remaining, rx_frag_size);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001068
Ajit Khapardebd46cb62009-06-26 02:51:07 +00001069 /* Coalesce all frags from the same physical page in one slot */
1070 if (page_info->page_offset == 0) {
1071 /* Fresh page */
1072 j++;
1073 skb_shinfo(skb)->frags[j].page = page_info->page;
1074 skb_shinfo(skb)->frags[j].page_offset =
1075 page_info->page_offset;
1076 skb_shinfo(skb)->frags[j].size = 0;
1077 skb_shinfo(skb)->nr_frags++;
1078 } else {
1079 put_page(page_info->page);
1080 }
1081
1082 skb_shinfo(skb)->frags[j].size += curr_frag_len;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001083 skb->len += curr_frag_len;
1084 skb->data_len += curr_frag_len;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001085
Sathya Perla2e588f82011-03-11 02:49:26 +00001086 remaining -= curr_frag_len;
1087 index_inc(&rxcp->rxq_idx, rxq->len);
Ajit Khaparde205859a2010-02-09 01:34:21 +00001088 page_info->page = NULL;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001089 }
Ajit Khapardebd46cb62009-06-26 02:51:07 +00001090 BUG_ON(j > MAX_SKB_FRAGS);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001091}
1092
Ajit Khaparde5be93b92009-07-21 12:36:19 -07001093/* Process the RX completion indicated by rxcp when GRO is disabled */
static void be_rx_compl_process(struct be_adapter *adapter,
			struct be_rx_obj *rxo,
			struct be_rx_compl_info *rxcp)
{
	struct net_device *netdev = adapter->netdev;
	struct sk_buff *skb;

	skb = netdev_alloc_skb_ip_align(netdev, BE_HDR_LEN);
	if (unlikely(!skb)) {
		rx_stats(rxo)->rx_drops_no_skbs++;
		be_rx_compl_discard(adapter, rxo, rxcp);
		return;
	}

	skb_fill_rx_data(adapter, rxo, skb, rxcp);

	if (likely((netdev->features & NETIF_F_RXCSUM) && csum_passed(rxcp)))
		skb->ip_summed = CHECKSUM_UNNECESSARY;
	else
		skb_checksum_none_assert(skb);

	skb->truesize = skb->len + sizeof(struct sk_buff);
	skb->protocol = eth_type_trans(skb, netdev);
	if (adapter->netdev->features & NETIF_F_RXHASH)
		skb->rxhash = rxcp->rss_hash;

	if (unlikely(rxcp->vlanf))
		__vlan_hwaccel_put_tag(skb, rxcp->vlan_tag);

	netif_receive_skb(skb);
}

/* Process the RX completion indicated by rxcp when GRO is enabled */
static void be_rx_compl_process_gro(struct be_adapter *adapter,
		struct be_rx_obj *rxo,
		struct be_rx_compl_info *rxcp)
{
	struct be_rx_page_info *page_info;
	struct sk_buff *skb = NULL;
	struct be_queue_info *rxq = &rxo->q;
	struct be_eq_obj *eq_obj = &rxo->rx_eq;
	u16 remaining, curr_frag_len;
	u16 i, j;

	skb = napi_get_frags(&eq_obj->napi);
	if (!skb) {
		be_rx_compl_discard(adapter, rxo, rxcp);
		return;
	}

	remaining = rxcp->pkt_size;
	for (i = 0, j = -1; i < rxcp->num_rcvd; i++) {
		page_info = get_rx_page_info(adapter, rxo, rxcp->rxq_idx);

		curr_frag_len = min(remaining, rx_frag_size);

		/* Coalesce all frags from the same physical page in one slot */
		if (i == 0 || page_info->page_offset == 0) {
			/* First frag or Fresh page */
			j++;
			skb_shinfo(skb)->frags[j].page = page_info->page;
			skb_shinfo(skb)->frags[j].page_offset =
							page_info->page_offset;
			skb_shinfo(skb)->frags[j].size = 0;
		} else {
			put_page(page_info->page);
		}
		skb_shinfo(skb)->frags[j].size += curr_frag_len;

		remaining -= curr_frag_len;
		index_inc(&rxcp->rxq_idx, rxq->len);
		memset(page_info, 0, sizeof(*page_info));
	}
	BUG_ON(j > MAX_SKB_FRAGS);

	skb_shinfo(skb)->nr_frags = j + 1;
	skb->len = rxcp->pkt_size;
	skb->data_len = rxcp->pkt_size;
	skb->truesize += rxcp->pkt_size;
	skb->ip_summed = CHECKSUM_UNNECESSARY;
	if (adapter->netdev->features & NETIF_F_RXHASH)
		skb->rxhash = rxcp->rss_hash;

	if (unlikely(rxcp->vlanf))
		__vlan_hwaccel_put_tag(skb, rxcp->vlan_tag);

	napi_gro_frags(&eq_obj->napi);
}

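/* Populate the adapter-agnostic rxcp from a v1 (BE3 native mode) RX
 * completion descriptor; the v0/v1 parse variants exist only because the
 * field layout of the completion differs between the two formats.
 */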
static void be_parse_rx_compl_v1(struct be_adapter *adapter,
				struct be_eth_rx_compl *compl,
				struct be_rx_compl_info *rxcp)
{
	rxcp->pkt_size =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v1, pktsize, compl);
	rxcp->vlanf = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, vtp, compl);
	rxcp->err = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, err, compl);
	rxcp->tcpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, tcpf, compl);
	rxcp->udpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, udpf, compl);
	rxcp->ip_csum =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v1, ipcksm, compl);
	rxcp->l4_csum =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v1, l4_cksm, compl);
	rxcp->ipv6 =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v1, ip_version, compl);
	rxcp->rxq_idx =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v1, fragndx, compl);
	rxcp->num_rcvd =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v1, numfrags, compl);
	rxcp->pkt_type =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v1, cast_enc, compl);
	rxcp->rss_hash =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v1, rsshash, compl);
	if (rxcp->vlanf) {
		rxcp->vtm = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, vtm,
					  compl);
		rxcp->vlan_tag = AMAP_GET_BITS(struct amap_eth_rx_compl_v1,
					       vlan_tag, compl);
	}
}

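/* As above, but for the v0 completion layout used when BE3 native mode is
 * not enabled.
 */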
static void be_parse_rx_compl_v0(struct be_adapter *adapter,
				struct be_eth_rx_compl *compl,
				struct be_rx_compl_info *rxcp)
{
	rxcp->pkt_size =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v0, pktsize, compl);
	rxcp->vlanf = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, vtp, compl);
	rxcp->err = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, err, compl);
	rxcp->tcpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, tcpf, compl);
	rxcp->udpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, udpf, compl);
	rxcp->ip_csum =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v0, ipcksm, compl);
	rxcp->l4_csum =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v0, l4_cksm, compl);
	rxcp->ipv6 =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v0, ip_version, compl);
	rxcp->rxq_idx =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v0, fragndx, compl);
	rxcp->num_rcvd =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v0, numfrags, compl);
	rxcp->pkt_type =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v0, cast_enc, compl);
	rxcp->rss_hash =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v0, rsshash, compl);
	if (rxcp->vlanf) {
		rxcp->vtm = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, vtm,
					  compl);
		rxcp->vlan_tag = AMAP_GET_BITS(struct amap_eth_rx_compl_v0,
					       vlan_tag, compl);
	}
}

static struct be_rx_compl_info *be_rx_compl_get(struct be_rx_obj *rxo)
{
	struct be_eth_rx_compl *compl = queue_tail_node(&rxo->cq);
	struct be_rx_compl_info *rxcp = &rxo->rxcp;
	struct be_adapter *adapter = rxo->adapter;

	/* For checking the valid bit it is Ok to use either definition as the
	 * valid bit is at the same position in both v0 and v1 Rx compl */
	if (compl->dw[offsetof(struct amap_eth_rx_compl_v1, valid) / 32] == 0)
		return NULL;

	rmb();
	be_dws_le_to_cpu(compl, sizeof(*compl));

	if (adapter->be3_native)
		be_parse_rx_compl_v1(adapter, compl, rxcp);
	else
		be_parse_rx_compl_v0(adapter, compl, rxcp);

	if (rxcp->vlanf) {
		/* vlanf could be wrongly set in some cards;
		 * ignore it if vtm is not set */
		if ((adapter->function_mode & 0x400) && !rxcp->vtm)
			rxcp->vlanf = 0;

		if (!lancer_chip(adapter))
			rxcp->vlan_tag = swab16(rxcp->vlan_tag);

		if (((adapter->pvid & VLAN_VID_MASK) ==
		     (rxcp->vlan_tag & VLAN_VID_MASK)) &&
		    !adapter->vlan_tag[rxcp->vlan_tag])
			rxcp->vlanf = 0;
	}

	/* As the compl has been parsed, reset it; we won't touch it again */
	compl->dw[offsetof(struct amap_eth_rx_compl_v1, valid) / 32] = 0;

	queue_tail_inc(&rxo->cq);
	return rxcp;
}

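/* Allocate the "big page" that is carved into rx_frag_size fragments;
 * multi-page allocations are made compound so that the per-fragment
 * get_page()/put_page() refcounting in be_post_rx_frags() is accounted
 * against the head page.
 */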
static inline struct page *be_alloc_pages(u32 size, gfp_t gfp)
{
	u32 order = get_order(size);

	if (order > 0)
		gfp |= __GFP_COMP;
	return alloc_pages(gfp, order);
}

/*
 * Allocate a page, split it to fragments of size rx_frag_size and post as
 * receive buffers to BE
 */
static void be_post_rx_frags(struct be_rx_obj *rxo, gfp_t gfp)
{
	struct be_adapter *adapter = rxo->adapter;
	struct be_rx_page_info *page_info_tbl = rxo->page_info_tbl;
	struct be_rx_page_info *page_info = NULL, *prev_page_info = NULL;
	struct be_queue_info *rxq = &rxo->q;
	struct page *pagep = NULL;
	struct be_eth_rx_d *rxd;
	u64 page_dmaaddr = 0, frag_dmaaddr;
	u32 posted, page_offset = 0;

	page_info = &rxo->page_info_tbl[rxq->head];
	for (posted = 0; posted < MAX_RX_POST && !page_info->page; posted++) {
		if (!pagep) {
			pagep = be_alloc_pages(adapter->big_page_size, gfp);
			if (unlikely(!pagep)) {
				rx_stats(rxo)->rx_post_fail++;
				break;
			}
			page_dmaaddr = dma_map_page(&adapter->pdev->dev, pagep,
						    0, adapter->big_page_size,
						    DMA_FROM_DEVICE);
			page_info->page_offset = 0;
		} else {
			get_page(pagep);
			page_info->page_offset = page_offset + rx_frag_size;
		}
		page_offset = page_info->page_offset;
		page_info->page = pagep;
		dma_unmap_addr_set(page_info, bus, page_dmaaddr);
		frag_dmaaddr = page_dmaaddr + page_info->page_offset;

		rxd = queue_head_node(rxq);
		rxd->fragpa_lo = cpu_to_le32(frag_dmaaddr & 0xFFFFFFFF);
		rxd->fragpa_hi = cpu_to_le32(upper_32_bits(frag_dmaaddr));

		/* Any space left in the current big page for another frag? */
		if ((page_offset + rx_frag_size + rx_frag_size) >
					adapter->big_page_size) {
			pagep = NULL;
			page_info->last_page_user = true;
		}

		prev_page_info = page_info;
		queue_head_inc(rxq);
		page_info = &page_info_tbl[rxq->head];
	}
	if (pagep)
		prev_page_info->last_page_user = true;

	if (posted) {
		atomic_add(posted, &rxq->used);
		be_rxq_notify(adapter, rxq->id, posted);
	} else if (atomic_read(&rxq->used) == 0) {
		/* Let be_worker replenish when memory is available */
		rxo->rx_post_starved = true;
	}
}

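/* Consume the next TX completion, if any; the entry's valid bit is cleared
 * and the CQ tail advanced so the same entry is not seen twice.
 */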
static struct be_eth_tx_compl *be_tx_compl_get(struct be_queue_info *tx_cq)
{
	struct be_eth_tx_compl *txcp = queue_tail_node(tx_cq);

	if (txcp->dw[offsetof(struct amap_eth_tx_compl, valid) / 32] == 0)
		return NULL;

	rmb();
	be_dws_le_to_cpu(txcp, sizeof(*txcp));

	txcp->dw[offsetof(struct amap_eth_tx_compl, valid) / 32] = 0;

	queue_tail_inc(tx_cq);
	return txcp;
}

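/* Unmap and free the skb whose wrbs end at last_index; returns the number
 * of wrbs (including the header wrb) that were completed.
 */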
static u16 be_tx_compl_process(struct be_adapter *adapter,
		struct be_tx_obj *txo, u16 last_index)
{
	struct be_queue_info *txq = &txo->q;
	struct be_eth_wrb *wrb;
	struct sk_buff **sent_skbs = txo->sent_skb_list;
	struct sk_buff *sent_skb;
	u16 cur_index, num_wrbs = 1; /* account for hdr wrb */
	bool unmap_skb_hdr = true;

	sent_skb = sent_skbs[txq->tail];
	BUG_ON(!sent_skb);
	sent_skbs[txq->tail] = NULL;

	/* skip header wrb */
	queue_tail_inc(txq);

	do {
		cur_index = txq->tail;
		wrb = queue_tail_node(txq);
		unmap_tx_frag(&adapter->pdev->dev, wrb,
			      (unmap_skb_hdr && skb_headlen(sent_skb)));
		unmap_skb_hdr = false;

		num_wrbs++;
		queue_tail_inc(txq);
	} while (cur_index != last_index);

	kfree_skb(sent_skb);
	return num_wrbs;
}

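/* Consume the next event queue entry, if one is valid */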
static inline struct be_eq_entry *event_get(struct be_eq_obj *eq_obj)
{
	struct be_eq_entry *eqe = queue_tail_node(&eq_obj->q);

	if (!eqe->evt)
		return NULL;

	rmb();
	eqe->evt = le32_to_cpu(eqe->evt);
	queue_tail_inc(&eq_obj->q);
	return eqe;
}

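/* Drain all pending entries on an event queue, notify the hardware of the
 * count consumed, and schedule NAPI processing if any events were found.
 */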
static int event_handle(struct be_adapter *adapter,
			struct be_eq_obj *eq_obj,
			bool rearm)
{
	struct be_eq_entry *eqe;
	u16 num = 0;

	while ((eqe = event_get(eq_obj)) != NULL) {
		eqe->evt = 0;
		num++;
	}

	/* Deal with any spurious interrupts that come
	 * without events
	 */
	if (!num)
		rearm = true;

	be_eq_notify(adapter, eq_obj->q.id, rearm, true, num);
	if (num)
		napi_schedule(&eq_obj->napi);

	return num;
}

/* Just read and notify events without processing them.
 * Used at the time of destroying event queues */
static void be_eq_clean(struct be_adapter *adapter,
			struct be_eq_obj *eq_obj)
{
	struct be_eq_entry *eqe;
	u16 num = 0;

	while ((eqe = event_get(eq_obj)) != NULL) {
		eqe->evt = 0;
		num++;
	}

	if (num)
		be_eq_notify(adapter, eq_obj->q.id, false, true, num);
}

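/* Discard all pending RX completions, then release any posted buffers that
 * the hardware never filled.
 */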
static void be_rx_q_clean(struct be_adapter *adapter, struct be_rx_obj *rxo)
{
	struct be_rx_page_info *page_info;
	struct be_queue_info *rxq = &rxo->q;
	struct be_queue_info *rx_cq = &rxo->cq;
	struct be_rx_compl_info *rxcp;
	u16 tail;

	/* First cleanup pending rx completions */
	while ((rxcp = be_rx_compl_get(rxo)) != NULL) {
		be_rx_compl_discard(adapter, rxo, rxcp);
		be_cq_notify(adapter, rx_cq->id, false, 1);
	}

	/* Then free posted rx buffers that were not used */
	tail = (rxq->head + rxq->len - atomic_read(&rxq->used)) % rxq->len;
	for (; atomic_read(&rxq->used) > 0; index_inc(&tail, rxq->len)) {
		page_info = get_rx_page_info(adapter, rxo, tail);
		put_page(page_info->page);
		memset(page_info, 0, sizeof(*page_info));
	}
	BUG_ON(atomic_read(&rxq->used));
	rxq->tail = rxq->head = 0;
}

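/* Reap TX completions for up to ~200ms; anything still outstanding after
 * that is unmapped and freed directly, since its completion will never
 * arrive.
 */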
static void be_tx_compl_clean(struct be_adapter *adapter,
				struct be_tx_obj *txo)
{
	struct be_queue_info *tx_cq = &txo->cq;
	struct be_queue_info *txq = &txo->q;
	struct be_eth_tx_compl *txcp;
	u16 end_idx, cmpl = 0, timeo = 0, num_wrbs = 0;
	struct sk_buff **sent_skbs = txo->sent_skb_list;
	struct sk_buff *sent_skb;
	bool dummy_wrb;

	/* Wait for a max of 200ms for all the tx-completions to arrive. */
	do {
		while ((txcp = be_tx_compl_get(tx_cq))) {
			end_idx = AMAP_GET_BITS(struct amap_eth_tx_compl,
					wrb_index, txcp);
			num_wrbs += be_tx_compl_process(adapter, txo, end_idx);
			cmpl++;
		}
		if (cmpl) {
			be_cq_notify(adapter, tx_cq->id, false, cmpl);
			atomic_sub(num_wrbs, &txq->used);
			cmpl = 0;
			num_wrbs = 0;
		}

		if (atomic_read(&txq->used) == 0 || ++timeo > 200)
			break;

		mdelay(1);
	} while (true);

	if (atomic_read(&txq->used))
		dev_err(&adapter->pdev->dev, "%d pending tx-completions\n",
			atomic_read(&txq->used));

	/* free posted tx for which compls will never arrive */
	while (atomic_read(&txq->used)) {
		sent_skb = sent_skbs[txq->tail];
		end_idx = txq->tail;
		index_adv(&end_idx,
			wrb_cnt_for_skb(adapter, sent_skb, &dummy_wrb) - 1,
			txq->len);
		num_wrbs = be_tx_compl_process(adapter, txo, end_idx);
		atomic_sub(num_wrbs, &txq->used);
	}
}

static void be_mcc_queues_destroy(struct be_adapter *adapter)
{
	struct be_queue_info *q;

	q = &adapter->mcc_obj.q;
	if (q->created)
		be_cmd_q_destroy(adapter, q, QTYPE_MCCQ);
	be_queue_free(adapter, q);

	q = &adapter->mcc_obj.cq;
	if (q->created)
		be_cmd_q_destroy(adapter, q, QTYPE_CQ);
	be_queue_free(adapter, q);
}

/* Must be called only after TX qs are created as MCC shares TX EQ */
static int be_mcc_queues_create(struct be_adapter *adapter)
{
	struct be_queue_info *q, *cq;

	/* Alloc MCC compl queue */
	cq = &adapter->mcc_obj.cq;
	if (be_queue_alloc(adapter, cq, MCC_CQ_LEN,
			sizeof(struct be_mcc_compl)))
		goto err;

	/* Ask BE to create MCC compl queue; share TX's eq */
	if (be_cmd_cq_create(adapter, cq, &adapter->tx_eq.q, false, true, 0))
		goto mcc_cq_free;

	/* Alloc MCC queue */
	q = &adapter->mcc_obj.q;
	if (be_queue_alloc(adapter, q, MCC_Q_LEN, sizeof(struct be_mcc_wrb)))
		goto mcc_cq_destroy;

	/* Ask BE to create MCC queue */
	if (be_cmd_mccq_create(adapter, q, cq))
		goto mcc_q_free;

	return 0;

mcc_q_free:
	be_queue_free(adapter, q);
mcc_cq_destroy:
	be_cmd_q_destroy(adapter, cq, QTYPE_CQ);
mcc_cq_free:
	be_queue_free(adapter, cq);
err:
	return -1;
}

static void be_tx_queues_destroy(struct be_adapter *adapter)
{
	struct be_queue_info *q;
	struct be_tx_obj *txo;
	u8 i;

	for_all_tx_queues(adapter, txo, i) {
		q = &txo->q;
		if (q->created)
			be_cmd_q_destroy(adapter, q, QTYPE_TXQ);
		be_queue_free(adapter, q);

		q = &txo->cq;
		if (q->created)
			be_cmd_q_destroy(adapter, q, QTYPE_CQ);
		be_queue_free(adapter, q);
	}

	/* Clear any residual events */
	be_eq_clean(adapter, &adapter->tx_eq);

	q = &adapter->tx_eq.q;
	if (q->created)
		be_cmd_q_destroy(adapter, q, QTYPE_EQ);
	be_queue_free(adapter, q);
}

/* One TX event queue is shared by all TX compl qs */
static int be_tx_queues_create(struct be_adapter *adapter)
{
	struct be_queue_info *eq, *q, *cq;
	struct be_tx_obj *txo;
	u8 i;

	adapter->tx_eq.max_eqd = 0;
	adapter->tx_eq.min_eqd = 0;
	adapter->tx_eq.cur_eqd = 96;
	adapter->tx_eq.enable_aic = false;

	eq = &adapter->tx_eq.q;
	if (be_queue_alloc(adapter, eq, EVNT_Q_LEN,
			sizeof(struct be_eq_entry)))
		return -1;

	if (be_cmd_eq_create(adapter, eq, adapter->tx_eq.cur_eqd))
		goto err;
	adapter->tx_eq.eq_idx = adapter->eq_next_idx++;

	for_all_tx_queues(adapter, txo, i) {
		cq = &txo->cq;
		if (be_queue_alloc(adapter, cq, TX_CQ_LEN,
				sizeof(struct be_eth_tx_compl)))
			goto err;

		if (be_cmd_cq_create(adapter, cq, eq, false, false, 3))
			goto err;

		q = &txo->q;
		if (be_queue_alloc(adapter, q, TX_Q_LEN,
				sizeof(struct be_eth_wrb)))
			goto err;

		if (be_cmd_txq_create(adapter, q, cq))
			goto err;
	}
	return 0;

err:
	be_tx_queues_destroy(adapter);
	return -1;
}

static void be_rx_queues_destroy(struct be_adapter *adapter)
{
	struct be_queue_info *q;
	struct be_rx_obj *rxo;
	int i;

	for_all_rx_queues(adapter, rxo, i) {
		be_queue_free(adapter, &rxo->q);

		q = &rxo->cq;
		if (q->created)
			be_cmd_q_destroy(adapter, q, QTYPE_CQ);
		be_queue_free(adapter, q);

		q = &rxo->rx_eq.q;
		if (q->created)
			be_cmd_q_destroy(adapter, q, QTYPE_EQ);
		be_queue_free(adapter, q);
	}
}

1682{
Sathya Perlac814fd32011-06-26 20:41:25 +00001683 if ((adapter->function_caps & BE_FUNCTION_CAPS_RSS) &&
Sathya Perlaac6a0c42011-03-21 20:49:25 +00001684 !adapter->sriov_enabled && !(adapter->function_mode & 0x400)) {
1685 return 1 + MAX_RSS_QS; /* one default non-RSS queue */
1686 } else {
1687 dev_warn(&adapter->pdev->dev,
1688 "No support for multiple RX queues\n");
1689 return 1;
1690 }
1691}
1692
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001693static int be_rx_queues_create(struct be_adapter *adapter)
1694{
1695 struct be_queue_info *eq, *q, *cq;
Sathya Perla3abcded2010-10-03 22:12:27 -07001696 struct be_rx_obj *rxo;
1697 int rc, i;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001698
Sathya Perlaac6a0c42011-03-21 20:49:25 +00001699 adapter->num_rx_qs = min(be_num_rxqs_want(adapter),
1700 msix_enabled(adapter) ?
1701 adapter->num_msix_vec - 1 : 1);
1702 if (adapter->num_rx_qs != MAX_RX_QS)
1703 dev_warn(&adapter->pdev->dev,
1704 "Can create only %d RX queues", adapter->num_rx_qs);
1705
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001706 adapter->big_page_size = (1 << get_order(rx_frag_size)) * PAGE_SIZE;
Sathya Perla3abcded2010-10-03 22:12:27 -07001707 for_all_rx_queues(adapter, rxo, i) {
1708 rxo->adapter = adapter;
1709 rxo->rx_eq.max_eqd = BE_MAX_EQD;
1710 rxo->rx_eq.enable_aic = true;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001711
Sathya Perla3abcded2010-10-03 22:12:27 -07001712 /* EQ */
1713 eq = &rxo->rx_eq.q;
1714 rc = be_queue_alloc(adapter, eq, EVNT_Q_LEN,
1715 sizeof(struct be_eq_entry));
1716 if (rc)
1717 goto err;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001718
Sathya Perla3abcded2010-10-03 22:12:27 -07001719 rc = be_cmd_eq_create(adapter, eq, rxo->rx_eq.cur_eqd);
1720 if (rc)
1721 goto err;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001722
Padmanabh Ratnakarecd62102011-04-03 01:54:11 +00001723 rxo->rx_eq.eq_idx = adapter->eq_next_idx++;
Sathya Perlafe6d2a32010-11-21 23:25:50 +00001724
Sathya Perla3abcded2010-10-03 22:12:27 -07001725 /* CQ */
1726 cq = &rxo->cq;
1727 rc = be_queue_alloc(adapter, cq, RX_CQ_LEN,
1728 sizeof(struct be_eth_rx_compl));
1729 if (rc)
1730 goto err;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001731
Sathya Perla3abcded2010-10-03 22:12:27 -07001732 rc = be_cmd_cq_create(adapter, cq, eq, false, false, 3);
1733 if (rc)
1734 goto err;
Sathya Perla482c9e72011-06-29 23:33:17 +00001735
1736 /* Rx Q - will be created in be_open() */
Sathya Perla3abcded2010-10-03 22:12:27 -07001737 q = &rxo->q;
1738 rc = be_queue_alloc(adapter, q, RX_Q_LEN,
1739 sizeof(struct be_eth_rx_d));
1740 if (rc)
1741 goto err;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001742
Sathya Perla3abcded2010-10-03 22:12:27 -07001743 }
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001744
1745 return 0;
Sathya Perla3abcded2010-10-03 22:12:27 -07001746err:
1747 be_rx_queues_destroy(adapter);
1748 return -1;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001749}
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001750
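/* Non-destructively check whether an event is pending on the EQ */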
static bool event_peek(struct be_eq_obj *eq_obj)
{
	struct be_eq_entry *eqe = queue_tail_node(&eq_obj->q);

	return eqe->evt != 0;
}

static irqreturn_t be_intx(int irq, void *dev)
{
	struct be_adapter *adapter = dev;
	struct be_rx_obj *rxo;
	int isr, i, tx = 0, rx = 0;

	if (lancer_chip(adapter)) {
		if (event_peek(&adapter->tx_eq))
			tx = event_handle(adapter, &adapter->tx_eq, false);
		for_all_rx_queues(adapter, rxo, i) {
			if (event_peek(&rxo->rx_eq))
				rx |= event_handle(adapter, &rxo->rx_eq, true);
		}

		if (!(tx || rx))
			return IRQ_NONE;

	} else {
		isr = ioread32(adapter->csr + CEV_ISR0_OFFSET +
			(adapter->tx_eq.q.id / 8) * CEV_ISR_SIZE);
		if (!isr)
			return IRQ_NONE;

		if ((1 << adapter->tx_eq.eq_idx & isr))
			event_handle(adapter, &adapter->tx_eq, false);

		for_all_rx_queues(adapter, rxo, i) {
			if ((1 << rxo->rx_eq.eq_idx & isr))
				event_handle(adapter, &rxo->rx_eq, true);
		}
	}

	return IRQ_HANDLED;
}

static irqreturn_t be_msix_rx(int irq, void *dev)
{
	struct be_rx_obj *rxo = dev;
	struct be_adapter *adapter = rxo->adapter;

	event_handle(adapter, &rxo->rx_eq, true);

	return IRQ_HANDLED;
}

static irqreturn_t be_msix_tx_mcc(int irq, void *dev)
{
	struct be_adapter *adapter = dev;

	event_handle(adapter, &adapter->tx_eq, false);

	return IRQ_HANDLED;
}

static inline bool do_gro(struct be_rx_compl_info *rxcp)
{
	return rxcp->tcpf && !rxcp->err;
}

static int be_poll_rx(struct napi_struct *napi, int budget)
{
	struct be_eq_obj *rx_eq = container_of(napi, struct be_eq_obj, napi);
	struct be_rx_obj *rxo = container_of(rx_eq, struct be_rx_obj, rx_eq);
	struct be_adapter *adapter = rxo->adapter;
	struct be_queue_info *rx_cq = &rxo->cq;
	struct be_rx_compl_info *rxcp;
	u32 work_done;

	rx_stats(rxo)->rx_polls++;
	for (work_done = 0; work_done < budget; work_done++) {
		rxcp = be_rx_compl_get(rxo);
		if (!rxcp)
			break;

		/* Ignore flush completions */
		if (rxcp->num_rcvd && rxcp->pkt_size) {
			if (do_gro(rxcp))
				be_rx_compl_process_gro(adapter, rxo, rxcp);
			else
				be_rx_compl_process(adapter, rxo, rxcp);
		} else if (rxcp->pkt_size == 0) {
			be_rx_compl_discard(adapter, rxo, rxcp);
		}

		be_rx_stats_update(rxo, rxcp);
	}

	/* Refill the queue */
	if (atomic_read(&rxo->q.used) < RX_FRAGS_REFILL_WM)
		be_post_rx_frags(rxo, GFP_ATOMIC);

	/* All consumed */
	if (work_done < budget) {
		napi_complete(napi);
		be_cq_notify(adapter, rx_cq->id, true, work_done);
	} else {
		/* More to be consumed; continue with interrupts disabled */
		be_cq_notify(adapter, rx_cq->id, false, work_done);
	}
	return work_done;
}

/* As TX and MCC share the same EQ check for both TX and MCC completions.
 * For TX/MCC we don't honour budget; consume everything
 */
static int be_poll_tx_mcc(struct napi_struct *napi, int budget)
{
	struct be_eq_obj *tx_eq = container_of(napi, struct be_eq_obj, napi);
	struct be_adapter *adapter =
		container_of(tx_eq, struct be_adapter, tx_eq);
	struct be_tx_obj *txo;
	struct be_eth_tx_compl *txcp;
	int tx_compl, mcc_compl, status = 0;
	u8 i;
	u16 num_wrbs;

	for_all_tx_queues(adapter, txo, i) {
		tx_compl = 0;
		num_wrbs = 0;
		while ((txcp = be_tx_compl_get(&txo->cq))) {
			num_wrbs += be_tx_compl_process(adapter, txo,
				AMAP_GET_BITS(struct amap_eth_tx_compl,
					wrb_index, txcp));
			tx_compl++;
		}
		if (tx_compl) {
			be_cq_notify(adapter, txo->cq.id, true, tx_compl);

			atomic_sub(num_wrbs, &txo->q.used);

			/* As Tx wrbs have been freed up, wake up netdev queue
			 * if it was stopped due to lack of tx wrbs. */
			if (__netif_subqueue_stopped(adapter->netdev, i) &&
				atomic_read(&txo->q.used) < txo->q.len / 2) {
				netif_wake_subqueue(adapter->netdev, i);
			}

			u64_stats_update_begin(&tx_stats(txo)->sync_compl);
			tx_stats(txo)->tx_compl += tx_compl;
			u64_stats_update_end(&tx_stats(txo)->sync_compl);
		}
	}

	mcc_compl = be_process_mcc(adapter, &status);

	if (mcc_compl) {
		struct be_mcc_obj *mcc_obj = &adapter->mcc_obj;
		be_cq_notify(adapter, mcc_obj->cq.id, true, mcc_compl);
	}

	napi_complete(napi);

	be_eq_notify(adapter, tx_eq->q.id, true, false, 0);
	adapter->drv_stats.tx_events++;
	return 1;
}

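/* Read the unrecoverable-error (UE) status registers and log any error
 * bits that are not masked off; any set bit marks the adapter as failed.
 */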
void be_detect_dump_ue(struct be_adapter *adapter)
{
	u32 ue_status_lo, ue_status_hi, ue_status_lo_mask, ue_status_hi_mask;
	u32 i;

	pci_read_config_dword(adapter->pdev,
				PCICFG_UE_STATUS_LOW, &ue_status_lo);
	pci_read_config_dword(adapter->pdev,
				PCICFG_UE_STATUS_HIGH, &ue_status_hi);
	pci_read_config_dword(adapter->pdev,
				PCICFG_UE_STATUS_LOW_MASK, &ue_status_lo_mask);
	pci_read_config_dword(adapter->pdev,
				PCICFG_UE_STATUS_HI_MASK, &ue_status_hi_mask);

	ue_status_lo = (ue_status_lo & (~ue_status_lo_mask));
	ue_status_hi = (ue_status_hi & (~ue_status_hi_mask));

	if (ue_status_lo || ue_status_hi) {
		adapter->ue_detected = true;
		adapter->eeh_err = true;
		dev_err(&adapter->pdev->dev, "UE Detected!!\n");
	}

	if (ue_status_lo) {
		for (i = 0; ue_status_lo; ue_status_lo >>= 1, i++) {
			if (ue_status_lo & 1)
				dev_err(&adapter->pdev->dev,
				"UE: %s bit set\n", ue_status_low_desc[i]);
		}
	}
	if (ue_status_hi) {
		for (i = 0; ue_status_hi; ue_status_hi >>= 1, i++) {
			if (ue_status_hi & 1)
				dev_err(&adapter->pdev->dev,
				"UE: %s bit set\n", ue_status_hi_desc[i]);
		}
	}
}

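/* Periodic housekeeping: detect unrecoverable errors, reap MCC completions
 * while interrupts are off, kick off a stats refresh, update EQ interrupt
 * moderation and replenish any RX queue that ran out of buffers.
 */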
static void be_worker(struct work_struct *work)
{
	struct be_adapter *adapter =
		container_of(work, struct be_adapter, work.work);
	struct be_rx_obj *rxo;
	int i;

	if (!adapter->ue_detected && !lancer_chip(adapter))
		be_detect_dump_ue(adapter);

	/* when interrupts are not yet enabled, just reap any pending
	 * mcc completions */
	if (!netif_running(adapter->netdev)) {
		int mcc_compl, status = 0;

		mcc_compl = be_process_mcc(adapter, &status);

		if (mcc_compl) {
			struct be_mcc_obj *mcc_obj = &adapter->mcc_obj;
			be_cq_notify(adapter, mcc_obj->cq.id, false, mcc_compl);
		}

		goto reschedule;
	}

	if (!adapter->stats_cmd_sent) {
		if (lancer_chip(adapter))
			lancer_cmd_get_pport_stats(adapter,
						&adapter->stats_cmd);
		else
			be_cmd_get_stats(adapter, &adapter->stats_cmd);
	}

	for_all_rx_queues(adapter, rxo, i) {
		be_rx_eqd_update(adapter, rxo);

		if (rxo->rx_post_starved) {
			rxo->rx_post_starved = false;
			be_post_rx_frags(rxo, GFP_KERNEL);
		}
	}

reschedule:
	adapter->work_counter++;
	schedule_delayed_work(&adapter->work, msecs_to_jiffies(1000));
}

static void be_msix_disable(struct be_adapter *adapter)
{
	if (msix_enabled(adapter)) {
		pci_disable_msix(adapter->pdev);
		adapter->num_msix_vec = 0;
	}
}

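/* Request one MSI-X vector per desired RX queue plus one shared TX/MCC
 * vector; if the full set cannot be granted, retry with whatever count
 * pci_enable_msix() reports as available.
 */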
static void be_msix_enable(struct be_adapter *adapter)
{
#define BE_MIN_MSIX_VECTORS (1 + 1) /* Rx + Tx */
	int i, status, num_vec;

	num_vec = be_num_rxqs_want(adapter) + 1;

	for (i = 0; i < num_vec; i++)
		adapter->msix_entries[i].entry = i;

	status = pci_enable_msix(adapter->pdev, adapter->msix_entries, num_vec);
	if (status == 0) {
		goto done;
	} else if (status >= BE_MIN_MSIX_VECTORS) {
		num_vec = status;
		if (pci_enable_msix(adapter->pdev, adapter->msix_entries,
				num_vec) == 0)
			goto done;
	}
	return;
done:
	adapter->num_msix_vec = num_vec;
	return;
}

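/* Enable SR-IOV on the PF, clamping the requested num_vfs to the TotalVFs
 * value advertised in the device's SR-IOV capability.
 */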
static void be_sriov_enable(struct be_adapter *adapter)
{
	be_check_sriov_fn_type(adapter);
#ifdef CONFIG_PCI_IOV
	if (be_physfn(adapter) && num_vfs) {
		int status, pos;
		u16 nvfs;

		pos = pci_find_ext_capability(adapter->pdev,
						PCI_EXT_CAP_ID_SRIOV);
		pci_read_config_word(adapter->pdev,
				pos + PCI_SRIOV_TOTAL_VF, &nvfs);

		if (num_vfs > nvfs) {
			dev_info(&adapter->pdev->dev,
					"Device supports %d VFs and not %d\n",
					nvfs, num_vfs);
			num_vfs = nvfs;
		}

		status = pci_enable_sriov(adapter->pdev, num_vfs);
		adapter->sriov_enabled = status ? false : true;
	}
#endif
}

static void be_sriov_disable(struct be_adapter *adapter)
{
#ifdef CONFIG_PCI_IOV
	if (adapter->sriov_enabled) {
		pci_disable_sriov(adapter->pdev);
		adapter->sriov_enabled = false;
	}
#endif
}

static inline int be_msix_vec_get(struct be_adapter *adapter,
					struct be_eq_obj *eq_obj)
{
	return adapter->msix_entries[eq_obj->eq_idx].vector;
}

static int be_request_irq(struct be_adapter *adapter,
		struct be_eq_obj *eq_obj,
		void *handler, char *desc, void *context)
{
	struct net_device *netdev = adapter->netdev;
	int vec;

	sprintf(eq_obj->desc, "%s-%s", netdev->name, desc);
	vec = be_msix_vec_get(adapter, eq_obj);
	return request_irq(vec, handler, 0, eq_obj->desc, context);
}

static void be_free_irq(struct be_adapter *adapter, struct be_eq_obj *eq_obj,
			void *context)
{
	int vec = be_msix_vec_get(adapter, eq_obj);
	free_irq(vec, context);
}

static int be_msix_register(struct be_adapter *adapter)
{
	struct be_rx_obj *rxo;
	int status, i;
	char qname[10];

	status = be_request_irq(adapter, &adapter->tx_eq, be_msix_tx_mcc, "tx",
				adapter);
	if (status)
		goto err;

	for_all_rx_queues(adapter, rxo, i) {
		sprintf(qname, "rxq%d", i);
		status = be_request_irq(adapter, &rxo->rx_eq, be_msix_rx,
				qname, rxo);
		if (status)
			goto err_msix;
	}

	return 0;

err_msix:
	be_free_irq(adapter, &adapter->tx_eq, adapter);

	for (i--, rxo = &adapter->rx_obj[i]; i >= 0; i--, rxo--)
		be_free_irq(adapter, &rxo->rx_eq, rxo);

err:
	dev_warn(&adapter->pdev->dev,
		"MSIX Request IRQ failed - err %d\n", status);
	be_msix_disable(adapter);
	return status;
}

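/* Prefer MSI-X; INTx is used as a fallback on the PF only, since VFs do
 * not support INTx.
 */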
static int be_irq_register(struct be_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	int status;

	if (msix_enabled(adapter)) {
		status = be_msix_register(adapter);
		if (status == 0)
			goto done;
		/* INTx is not supported for VF */
		if (!be_physfn(adapter))
			return status;
	}

	/* INTx */
	netdev->irq = adapter->pdev->irq;
	status = request_irq(netdev->irq, be_intx, IRQF_SHARED, netdev->name,
			adapter);
	if (status) {
		dev_err(&adapter->pdev->dev,
			"INTx request IRQ failed - err %d\n", status);
		return status;
	}
done:
	adapter->isr_registered = true;
	return 0;
}

static void be_irq_unregister(struct be_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	struct be_rx_obj *rxo;
	int i;

	if (!adapter->isr_registered)
		return;

	/* INTx */
	if (!msix_enabled(adapter)) {
		free_irq(netdev->irq, adapter);
		goto done;
	}

	/* MSIx */
	be_free_irq(adapter, &adapter->tx_eq, adapter);

	for_all_rx_queues(adapter, rxo, i)
		be_free_irq(adapter, &rxo->rx_eq, rxo);

done:
	adapter->isr_registered = false;
}

static void be_rx_queues_clear(struct be_adapter *adapter)
{
	struct be_queue_info *q;
	struct be_rx_obj *rxo;
	int i;

	for_all_rx_queues(adapter, rxo, i) {
		q = &rxo->q;
		if (q->created) {
			be_cmd_rxq_destroy(adapter, q);
			/* After the rxq is invalidated, wait for a grace time
			 * of 1ms for all dma to end and the flush compl to
			 * arrive
			 */
			mdelay(1);
			be_rx_q_clean(adapter, rxo);
		}

		/* Clear any residual events */
		q = &rxo->rx_eq.q;
		if (q->created)
			be_eq_clean(adapter, &rxo->rx_eq);
	}
}

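/* netdev stop: disable async MCC and interrupts, quiesce NAPI, drain all
 * pending TX completions and tear down the RX queues.
 */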
static int be_close(struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_rx_obj *rxo;
	struct be_tx_obj *txo;
	struct be_eq_obj *tx_eq = &adapter->tx_eq;
	int vec, i;

	be_async_mcc_disable(adapter);

	netif_carrier_off(netdev);
	adapter->link_up = false;

	if (!lancer_chip(adapter))
		be_intr_set(adapter, false);

	for_all_rx_queues(adapter, rxo, i)
		napi_disable(&rxo->rx_eq.napi);

	napi_disable(&tx_eq->napi);

	if (lancer_chip(adapter)) {
		be_cq_notify(adapter, adapter->mcc_obj.cq.id, false, 0);
		for_all_rx_queues(adapter, rxo, i)
			be_cq_notify(adapter, rxo->cq.id, false, 0);
		for_all_tx_queues(adapter, txo, i)
			be_cq_notify(adapter, txo->cq.id, false, 0);
	}

	if (msix_enabled(adapter)) {
		vec = be_msix_vec_get(adapter, tx_eq);
		synchronize_irq(vec);

		for_all_rx_queues(adapter, rxo, i) {
			vec = be_msix_vec_get(adapter, &rxo->rx_eq);
			synchronize_irq(vec);
		}
	} else {
		synchronize_irq(netdev->irq);
	}
	be_irq_unregister(adapter);

	/* Wait for all pending tx completions to arrive so that
	 * all tx skbs are freed.
	 */
	for_all_tx_queues(adapter, txo, i)
		be_tx_compl_clean(adapter, txo);

	be_rx_queues_clear(adapter);
	return 0;
}

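/* Create the RX rings in hardware, program the RSS indirection table over
 * every queue except the default one, and post the initial receive buffers.
 */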
static int be_rx_queues_setup(struct be_adapter *adapter)
{
	struct be_rx_obj *rxo;
	int rc, i;
	u8 rsstable[MAX_RSS_QS];

	for_all_rx_queues(adapter, rxo, i) {
		rc = be_cmd_rxq_create(adapter, &rxo->q, rxo->cq.id,
			rx_frag_size, BE_MAX_JUMBO_FRAME_SIZE,
			adapter->if_handle,
			(i > 0) ? 1 : 0/* rss enable */, &rxo->rss_id);
		if (rc)
			return rc;
	}

	if (be_multi_rxq(adapter)) {
		for_all_rss_queues(adapter, rxo, i)
			rsstable[i] = rxo->rss_id;

		rc = be_cmd_rss_config(adapter, rsstable,
			adapter->num_rx_qs - 1);
		if (rc)
			return rc;
	}

	/* First time posting */
	for_all_rx_queues(adapter, rxo, i) {
		be_post_rx_frags(rxo, GFP_KERNEL);
		napi_enable(&rxo->rx_eq.napi);
	}
	return 0;
}

static int be_open(struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_eq_obj *tx_eq = &adapter->tx_eq;
	struct be_rx_obj *rxo;
	bool link_up;
	int status, i;
	u8 mac_speed;
	u16 link_speed;

	status = be_rx_queues_setup(adapter);
	if (status)
		goto err;

	napi_enable(&tx_eq->napi);

	be_irq_register(adapter);

	if (!lancer_chip(adapter))
		be_intr_set(adapter, true);

	/* The evt queues are created in unarmed state; arm them */
	for_all_rx_queues(adapter, rxo, i) {
		be_eq_notify(adapter, rxo->rx_eq.q.id, true, false, 0);
		be_cq_notify(adapter, rxo->cq.id, true, 0);
	}
	be_eq_notify(adapter, tx_eq->q.id, true, false, 0);

	/* Now that interrupts are on we can process async mcc */
	be_async_mcc_enable(adapter);

	status = be_cmd_link_status_query(adapter, &link_up, &mac_speed,
			&link_speed, 0);
	if (status)
		goto err;
	be_link_status_update(adapter, link_up);

	if (be_physfn(adapter)) {
		status = be_vid_config(adapter, false, 0);
		if (status)
			goto err;

		status = be_cmd_set_flow_control(adapter,
				adapter->tx_fc, adapter->rx_fc);
		if (status)
			goto err;
	}

	return 0;
err:
	be_close(adapter->netdev);
	return -EIO;
}

static int be_setup_wol(struct be_adapter *adapter, bool enable)
{
	struct be_dma_mem cmd;
	int status = 0;
	u8 mac[ETH_ALEN];

	memset(mac, 0, ETH_ALEN);

	cmd.size = sizeof(struct be_cmd_req_acpi_wol_magic_config);
	cmd.va = dma_alloc_coherent(&adapter->pdev->dev, cmd.size, &cmd.dma,
				    GFP_KERNEL);
	if (cmd.va == NULL)
		return -1;
	memset(cmd.va, 0, cmd.size);

	if (enable) {
		status = pci_write_config_dword(adapter->pdev,
			PCICFG_PM_CONTROL_OFFSET, PCICFG_PM_CONTROL_MASK);
		if (status) {
			dev_err(&adapter->pdev->dev,
				"Could not enable Wake-on-lan\n");
			dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va,
					  cmd.dma);
			return status;
		}
		status = be_cmd_enable_magic_wol(adapter,
				adapter->netdev->dev_addr, &cmd);
		pci_enable_wake(adapter->pdev, PCI_D3hot, 1);
		pci_enable_wake(adapter->pdev, PCI_D3cold, 1);
	} else {
		status = be_cmd_enable_magic_wol(adapter, mac, &cmd);
		pci_enable_wake(adapter->pdev, PCI_D3hot, 0);
		pci_enable_wake(adapter->pdev, PCI_D3cold, 0);
	}

	dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va, cmd.dma);
	return status;
}

Ajit Khaparde6d87f5c2010-08-25 00:32:33 +00002388/*
2389 * Generate a seed MAC address from the PF MAC Address using jhash.
2390 * MAC Address for VFs are assigned incrementally starting from the seed.
2391 * These addresses are programmed in the ASIC by the PF and the VF driver
2392 * queries for the MAC address during its probe.
2393 */
2394static inline int be_vf_eth_addr_config(struct be_adapter *adapter)
2395{
2396 u32 vf = 0;
Sathya Perla3abcded2010-10-03 22:12:27 -07002397 int status = 0;
Ajit Khaparde6d87f5c2010-08-25 00:32:33 +00002398 u8 mac[ETH_ALEN];
2399
2400 be_vf_eth_addr_generate(adapter, mac);
2401
2402 for (vf = 0; vf < num_vfs; vf++) {
2403 status = be_cmd_pmac_add(adapter, mac,
2404 adapter->vf_cfg[vf].vf_if_handle,
Ajit Khapardef8617e02011-02-11 13:36:37 +00002405 &adapter->vf_cfg[vf].vf_pmac_id,
2406 vf + 1);
Ajit Khaparde6d87f5c2010-08-25 00:32:33 +00002407 if (status)
2408 dev_err(&adapter->pdev->dev,
2409 "Mac address add failed for VF %d\n", vf);
2410 else
2411 memcpy(adapter->vf_cfg[vf].vf_mac_addr, mac, ETH_ALEN);
2412
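		/* Step to the next VF's address: only the last octet is
		 * bumped (u8 arithmetic, so a seed ending in 0xff wraps to
		 * 0x00 without carrying into mac[4]).
		 */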
		mac[5] += 1;
	}
	return status;
}

static inline void be_vf_eth_addr_rem(struct be_adapter *adapter)
{
	u32 vf;

	for (vf = 0; vf < num_vfs; vf++) {
		if (adapter->vf_cfg[vf].vf_pmac_id != BE_INVALID_PMAC_ID)
			be_cmd_pmac_del(adapter,
					adapter->vf_cfg[vf].vf_if_handle,
					adapter->vf_cfg[vf].vf_pmac_id, vf + 1);
	}
}

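/*
 * One-time adapter bring-up: create the PF interface (with RSS and
 * promiscuous capabilities when running as a physical function), per-VF
 * interfaces when SR-IOV is enabled, and then the TX, RX and MCC queue
 * sets. Unwinds in reverse order on any failure.
 */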
static int be_setup(struct be_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	u32 cap_flags, en_flags, vf = 0;
	int status;
	u8 mac[ETH_ALEN];

	be_cmd_req_native_mode(adapter);

	cap_flags = en_flags = BE_IF_FLAGS_UNTAGGED |
				BE_IF_FLAGS_BROADCAST |
				BE_IF_FLAGS_MULTICAST;

	if (be_physfn(adapter)) {
		cap_flags |= BE_IF_FLAGS_MCAST_PROMISCUOUS |
				BE_IF_FLAGS_PROMISCUOUS |
				BE_IF_FLAGS_PASS_L3L4_ERRORS;
		en_flags |= BE_IF_FLAGS_PASS_L3L4_ERRORS;

		if (adapter->function_caps & BE_FUNCTION_CAPS_RSS) {
			cap_flags |= BE_IF_FLAGS_RSS;
			en_flags |= BE_IF_FLAGS_RSS;
		}
	}

	status = be_cmd_if_create(adapter, cap_flags, en_flags,
			netdev->dev_addr, false /* pmac_invalid */,
			&adapter->if_handle, &adapter->pmac_id, 0);
	if (status != 0)
		goto do_none;

	if (be_physfn(adapter)) {
		if (adapter->sriov_enabled) {
			while (vf < num_vfs) {
				cap_flags = en_flags = BE_IF_FLAGS_UNTAGGED |
							BE_IF_FLAGS_BROADCAST;
				status = be_cmd_if_create(adapter, cap_flags,
					en_flags, mac, true,
					&adapter->vf_cfg[vf].vf_if_handle,
					NULL, vf + 1);
				if (status) {
					dev_err(&adapter->pdev->dev,
					"Interface Create failed for VF %d\n",
					vf);
					goto if_destroy;
				}
				adapter->vf_cfg[vf].vf_pmac_id =
						BE_INVALID_PMAC_ID;
				vf++;
			}
		}
	} else {
		status = be_cmd_mac_addr_query(adapter, mac,
			MAC_ADDRESS_TYPE_NETWORK, false, adapter->if_handle);
		if (!status) {
			memcpy(adapter->netdev->dev_addr, mac, ETH_ALEN);
			memcpy(adapter->netdev->perm_addr, mac, ETH_ALEN);
		}
	}

	status = be_tx_queues_create(adapter);
	if (status != 0)
		goto if_destroy;

	status = be_rx_queues_create(adapter);
	if (status != 0)
		goto tx_qs_destroy;

	/* Allow all priorities by default. A GRP5 evt may modify this */
	adapter->vlan_prio_bmap = 0xff;

	status = be_mcc_queues_create(adapter);
	if (status != 0)
		goto rx_qs_destroy;

	adapter->link_speed = -1;

	return 0;

rx_qs_destroy:
	be_rx_queues_destroy(adapter);
tx_qs_destroy:
	be_tx_queues_destroy(adapter);
if_destroy:
	if (be_physfn(adapter) && adapter->sriov_enabled)
		for (vf = 0; vf < num_vfs; vf++)
			if (adapter->vf_cfg[vf].vf_if_handle)
				be_cmd_if_destroy(adapter,
					adapter->vf_cfg[vf].vf_if_handle,
					vf + 1);
	be_cmd_if_destroy(adapter, adapter->if_handle, 0);
do_none:
	return status;
}

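/*
 * Teardown counterpart of be_setup(): releases the queues, the per-VF MAC
 * entries and interfaces, then the PF interface, and finally tells the
 * firmware this function is done issuing commands.
 */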
static int be_clear(struct be_adapter *adapter)
{
	int vf;

	if (be_physfn(adapter) && adapter->sriov_enabled)
		be_vf_eth_addr_rem(adapter);

	be_mcc_queues_destroy(adapter);
	be_rx_queues_destroy(adapter);
	be_tx_queues_destroy(adapter);
	adapter->eq_next_idx = 0;

	if (be_physfn(adapter) && adapter->sriov_enabled)
		for (vf = 0; vf < num_vfs; vf++)
			if (adapter->vf_cfg[vf].vf_if_handle)
				be_cmd_if_destroy(adapter,
					adapter->vf_cfg[vf].vf_if_handle,
					vf + 1);

	be_cmd_if_destroy(adapter, adapter->if_handle, 0);

	adapter->be3_native = 0;

	/* tell fw we're done with firing cmds */
	be_cmd_fw_clean(adapter);
	return 0;
}

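/*
 * Decide whether the redboot section needs to be flashed: compare the CRC
 * stored in the last 4 bytes of the image in the UFI file against the CRC
 * read back from flash, and skip the write when they already match.
 */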
#define FW_FILE_HDR_SIGN 	"ServerEngines Corp. "
static bool be_flash_redboot(struct be_adapter *adapter,
			const u8 *p, u32 img_start, int image_size,
			int hdr_size)
{
	u32 crc_offset;
	u8 flashed_crc[4];
	int status;

	crc_offset = hdr_size + img_start + image_size - 4;

	p += crc_offset;

	status = be_cmd_get_flash_crc(adapter, flashed_crc,
			(image_size - 4));
	if (status) {
		dev_err(&adapter->pdev->dev,
			"could not get crc from flash, not flashing redboot\n");
		return false;
	}

	/* update redboot only if the CRC does not match */
	if (!memcmp(flashed_crc, p, 4))
		return false;
	else
		return true;
}

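/*
 * Write each firmware component from the UFI file to its flash region.
 * The component table (offset, type, max size) depends on the adapter
 * generation; each image is streamed through the DMA buffer in 32KB
 * chunks, with the final chunk issued as a FLASH (commit) operation
 * rather than a SAVE.
 */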
static int be_flash_data(struct be_adapter *adapter,
			const struct firmware *fw,
			struct be_dma_mem *flash_cmd, int num_of_images)
{
	int status = 0, i, filehdr_size = 0;
	u32 total_bytes = 0, flash_op;
	int num_bytes;
	const u8 *p = fw->data;
	struct be_cmd_write_flashrom *req = flash_cmd->va;
	const struct flash_comp *pflashcomp;
	int num_comp;

	static const struct flash_comp gen3_flash_types[9] = {
		{ FLASH_iSCSI_PRIMARY_IMAGE_START_g3, IMG_TYPE_ISCSI_ACTIVE,
			FLASH_IMAGE_MAX_SIZE_g3},
		{ FLASH_REDBOOT_START_g3, IMG_TYPE_REDBOOT,
			FLASH_REDBOOT_IMAGE_MAX_SIZE_g3},
		{ FLASH_iSCSI_BIOS_START_g3, IMG_TYPE_BIOS,
			FLASH_BIOS_IMAGE_MAX_SIZE_g3},
		{ FLASH_PXE_BIOS_START_g3, IMG_TYPE_PXE_BIOS,
			FLASH_BIOS_IMAGE_MAX_SIZE_g3},
		{ FLASH_FCoE_BIOS_START_g3, IMG_TYPE_FCOE_BIOS,
			FLASH_BIOS_IMAGE_MAX_SIZE_g3},
		{ FLASH_iSCSI_BACKUP_IMAGE_START_g3, IMG_TYPE_ISCSI_BACKUP,
			FLASH_IMAGE_MAX_SIZE_g3},
		{ FLASH_FCoE_PRIMARY_IMAGE_START_g3, IMG_TYPE_FCOE_FW_ACTIVE,
			FLASH_IMAGE_MAX_SIZE_g3},
		{ FLASH_FCoE_BACKUP_IMAGE_START_g3, IMG_TYPE_FCOE_FW_BACKUP,
			FLASH_IMAGE_MAX_SIZE_g3},
		{ FLASH_NCSI_START_g3, IMG_TYPE_NCSI_FW,
			FLASH_NCSI_IMAGE_MAX_SIZE_g3}
	};
	static const struct flash_comp gen2_flash_types[8] = {
		{ FLASH_iSCSI_PRIMARY_IMAGE_START_g2, IMG_TYPE_ISCSI_ACTIVE,
			FLASH_IMAGE_MAX_SIZE_g2},
		{ FLASH_REDBOOT_START_g2, IMG_TYPE_REDBOOT,
			FLASH_REDBOOT_IMAGE_MAX_SIZE_g2},
		{ FLASH_iSCSI_BIOS_START_g2, IMG_TYPE_BIOS,
			FLASH_BIOS_IMAGE_MAX_SIZE_g2},
		{ FLASH_PXE_BIOS_START_g2, IMG_TYPE_PXE_BIOS,
			FLASH_BIOS_IMAGE_MAX_SIZE_g2},
		{ FLASH_FCoE_BIOS_START_g2, IMG_TYPE_FCOE_BIOS,
			FLASH_BIOS_IMAGE_MAX_SIZE_g2},
		{ FLASH_iSCSI_BACKUP_IMAGE_START_g2, IMG_TYPE_ISCSI_BACKUP,
			FLASH_IMAGE_MAX_SIZE_g2},
		{ FLASH_FCoE_PRIMARY_IMAGE_START_g2, IMG_TYPE_FCOE_FW_ACTIVE,
			FLASH_IMAGE_MAX_SIZE_g2},
		{ FLASH_FCoE_BACKUP_IMAGE_START_g2, IMG_TYPE_FCOE_FW_BACKUP,
			FLASH_IMAGE_MAX_SIZE_g2}
	};

	if (adapter->generation == BE_GEN3) {
		pflashcomp = gen3_flash_types;
		filehdr_size = sizeof(struct flash_file_hdr_g3);
		num_comp = ARRAY_SIZE(gen3_flash_types);
	} else {
		pflashcomp = gen2_flash_types;
		filehdr_size = sizeof(struct flash_file_hdr_g2);
		num_comp = ARRAY_SIZE(gen2_flash_types);
	}
	for (i = 0; i < num_comp; i++) {
		if ((pflashcomp[i].optype == IMG_TYPE_NCSI_FW) &&
				memcmp(adapter->fw_ver, "3.102.148.0", 11) < 0)
			continue;
		if ((pflashcomp[i].optype == IMG_TYPE_REDBOOT) &&
			(!be_flash_redboot(adapter, fw->data,
			pflashcomp[i].offset, pflashcomp[i].size, filehdr_size +
			(num_of_images * sizeof(struct image_hdr)))))
			continue;
		p = fw->data;
		p += filehdr_size + pflashcomp[i].offset
			+ (num_of_images * sizeof(struct image_hdr));
		if (p + pflashcomp[i].size > fw->data + fw->size)
			return -1;
		total_bytes = pflashcomp[i].size;
		while (total_bytes) {
			if (total_bytes > 32*1024)
				num_bytes = 32*1024;
			else
				num_bytes = total_bytes;
			total_bytes -= num_bytes;

			if (!total_bytes)
				flash_op = FLASHROM_OPER_FLASH;
			else
				flash_op = FLASHROM_OPER_SAVE;
			memcpy(req->params.data_buf, p, num_bytes);
			p += num_bytes;
			status = be_cmd_write_flashrom(adapter, flash_cmd,
				pflashcomp[i].optype, flash_op, num_bytes);
			if (status) {
				dev_err(&adapter->pdev->dev,
					"cmd to write to flash rom failed.\n");
				return -1;
			}
		}
	}
	return 0;
}

static int get_ufigen_type(struct flash_file_hdr_g2 *fhdr)
{
	if (fhdr == NULL)
		return 0;
	if (fhdr->build[0] == '3')
		return BE_GEN3;
	else if (fhdr->build[0] == '2')
		return BE_GEN2;
	else
		return 0;
}

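/*
 * Lancer firmware download: the image must be a multiple of 4 bytes; it is
 * streamed to the "/prg" flash object in 32KB chunks and then committed
 * with a final zero-length write.
 */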
static int lancer_fw_download(struct be_adapter *adapter,
				const struct firmware *fw)
{
#define LANCER_FW_DOWNLOAD_CHUNK	(32 * 1024)
#define LANCER_FW_DOWNLOAD_LOCATION	"/prg"
	struct be_dma_mem flash_cmd;
	const u8 *data_ptr = NULL;
	u8 *dest_image_ptr = NULL;
	size_t image_size = 0;
	u32 chunk_size = 0;
	u32 data_written = 0;
	u32 offset = 0;
	int status = 0;
	u8 add_status = 0;

	if (!IS_ALIGNED(fw->size, sizeof(u32))) {
		dev_err(&adapter->pdev->dev,
			"FW image not properly aligned. "
			"Length must be 4-byte aligned.\n");
		status = -EINVAL;
		goto lancer_fw_exit;
	}

	flash_cmd.size = sizeof(struct lancer_cmd_req_write_object)
				+ LANCER_FW_DOWNLOAD_CHUNK;
	flash_cmd.va = dma_alloc_coherent(&adapter->pdev->dev, flash_cmd.size,
						&flash_cmd.dma, GFP_KERNEL);
	if (!flash_cmd.va) {
		status = -ENOMEM;
		dev_err(&adapter->pdev->dev,
			"Memory allocation failure while flashing\n");
		goto lancer_fw_exit;
	}

	dest_image_ptr = flash_cmd.va +
				sizeof(struct lancer_cmd_req_write_object);
	image_size = fw->size;
	data_ptr = fw->data;

	while (image_size) {
		chunk_size = min_t(u32, image_size, LANCER_FW_DOWNLOAD_CHUNK);

		/* Copy the image chunk content. */
		memcpy(dest_image_ptr, data_ptr, chunk_size);

		status = lancer_cmd_write_object(adapter, &flash_cmd,
				chunk_size, offset, LANCER_FW_DOWNLOAD_LOCATION,
				&data_written, &add_status);

		if (status)
			break;

		offset += data_written;
		data_ptr += data_written;
		image_size -= data_written;
	}

	if (!status) {
		/* Commit the FW written */
		status = lancer_cmd_write_object(adapter, &flash_cmd,
				0, offset, LANCER_FW_DOWNLOAD_LOCATION,
				&data_written, &add_status);
	}

	dma_free_coherent(&adapter->pdev->dev, flash_cmd.size, flash_cmd.va,
				flash_cmd.dma);
	if (status) {
		dev_err(&adapter->pdev->dev,
			"Firmware load error. "
			"Status code: 0x%x Additional Status: 0x%x\n",
			status, add_status);
		goto lancer_fw_exit;
	}

	dev_info(&adapter->pdev->dev, "Firmware flashed successfully\n");
lancer_fw_exit:
	return status;
}

static int be_fw_download(struct be_adapter *adapter,
			  const struct firmware *fw)
{
	struct flash_file_hdr_g2 *fhdr;
	struct flash_file_hdr_g3 *fhdr3;
	struct image_hdr *img_hdr_ptr = NULL;
	struct be_dma_mem flash_cmd;
	const u8 *p;
	int status = 0, i = 0, num_imgs = 0;

	p = fw->data;
	fhdr = (struct flash_file_hdr_g2 *) p;

	flash_cmd.size = sizeof(struct be_cmd_write_flashrom) + 32*1024;
	flash_cmd.va = dma_alloc_coherent(&adapter->pdev->dev, flash_cmd.size,
					  &flash_cmd.dma, GFP_KERNEL);
	if (!flash_cmd.va) {
		status = -ENOMEM;
		dev_err(&adapter->pdev->dev,
			"Memory allocation failure while flashing\n");
		goto be_fw_exit;
	}

	if ((adapter->generation == BE_GEN3) &&
			(get_ufigen_type(fhdr) == BE_GEN3)) {
		fhdr3 = (struct flash_file_hdr_g3 *) fw->data;
		num_imgs = le32_to_cpu(fhdr3->num_imgs);
		for (i = 0; i < num_imgs; i++) {
			img_hdr_ptr = (struct image_hdr *) (fw->data +
					(sizeof(struct flash_file_hdr_g3) +
					 i * sizeof(struct image_hdr)));
			if (le32_to_cpu(img_hdr_ptr->imageid) == 1)
				status = be_flash_data(adapter, fw, &flash_cmd,
							num_imgs);
		}
	} else if ((adapter->generation == BE_GEN2) &&
			(get_ufigen_type(fhdr) == BE_GEN2)) {
		status = be_flash_data(adapter, fw, &flash_cmd, 0);
	} else {
		dev_err(&adapter->pdev->dev,
			"UFI and Interface are not compatible for flashing\n");
		status = -1;
	}

	dma_free_coherent(&adapter->pdev->dev, flash_cmd.size, flash_cmd.va,
			  flash_cmd.dma);
	if (status) {
		dev_err(&adapter->pdev->dev, "Firmware load error\n");
		goto be_fw_exit;
	}

	dev_info(&adapter->pdev->dev, "Firmware flashed successfully\n");

be_fw_exit:
	return status;
}

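/*
 * Common firmware-flash entry point: fetch the image with
 * request_firmware() and dispatch to the Lancer or BE2/BE3 download path.
 * The interface must be up so that firmware commands can be issued.
 */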
int be_load_fw(struct be_adapter *adapter, u8 *fw_file)
{
	const struct firmware *fw;
	int status;

	if (!netif_running(adapter->netdev)) {
		dev_err(&adapter->pdev->dev,
			"Firmware load not allowed (interface is down)\n");
		return -1;
	}

	status = request_firmware(&fw, fw_file, &adapter->pdev->dev);
	if (status)
		goto fw_exit;

	dev_info(&adapter->pdev->dev, "Flashing firmware file %s\n", fw_file);

	if (lancer_chip(adapter))
		status = lancer_fw_download(adapter, fw);
	else
		status = be_fw_download(adapter, fw);

fw_exit:
	release_firmware(fw);
	return status;
}

static struct net_device_ops be_netdev_ops = {
	.ndo_open		= be_open,
	.ndo_stop		= be_close,
	.ndo_start_xmit		= be_xmit,
	.ndo_set_rx_mode	= be_set_multicast_list,
	.ndo_set_mac_address	= be_mac_addr_set,
	.ndo_change_mtu		= be_change_mtu,
	.ndo_get_stats64	= be_get_stats64,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_vlan_rx_add_vid	= be_vlan_add_vid,
	.ndo_vlan_rx_kill_vid	= be_vlan_rem_vid,
	.ndo_set_vf_mac		= be_set_vf_mac,
	.ndo_set_vf_vlan	= be_set_vf_vlan,
	.ndo_set_vf_tx_rate	= be_set_vf_tx_rate,
	.ndo_get_vf_config	= be_get_vf_config
};

static void be_netdev_init(struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_rx_obj *rxo;
	int i;

	netdev->hw_features |= NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6 |
		NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_RXCSUM |
		NETIF_F_HW_VLAN_TX;
	if (be_multi_rxq(adapter))
		netdev->hw_features |= NETIF_F_RXHASH;

	netdev->features |= netdev->hw_features |
		NETIF_F_HW_VLAN_RX | NETIF_F_HW_VLAN_FILTER;

	netdev->vlan_features |= NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6 |
		NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;

	netdev->flags |= IFF_MULTICAST;

	/* Default settings for Rx and Tx flow control */
	adapter->rx_fc = true;
	adapter->tx_fc = true;

	netif_set_gso_max_size(netdev, 65535);

	BE_SET_NETDEV_OPS(netdev, &be_netdev_ops);

	SET_ETHTOOL_OPS(netdev, &be_ethtool_ops);

	for_all_rx_queues(adapter, rxo, i)
		netif_napi_add(netdev, &rxo->rx_eq.napi, be_poll_rx,
			BE_NAPI_WEIGHT);

	netif_napi_add(netdev, &adapter->tx_eq.napi, be_poll_tx_mcc,
		BE_NAPI_WEIGHT);
}

static void be_unmap_pci_bars(struct be_adapter *adapter)
{
	if (adapter->csr)
		iounmap(adapter->csr);
	if (adapter->db)
		iounmap(adapter->db);
	if (adapter->pcicfg && be_physfn(adapter))
		iounmap(adapter->pcicfg);
}

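/*
 * Map the PCI BARs this function needs: Lancer exposes only a doorbell BAR
 * (BAR 0); on BE2/BE3 the PF maps the CSR BAR (BAR 2), a doorbell BAR and
 * a pcicfg shadow BAR whose indices depend on the chip generation, while a
 * VF reaches its pcicfg space at a fixed offset from the doorbell BAR.
 */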
static int be_map_pci_bars(struct be_adapter *adapter)
{
	u8 __iomem *addr;
	int pcicfg_reg, db_reg;

	if (lancer_chip(adapter)) {
		addr = ioremap_nocache(pci_resource_start(adapter->pdev, 0),
			pci_resource_len(adapter->pdev, 0));
		if (addr == NULL)
			return -ENOMEM;
		adapter->db = addr;
		return 0;
	}

	if (be_physfn(adapter)) {
		addr = ioremap_nocache(pci_resource_start(adapter->pdev, 2),
				pci_resource_len(adapter->pdev, 2));
		if (addr == NULL)
			return -ENOMEM;
		adapter->csr = addr;
	}

	if (adapter->generation == BE_GEN2) {
		pcicfg_reg = 1;
		db_reg = 4;
	} else {
		pcicfg_reg = 0;
		if (be_physfn(adapter))
			db_reg = 4;
		else
			db_reg = 0;
	}
	addr = ioremap_nocache(pci_resource_start(adapter->pdev, db_reg),
				pci_resource_len(adapter->pdev, db_reg));
	if (addr == NULL)
		goto pci_map_err;
	adapter->db = addr;

	if (be_physfn(adapter)) {
		addr = ioremap_nocache(
				pci_resource_start(adapter->pdev, pcicfg_reg),
				pci_resource_len(adapter->pdev, pcicfg_reg));
		if (addr == NULL)
			goto pci_map_err;
		adapter->pcicfg = addr;
	} else
		adapter->pcicfg = adapter->db + SRIOV_VF_PCICFG_OFFSET;

	return 0;
pci_map_err:
	be_unmap_pci_bars(adapter);
	return -ENOMEM;
}

static void be_ctrl_cleanup(struct be_adapter *adapter)
{
	struct be_dma_mem *mem = &adapter->mbox_mem_alloced;

	be_unmap_pci_bars(adapter);

	if (mem->va)
		dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
				  mem->dma);

	mem = &adapter->mc_cmd_mem;
	if (mem->va)
		dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
				  mem->dma);
}

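/*
 * Set up everything needed to talk to the controller: map the BARs, carve
 * a 16-byte-aligned mailbox out of a coherent DMA allocation, allocate the
 * multicast command buffer, and initialize the mailbox and MCC locks.
 */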
static int be_ctrl_init(struct be_adapter *adapter)
{
	struct be_dma_mem *mbox_mem_alloc = &adapter->mbox_mem_alloced;
	struct be_dma_mem *mbox_mem_align = &adapter->mbox_mem;
	struct be_dma_mem *mc_cmd_mem = &adapter->mc_cmd_mem;
	int status;

	status = be_map_pci_bars(adapter);
	if (status)
		goto done;

	mbox_mem_alloc->size = sizeof(struct be_mcc_mailbox) + 16;
	mbox_mem_alloc->va = dma_alloc_coherent(&adapter->pdev->dev,
						mbox_mem_alloc->size,
						&mbox_mem_alloc->dma,
						GFP_KERNEL);
	if (!mbox_mem_alloc->va) {
		status = -ENOMEM;
		goto unmap_pci_bars;
	}

	mbox_mem_align->size = sizeof(struct be_mcc_mailbox);
	mbox_mem_align->va = PTR_ALIGN(mbox_mem_alloc->va, 16);
	mbox_mem_align->dma = PTR_ALIGN(mbox_mem_alloc->dma, 16);
	memset(mbox_mem_align->va, 0, sizeof(struct be_mcc_mailbox));

	mc_cmd_mem->size = sizeof(struct be_cmd_req_mcast_mac_config);
	mc_cmd_mem->va = dma_alloc_coherent(&adapter->pdev->dev,
					    mc_cmd_mem->size, &mc_cmd_mem->dma,
					    GFP_KERNEL);
	if (mc_cmd_mem->va == NULL) {
		status = -ENOMEM;
		goto free_mbox;
	}
	memset(mc_cmd_mem->va, 0, mc_cmd_mem->size);

	mutex_init(&adapter->mbox_lock);
	spin_lock_init(&adapter->mcc_lock);
	spin_lock_init(&adapter->mcc_cq_lock);

	init_completion(&adapter->flash_compl);
	pci_save_state(adapter->pdev);
	return 0;

free_mbox:
	dma_free_coherent(&adapter->pdev->dev, mbox_mem_alloc->size,
			  mbox_mem_alloc->va, mbox_mem_alloc->dma);

unmap_pci_bars:
	be_unmap_pci_bars(adapter);

done:
	return status;
}

static void be_stats_cleanup(struct be_adapter *adapter)
{
	struct be_dma_mem *cmd = &adapter->stats_cmd;

	if (cmd->va)
		dma_free_coherent(&adapter->pdev->dev, cmd->size,
				  cmd->va, cmd->dma);
}

static int be_stats_init(struct be_adapter *adapter)
{
	struct be_dma_mem *cmd = &adapter->stats_cmd;

	if (adapter->generation == BE_GEN2) {
		cmd->size = sizeof(struct be_cmd_req_get_stats_v0);
	} else {
		if (lancer_chip(adapter))
			cmd->size = sizeof(struct lancer_cmd_req_pport_stats);
		else
			cmd->size = sizeof(struct be_cmd_req_get_stats_v1);
	}
	cmd->va = dma_alloc_coherent(&adapter->pdev->dev, cmd->size, &cmd->dma,
				     GFP_KERNEL);
	if (cmd->va == NULL)
		return -1;
	memset(cmd->va, 0, cmd->size);
	return 0;
}

static void __devexit be_remove(struct pci_dev *pdev)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);

	if (!adapter)
		return;

	cancel_delayed_work_sync(&adapter->work);

	unregister_netdev(adapter->netdev);

	be_clear(adapter);

	be_stats_cleanup(adapter);

	be_ctrl_cleanup(adapter);

	kfree(adapter->vf_cfg);
	be_sriov_disable(adapter);

	be_msix_disable(adapter);

	pci_set_drvdata(pdev, NULL);
	pci_release_regions(pdev);
	pci_disable_device(pdev);

	free_netdev(adapter->netdev);
}

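/*
 * Pull the static configuration from firmware: FW version, port number,
 * function mode and capabilities, the permanent MAC address (PF, or any
 * function on Lancer), the VLAN table budget and the number of usable
 * TX queues.
 */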
static int be_get_config(struct be_adapter *adapter)
{
	int status;
	u8 mac[ETH_ALEN];

	status = be_cmd_get_fw_ver(adapter, adapter->fw_ver);
	if (status)
		return status;

	status = be_cmd_query_fw_cfg(adapter, &adapter->port_num,
			&adapter->function_mode, &adapter->function_caps);
	if (status)
		return status;

	memset(mac, 0, ETH_ALEN);

	/* A default permanent address is given to each VF for Lancer */
	if (be_physfn(adapter) || lancer_chip(adapter)) {
		status = be_cmd_mac_addr_query(adapter, mac,
			MAC_ADDRESS_TYPE_NETWORK, true /* permanent */, 0);

		if (status)
			return status;

		if (!is_valid_ether_addr(mac))
			return -EADDRNOTAVAIL;

		memcpy(adapter->netdev->dev_addr, mac, ETH_ALEN);
		memcpy(adapter->netdev->perm_addr, mac, ETH_ALEN);
	}

	if (adapter->function_mode & 0x400)
		adapter->max_vlans = BE_NUM_VLANS_SUPPORTED/4;
	else
		adapter->max_vlans = BE_NUM_VLANS_SUPPORTED;

	status = be_cmd_get_cntl_attributes(adapter);
	if (status)
		return status;

	if ((num_vfs && adapter->sriov_enabled) ||
	    (adapter->function_mode & 0x400) ||
	    lancer_chip(adapter) || !be_physfn(adapter)) {
		adapter->num_tx_qs = 1;
		netif_set_real_num_tx_queues(adapter->netdev,
			adapter->num_tx_qs);
	} else {
		adapter->num_tx_qs = MAX_TX_QS;
	}

	return 0;
}

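/*
 * Derive the chip generation from the PCI device ID. The Lancer device IDs
 * additionally require a valid SLI_INTF register with interface type 2;
 * the SLI family read from that register is cached for later use.
 */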
static int be_dev_family_check(struct be_adapter *adapter)
{
	struct pci_dev *pdev = adapter->pdev;
	u32 sli_intf = 0, if_type;

	switch (pdev->device) {
	case BE_DEVICE_ID1:
	case OC_DEVICE_ID1:
		adapter->generation = BE_GEN2;
		break;
	case BE_DEVICE_ID2:
	case OC_DEVICE_ID2:
		adapter->generation = BE_GEN3;
		break;
	case OC_DEVICE_ID3:
	case OC_DEVICE_ID4:
		pci_read_config_dword(pdev, SLI_INTF_REG_OFFSET, &sli_intf);
		if_type = (sli_intf & SLI_INTF_IF_TYPE_MASK) >>
						SLI_INTF_IF_TYPE_SHIFT;

		if (((sli_intf & SLI_INTF_VALID_MASK) != SLI_INTF_VALID) ||
			if_type != 0x02) {
			dev_err(&pdev->dev, "SLI_INTF reg val is not valid\n");
			return -EINVAL;
		}
		adapter->sli_family = ((sli_intf & SLI_INTF_FAMILY_MASK) >>
					 SLI_INTF_FAMILY_SHIFT);
		adapter->generation = BE_GEN3;
		break;
	default:
		adapter->generation = 0;
	}
	return 0;
}

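/*
 * Poll the SLIPORT status register until the ready bit is set:
 * 500 iterations with a 20ms sleep gives a worst-case wait of ~10s.
 */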
static int lancer_wait_ready(struct be_adapter *adapter)
{
#define SLIPORT_READY_TIMEOUT 500
	u32 sliport_status;
	int status = 0, i;

	for (i = 0; i < SLIPORT_READY_TIMEOUT; i++) {
		sliport_status = ioread32(adapter->db + SLIPORT_STATUS_OFFSET);
		if (sliport_status & SLIPORT_STATUS_RDY_MASK)
			break;

		msleep(20);
	}

	if (i == SLIPORT_READY_TIMEOUT)
		status = -1;

	return status;
}

static int lancer_test_and_set_rdy_state(struct be_adapter *adapter)
{
	int status;
	u32 sliport_status, err, reset_needed;

	status = lancer_wait_ready(adapter);
	if (!status) {
		sliport_status = ioread32(adapter->db + SLIPORT_STATUS_OFFSET);
		err = sliport_status & SLIPORT_STATUS_ERR_MASK;
		reset_needed = sliport_status & SLIPORT_STATUS_RN_MASK;
		if (err && reset_needed) {
			iowrite32(SLI_PORT_CONTROL_IP_MASK,
				  adapter->db + SLIPORT_CONTROL_OFFSET);

			/* check if the adapter has corrected the error */
			status = lancer_wait_ready(adapter);
			sliport_status = ioread32(adapter->db +
						  SLIPORT_STATUS_OFFSET);
			sliport_status &= (SLIPORT_STATUS_ERR_MASK |
					   SLIPORT_STATUS_RN_MASK);
			if (status || sliport_status)
				status = -1;
		} else if (err || reset_needed) {
			status = -1;
		}
	}
	return status;
}

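/*
 * PCI probe: the bring-up order matters. After mapping BARs and allocating
 * the control structures, the driver waits for POST, signals fw_init,
 * resets the function, queries the configuration, enables MSI-X and only
 * then runs be_setup() and registers the netdev. SR-IOV VF interfaces get
 * their MAC addresses and link speeds last.
 */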
static int __devinit be_probe(struct pci_dev *pdev,
			const struct pci_device_id *pdev_id)
{
	int status = 0;
	struct be_adapter *adapter;
	struct net_device *netdev;

	status = pci_enable_device(pdev);
	if (status)
		goto do_none;

	status = pci_request_regions(pdev, DRV_NAME);
	if (status)
		goto disable_dev;
	pci_set_master(pdev);

	netdev = alloc_etherdev_mq(sizeof(struct be_adapter), MAX_TX_QS);
	if (netdev == NULL) {
		status = -ENOMEM;
		goto rel_reg;
	}
	adapter = netdev_priv(netdev);
	adapter->pdev = pdev;
	pci_set_drvdata(pdev, adapter);

	status = be_dev_family_check(adapter);
	if (status)
		goto free_netdev;

	adapter->netdev = netdev;
	SET_NETDEV_DEV(netdev, &pdev->dev);

	status = dma_set_mask(&pdev->dev, DMA_BIT_MASK(64));
	if (!status) {
		netdev->features |= NETIF_F_HIGHDMA;
	} else {
		status = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
		if (status) {
			dev_err(&pdev->dev, "Could not set PCI DMA Mask\n");
			goto free_netdev;
		}
	}

	be_sriov_enable(adapter);
	if (adapter->sriov_enabled) {
		adapter->vf_cfg = kcalloc(num_vfs,
			sizeof(struct be_vf_cfg), GFP_KERNEL);
		if (!adapter->vf_cfg) {
			status = -ENOMEM;
			goto free_netdev;
		}
	}

	status = be_ctrl_init(adapter);
	if (status)
		goto free_vf_cfg;

	if (lancer_chip(adapter)) {
		status = lancer_test_and_set_rdy_state(adapter);
		if (status) {
			dev_err(&pdev->dev, "Adapter in non-recoverable error\n");
			goto ctrl_clean;
		}
	}

	/* sync up with fw's ready state */
	if (be_physfn(adapter)) {
		status = be_cmd_POST(adapter);
		if (status)
			goto ctrl_clean;
	}

	/* tell fw we're ready to fire cmds */
	status = be_cmd_fw_init(adapter);
	if (status)
		goto ctrl_clean;

	status = be_cmd_reset_function(adapter);
	if (status)
		goto ctrl_clean;

	status = be_stats_init(adapter);
	if (status)
		goto ctrl_clean;

	status = be_get_config(adapter);
	if (status)
		goto stats_clean;

	/* The INTR bit may be set in the card when probed by a kdump kernel
	 * after a crash.
	 */
	if (!lancer_chip(adapter))
		be_intr_set(adapter, false);

	be_msix_enable(adapter);

	INIT_DELAYED_WORK(&adapter->work, be_worker);

	status = be_setup(adapter);
	if (status)
		goto msix_disable;

	be_netdev_init(netdev);
	status = register_netdev(netdev);
	if (status != 0)
		goto unsetup;
	netif_carrier_off(netdev);

	if (be_physfn(adapter) && adapter->sriov_enabled) {
		u8 mac_speed;
		bool link_up;
		u16 vf, lnk_speed;

		if (!lancer_chip(adapter)) {
			status = be_vf_eth_addr_config(adapter);
			if (status)
				goto unreg_netdev;
		}

		for (vf = 0; vf < num_vfs; vf++) {
			status = be_cmd_link_status_query(adapter, &link_up,
					&mac_speed, &lnk_speed, vf + 1);
			if (!status)
				adapter->vf_cfg[vf].vf_tx_rate = lnk_speed * 10;
			else
				goto unreg_netdev;
		}
	}

	dev_info(&pdev->dev, "%s port %d\n", nic_name(pdev), adapter->port_num);

	schedule_delayed_work(&adapter->work, msecs_to_jiffies(100));
	return 0;

unreg_netdev:
	unregister_netdev(netdev);
unsetup:
	be_clear(adapter);
msix_disable:
	be_msix_disable(adapter);
stats_clean:
	be_stats_cleanup(adapter);
ctrl_clean:
	be_ctrl_cleanup(adapter);
free_vf_cfg:
	kfree(adapter->vf_cfg);
free_netdev:
	be_sriov_disable(adapter);
	free_netdev(netdev);
	pci_set_drvdata(pdev, NULL);
rel_reg:
	pci_release_regions(pdev);
disable_dev:
	pci_disable_device(pdev);
do_none:
	dev_err(&pdev->dev, "%s initialization failed\n", nic_name(pdev));
	return status;
}

static int be_suspend(struct pci_dev *pdev, pm_message_t state)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);
	struct net_device *netdev = adapter->netdev;

	cancel_delayed_work_sync(&adapter->work);
	if (adapter->wol)
		be_setup_wol(adapter, true);

	netif_device_detach(netdev);
	if (netif_running(netdev)) {
		rtnl_lock();
		be_close(netdev);
		rtnl_unlock();
	}
	be_cmd_get_flow_control(adapter, &adapter->tx_fc, &adapter->rx_fc);
	be_clear(adapter);

	be_msix_disable(adapter);
	pci_save_state(pdev);
	pci_disable_device(pdev);
	pci_set_power_state(pdev, pci_choose_state(pdev, state));
	return 0;
}

static int be_resume(struct pci_dev *pdev)
{
	int status = 0;
	struct be_adapter *adapter = pci_get_drvdata(pdev);
	struct net_device *netdev = adapter->netdev;

	netif_device_detach(netdev);

	status = pci_enable_device(pdev);
	if (status)
		return status;

	pci_set_power_state(pdev, 0);
	pci_restore_state(pdev);

	be_msix_enable(adapter);
	/* tell fw we're ready to fire cmds */
	status = be_cmd_fw_init(adapter);
	if (status)
		return status;

	be_setup(adapter);
	if (netif_running(netdev)) {
		rtnl_lock();
		be_open(netdev);
		rtnl_unlock();
	}
	netif_device_attach(netdev);

	if (adapter->wol)
		be_setup_wol(adapter, false);

	schedule_delayed_work(&adapter->work, msecs_to_jiffies(100));
	return 0;
}

/*
 * An FLR will stop BE from DMAing any data.
 */
static void be_shutdown(struct pci_dev *pdev)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);

	if (!adapter)
		return;

	cancel_delayed_work_sync(&adapter->work);

	netif_device_detach(adapter->netdev);

	if (adapter->wol)
		be_setup_wol(adapter, true);

	be_cmd_reset_function(adapter);

	pci_disable_device(pdev);
}

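/*
 * PCI EEH (error recovery) callbacks: on a detected channel error the
 * netdev is detached and the adapter torn down; slot_reset re-enables the
 * device and waits for POST; resume re-runs fw_init/be_setup() and
 * re-attaches the netdev.
 */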
static pci_ers_result_t be_eeh_err_detected(struct pci_dev *pdev,
				pci_channel_state_t state)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);
	struct net_device *netdev = adapter->netdev;

	dev_err(&adapter->pdev->dev, "EEH error detected\n");

	adapter->eeh_err = true;

	netif_device_detach(netdev);

	if (netif_running(netdev)) {
		rtnl_lock();
		be_close(netdev);
		rtnl_unlock();
	}
	be_clear(adapter);

	if (state == pci_channel_io_perm_failure)
		return PCI_ERS_RESULT_DISCONNECT;

	pci_disable_device(pdev);

	return PCI_ERS_RESULT_NEED_RESET;
}

static pci_ers_result_t be_eeh_reset(struct pci_dev *pdev)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);
	int status;

	dev_info(&adapter->pdev->dev, "EEH reset\n");
	adapter->eeh_err = false;

	status = pci_enable_device(pdev);
	if (status)
		return PCI_ERS_RESULT_DISCONNECT;

	pci_set_master(pdev);
	pci_set_power_state(pdev, 0);
	pci_restore_state(pdev);

	/* Check if card is ok and fw is ready */
	status = be_cmd_POST(adapter);
	if (status)
		return PCI_ERS_RESULT_DISCONNECT;

	return PCI_ERS_RESULT_RECOVERED;
}

static void be_eeh_resume(struct pci_dev *pdev)
{
	int status = 0;
	struct be_adapter *adapter = pci_get_drvdata(pdev);
	struct net_device *netdev = adapter->netdev;

	dev_info(&adapter->pdev->dev, "EEH resume\n");

	pci_save_state(pdev);

	/* tell fw we're ready to fire cmds */
	status = be_cmd_fw_init(adapter);
	if (status)
		goto err;

	status = be_setup(adapter);
	if (status)
		goto err;

	if (netif_running(netdev)) {
		status = be_open(netdev);
		if (status)
			goto err;
	}
	netif_device_attach(netdev);
	return;
err:
	dev_err(&adapter->pdev->dev, "EEH resume failed\n");
}

static struct pci_error_handlers be_eeh_handlers = {
	.error_detected = be_eeh_err_detected,
	.slot_reset = be_eeh_reset,
	.resume = be_eeh_resume,
};

static struct pci_driver be_driver = {
	.name = DRV_NAME,
	.id_table = be_dev_ids,
	.probe = be_probe,
	.remove = be_remove,
	.suspend = be_suspend,
	.resume = be_resume,
	.shutdown = be_shutdown,
	.err_handler = &be_eeh_handlers
};

static int __init be_init_module(void)
{
	if (rx_frag_size != 8192 && rx_frag_size != 4096 &&
	    rx_frag_size != 2048) {
		printk(KERN_WARNING DRV_NAME
			" : Module param rx_frag_size must be 2048/4096/8192."
			" Using 2048\n");
		rx_frag_size = 2048;
	}

	return pci_register_driver(&be_driver);
}
module_init(be_init_module);

static void __exit be_exit_module(void)
{
	pci_unregister_driver(&be_driver);
}
module_exit(be_exit_module);