/*
 * Copyright (C) 2005 - 2011 Emulex
 * All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version 2
 * as published by the Free Software Foundation. The full GNU General
 * Public License is included in this distribution in the file called COPYING.
 *
 * Contact Information:
 * linux-drivers@emulex.com
 *
 * Emulex
 * 3333 Susan Street
 * Costa Mesa, CA 92626
 */

#include <linux/prefetch.h>
#include "be.h"
#include "be_cmds.h"
#include <asm/div64.h>

MODULE_VERSION(DRV_VER);
MODULE_DEVICE_TABLE(pci, be_dev_ids);
MODULE_DESCRIPTION(DRV_DESC " " DRV_VER);
MODULE_AUTHOR("ServerEngines Corporation");
MODULE_LICENSE("GPL");

static ushort rx_frag_size = 2048;
static unsigned int num_vfs;
module_param(rx_frag_size, ushort, S_IRUGO);
module_param(num_vfs, uint, S_IRUGO);
MODULE_PARM_DESC(rx_frag_size, "Size of a fragment that holds rcvd data.");
MODULE_PARM_DESC(num_vfs, "Number of PCI VFs to initialize");

static DEFINE_PCI_DEVICE_TABLE(be_dev_ids) = {
	{ PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID1) },
	{ PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID2) },
	{ PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID1) },
	{ PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID2) },
	{ PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID3)},
	{ PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID4)},
	{ 0 }
};
MODULE_DEVICE_TABLE(pci, be_dev_ids);

/* UE Status Low CSR */
static const char * const ue_status_low_desc[] = {
	"CEV",
	"CTX",
	"DBUF",
	"ERX",
	"Host",
	"MPU",
	"NDMA",
	"PTC ",
	"RDMA ",
	"RXF ",
	"RXIPS ",
	"RXULP0 ",
	"RXULP1 ",
	"RXULP2 ",
	"TIM ",
	"TPOST ",
	"TPRE ",
	"TXIPS ",
	"TXULP0 ",
	"TXULP1 ",
	"UC ",
	"WDMA ",
	"TXULP2 ",
	"HOST1 ",
	"P0_OB_LINK ",
	"P1_OB_LINK ",
	"HOST_GPIO ",
	"MBOX ",
	"AXGMAC0",
	"AXGMAC1",
	"JTAG",
	"MPU_INTPEND"
};
/* UE Status High CSR */
static const char * const ue_status_hi_desc[] = {
	"LPCMEMHOST",
	"MGMT_MAC",
	"PCS0ONLINE",
	"MPU_IRAM",
	"PCS1ONLINE",
	"PCTL0",
	"PCTL1",
	"PMEM",
	"RR",
	"TXPB",
	"RXPP",
	"XAUI",
	"TXP",
	"ARM",
	"IPC",
	"HOST2",
	"HOST3",
	"HOST4",
	"HOST5",
	"HOST6",
	"HOST7",
	"HOST8",
	"HOST9",
	"NETC",
	"Unknown",
	"Unknown",
	"Unknown",
	"Unknown",
	"Unknown",
	"Unknown",
	"Unknown",
	"Unknown"
};

static void be_queue_free(struct be_adapter *adapter, struct be_queue_info *q)
{
	struct be_dma_mem *mem = &q->dma_mem;
	if (mem->va)
		dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
				  mem->dma);
}

static int be_queue_alloc(struct be_adapter *adapter, struct be_queue_info *q,
		u16 len, u16 entry_size)
{
	struct be_dma_mem *mem = &q->dma_mem;

	memset(q, 0, sizeof(*q));
	q->len = len;
	q->entry_size = entry_size;
	mem->size = len * entry_size;
	mem->va = dma_alloc_coherent(&adapter->pdev->dev, mem->size, &mem->dma,
				     GFP_KERNEL);
	if (!mem->va)
		return -1;
	memset(mem->va, 0, mem->size);
	return 0;
}

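/* Enable or disable host interrupt delivery by toggling the HOSTINTR bit
 * in the MEMBAR control register reached through the PCI config BAR.
 * Does nothing if the bit is already in the requested state or if an
 * EEH error has been flagged.
 */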
static void be_intr_set(struct be_adapter *adapter, bool enable)
{
	u8 __iomem *addr = adapter->pcicfg + PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET;
	u32 reg = ioread32(addr);
	u32 enabled = reg & MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;

	if (adapter->eeh_err)
		return;

	if (!enabled && enable)
		reg |= MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
	else if (enabled && !enable)
		reg &= ~MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
	else
		return;

	iowrite32(reg, addr);
}

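/* Ring the RX queue doorbell to tell the adapter how many receive buffers
 * were just posted. The wmb() makes the ring entries visible in memory
 * before the doorbell write.
 */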
static void be_rxq_notify(struct be_adapter *adapter, u16 qid, u16 posted)
{
	u32 val = 0;
	val |= qid & DB_RQ_RING_ID_MASK;
	val |= posted << DB_RQ_NUM_POSTED_SHIFT;

	wmb();
	iowrite32(val, adapter->db + DB_RQ_OFFSET);
}

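/* Ring the TX (ULP1) doorbell after 'posted' work request entries have
 * been written to the given TX ring.
 */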
static void be_txq_notify(struct be_adapter *adapter, u16 qid, u16 posted)
{
	u32 val = 0;
	val |= qid & DB_TXULP_RING_ID_MASK;
	val |= (posted & DB_TXULP_NUM_POSTED_MASK) << DB_TXULP_NUM_POSTED_SHIFT;

	wmb();
	iowrite32(val, adapter->db + DB_TXULP1_OFFSET);
}

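/* Notify the event queue doorbell: acknowledge 'num_popped' events and
 * optionally re-arm the EQ and/or clear the interrupt.
 */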
static void be_eq_notify(struct be_adapter *adapter, u16 qid,
		bool arm, bool clear_int, u16 num_popped)
{
	u32 val = 0;
	val |= qid & DB_EQ_RING_ID_MASK;
	val |= ((qid & DB_EQ_RING_ID_EXT_MASK) <<
			DB_EQ_RING_ID_EXT_MASK_SHIFT);

	if (adapter->eeh_err)
		return;

	if (arm)
		val |= 1 << DB_EQ_REARM_SHIFT;
	if (clear_int)
		val |= 1 << DB_EQ_CLR_SHIFT;
	val |= 1 << DB_EQ_EVNT_SHIFT;
	val |= num_popped << DB_EQ_NUM_POPPED_SHIFT;
	iowrite32(val, adapter->db + DB_EQ_OFFSET);
}

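/* Notify the completion queue doorbell: acknowledge 'num_popped'
 * completions and optionally re-arm the CQ.
 */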
void be_cq_notify(struct be_adapter *adapter, u16 qid, bool arm, u16 num_popped)
{
	u32 val = 0;
	val |= qid & DB_CQ_RING_ID_MASK;
	val |= ((qid & DB_CQ_RING_ID_EXT_MASK) <<
			DB_CQ_RING_ID_EXT_MASK_SHIFT);

	if (adapter->eeh_err)
		return;

	if (arm)
		val |= 1 << DB_CQ_REARM_SHIFT;
	val |= num_popped << DB_CQ_NUM_POPPED_SHIFT;
	iowrite32(val, adapter->db + DB_CQ_OFFSET);
}

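/* ndo_set_mac_address handler. On a PF the currently programmed pmac
 * entry is deleted and the new address added; a VF only updates the
 * netdev copy, since its MAC filter is programmed by the parent PF.
 */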
static int be_mac_addr_set(struct net_device *netdev, void *p)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct sockaddr *addr = p;
	int status = 0;

	if (!is_valid_ether_addr(addr->sa_data))
		return -EADDRNOTAVAIL;

	/* MAC addr configuration will be done in hardware for VFs
	 * by their corresponding PFs. Just copy to netdev addr here
	 */
	if (!be_physfn(adapter))
		goto netdev_addr;

	status = be_cmd_pmac_del(adapter, adapter->if_handle,
				adapter->pmac_id, 0);
	if (status)
		return status;

	status = be_cmd_pmac_add(adapter, (u8 *)addr->sa_data,
			adapter->if_handle, &adapter->pmac_id, 0);
netdev_addr:
	if (!status)
		memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);

	return status;
}

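/* Copy the BE2 (v0) hardware stats returned by the adapter into the
 * generation-independent driver stats in adapter->drv_stats.
 */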
static void populate_be2_stats(struct be_adapter *adapter)
{
	struct be_hw_stats_v0 *hw_stats = hw_stats_from_cmd(adapter);
	struct be_pmem_stats *pmem_sts = &hw_stats->pmem;
	struct be_rxf_stats_v0 *rxf_stats = &hw_stats->rxf;
	struct be_port_rxf_stats_v0 *port_stats =
		&rxf_stats->port[adapter->port_num];
	struct be_drv_stats *drvs = &adapter->drv_stats;

	be_dws_le_to_cpu(hw_stats, sizeof(*hw_stats));
	drvs->rx_pause_frames = port_stats->rx_pause_frames;
	drvs->rx_crc_errors = port_stats->rx_crc_errors;
	drvs->rx_control_frames = port_stats->rx_control_frames;
	drvs->rx_in_range_errors = port_stats->rx_in_range_errors;
	drvs->rx_frame_too_long = port_stats->rx_frame_too_long;
	drvs->rx_dropped_runt = port_stats->rx_dropped_runt;
	drvs->rx_ip_checksum_errs = port_stats->rx_ip_checksum_errs;
	drvs->rx_tcp_checksum_errs = port_stats->rx_tcp_checksum_errs;
	drvs->rx_udp_checksum_errs = port_stats->rx_udp_checksum_errs;
	drvs->rxpp_fifo_overflow_drop = port_stats->rx_fifo_overflow;
	drvs->rx_dropped_tcp_length = port_stats->rx_dropped_tcp_length;
	drvs->rx_dropped_too_small = port_stats->rx_dropped_too_small;
	drvs->rx_dropped_too_short = port_stats->rx_dropped_too_short;
	drvs->rx_out_range_errors = port_stats->rx_out_range_errors;
	drvs->rx_input_fifo_overflow_drop = port_stats->rx_input_fifo_overflow;
	drvs->rx_dropped_header_too_small =
		port_stats->rx_dropped_header_too_small;
	drvs->rx_address_match_errors = port_stats->rx_address_match_errors;
	drvs->rx_alignment_symbol_errors =
		port_stats->rx_alignment_symbol_errors;

	drvs->tx_pauseframes = port_stats->tx_pauseframes;
	drvs->tx_controlframes = port_stats->tx_controlframes;

	if (adapter->port_num)
		drvs->jabber_events = rxf_stats->port1_jabber_events;
	else
		drvs->jabber_events = rxf_stats->port0_jabber_events;
	drvs->rx_drops_no_pbuf = rxf_stats->rx_drops_no_pbuf;
	drvs->rx_drops_no_txpb = rxf_stats->rx_drops_no_txpb;
	drvs->rx_drops_no_erx_descr = rxf_stats->rx_drops_no_erx_descr;
	drvs->rx_drops_invalid_ring = rxf_stats->rx_drops_invalid_ring;
	drvs->forwarded_packets = rxf_stats->forwarded_packets;
	drvs->rx_drops_mtu = rxf_stats->rx_drops_mtu;
	drvs->rx_drops_no_tpre_descr = rxf_stats->rx_drops_no_tpre_descr;
	drvs->rx_drops_too_many_frags = rxf_stats->rx_drops_too_many_frags;
	adapter->drv_stats.eth_red_drops = pmem_sts->eth_red_drops;
}

static void populate_be3_stats(struct be_adapter *adapter)
{
	struct be_hw_stats_v1 *hw_stats = hw_stats_from_cmd(adapter);
	struct be_pmem_stats *pmem_sts = &hw_stats->pmem;
	struct be_rxf_stats_v1 *rxf_stats = &hw_stats->rxf;
	struct be_port_rxf_stats_v1 *port_stats =
		&rxf_stats->port[adapter->port_num];
	struct be_drv_stats *drvs = &adapter->drv_stats;

	be_dws_le_to_cpu(hw_stats, sizeof(*hw_stats));
	drvs->rx_pause_frames = port_stats->rx_pause_frames;
	drvs->rx_crc_errors = port_stats->rx_crc_errors;
	drvs->rx_control_frames = port_stats->rx_control_frames;
	drvs->rx_in_range_errors = port_stats->rx_in_range_errors;
	drvs->rx_frame_too_long = port_stats->rx_frame_too_long;
	drvs->rx_dropped_runt = port_stats->rx_dropped_runt;
	drvs->rx_ip_checksum_errs = port_stats->rx_ip_checksum_errs;
	drvs->rx_tcp_checksum_errs = port_stats->rx_tcp_checksum_errs;
	drvs->rx_udp_checksum_errs = port_stats->rx_udp_checksum_errs;
	drvs->rx_dropped_tcp_length = port_stats->rx_dropped_tcp_length;
	drvs->rx_dropped_too_small = port_stats->rx_dropped_too_small;
	drvs->rx_dropped_too_short = port_stats->rx_dropped_too_short;
	drvs->rx_out_range_errors = port_stats->rx_out_range_errors;
	drvs->rx_dropped_header_too_small =
		port_stats->rx_dropped_header_too_small;
	drvs->rx_input_fifo_overflow_drop =
		port_stats->rx_input_fifo_overflow_drop;
	drvs->rx_address_match_errors = port_stats->rx_address_match_errors;
	drvs->rx_alignment_symbol_errors =
		port_stats->rx_alignment_symbol_errors;
	drvs->rxpp_fifo_overflow_drop = port_stats->rxpp_fifo_overflow_drop;
	drvs->tx_pauseframes = port_stats->tx_pauseframes;
	drvs->tx_controlframes = port_stats->tx_controlframes;
	drvs->jabber_events = port_stats->jabber_events;
	drvs->rx_drops_no_pbuf = rxf_stats->rx_drops_no_pbuf;
	drvs->rx_drops_no_txpb = rxf_stats->rx_drops_no_txpb;
	drvs->rx_drops_no_erx_descr = rxf_stats->rx_drops_no_erx_descr;
	drvs->rx_drops_invalid_ring = rxf_stats->rx_drops_invalid_ring;
	drvs->forwarded_packets = rxf_stats->forwarded_packets;
	drvs->rx_drops_mtu = rxf_stats->rx_drops_mtu;
	drvs->rx_drops_no_tpre_descr = rxf_stats->rx_drops_no_tpre_descr;
	drvs->rx_drops_too_many_frags = rxf_stats->rx_drops_too_many_frags;
	adapter->drv_stats.eth_red_drops = pmem_sts->eth_red_drops;
}

static void populate_lancer_stats(struct be_adapter *adapter)
{
	struct be_drv_stats *drvs = &adapter->drv_stats;
	struct lancer_pport_stats *pport_stats =
					pport_stats_from_cmd(adapter);

	be_dws_le_to_cpu(pport_stats, sizeof(*pport_stats));
	drvs->rx_pause_frames = pport_stats->rx_pause_frames_lo;
	drvs->rx_crc_errors = pport_stats->rx_crc_errors_lo;
	drvs->rx_control_frames = pport_stats->rx_control_frames_lo;
	drvs->rx_in_range_errors = pport_stats->rx_in_range_errors;
	drvs->rx_frame_too_long = pport_stats->rx_frames_too_long_lo;
	drvs->rx_dropped_runt = pport_stats->rx_dropped_runt;
	drvs->rx_ip_checksum_errs = pport_stats->rx_ip_checksum_errors;
	drvs->rx_tcp_checksum_errs = pport_stats->rx_tcp_checksum_errors;
	drvs->rx_udp_checksum_errs = pport_stats->rx_udp_checksum_errors;
	drvs->rx_dropped_tcp_length =
				pport_stats->rx_dropped_invalid_tcp_length;
	drvs->rx_dropped_too_small = pport_stats->rx_dropped_too_small;
	drvs->rx_dropped_too_short = pport_stats->rx_dropped_too_short;
	drvs->rx_out_range_errors = pport_stats->rx_out_of_range_errors;
	drvs->rx_dropped_header_too_small =
				pport_stats->rx_dropped_header_too_small;
	drvs->rx_input_fifo_overflow_drop = pport_stats->rx_fifo_overflow;
	drvs->rx_address_match_errors = pport_stats->rx_address_match_errors;
	drvs->rx_alignment_symbol_errors = pport_stats->rx_symbol_errors_lo;
	drvs->rxpp_fifo_overflow_drop = pport_stats->rx_fifo_overflow;
	drvs->tx_pauseframes = pport_stats->tx_pause_frames_lo;
	drvs->tx_controlframes = pport_stats->tx_control_frames_lo;
	drvs->jabber_events = pport_stats->rx_jabbers;
	drvs->rx_drops_invalid_ring = pport_stats->rx_drops_invalid_queue;
	drvs->forwarded_packets = pport_stats->num_forwards_lo;
	drvs->rx_drops_mtu = pport_stats->rx_drops_mtu_lo;
	drvs->rx_drops_too_many_frags =
				pport_stats->rx_drops_too_many_frags_lo;
}

void be_parse_stats(struct be_adapter *adapter)
{
	struct be_erx_stats_v1 *erx = be_erx_stats_from_cmd(adapter);
	struct be_rx_obj *rxo;
	int i;

	if (adapter->generation == BE_GEN3) {
		if (lancer_chip(adapter))
			populate_lancer_stats(adapter);
		else
			populate_be3_stats(adapter);
	} else {
		populate_be2_stats(adapter);
	}

	/* as erx_v1 is longer than v0, ok to use v1 defn for v0 access */
	for_all_rx_queues(adapter, rxo, i)
		rx_stats(rxo)->rx_drops_no_frags =
			erx->rx_drops_no_fragments[rxo->q.id];
}

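/* ndo_get_stats64 handler: aggregate the per-RX/TX-queue packet and byte
 * counters (read consistently via the u64_stats sync points) and derive
 * the netdev error counters from the parsed hardware stats.
 */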
static struct rtnl_link_stats64 *be_get_stats64(struct net_device *netdev,
					struct rtnl_link_stats64 *stats)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_drv_stats *drvs = &adapter->drv_stats;
	struct be_rx_obj *rxo;
	struct be_tx_obj *txo;
	u64 pkts, bytes;
	unsigned int start;
	int i;

	for_all_rx_queues(adapter, rxo, i) {
		const struct be_rx_stats *rx_stats = rx_stats(rxo);
		do {
			start = u64_stats_fetch_begin_bh(&rx_stats->sync);
			pkts = rx_stats(rxo)->rx_pkts;
			bytes = rx_stats(rxo)->rx_bytes;
		} while (u64_stats_fetch_retry_bh(&rx_stats->sync, start));
		stats->rx_packets += pkts;
		stats->rx_bytes += bytes;
		stats->multicast += rx_stats(rxo)->rx_mcast_pkts;
		stats->rx_dropped += rx_stats(rxo)->rx_drops_no_skbs +
					rx_stats(rxo)->rx_drops_no_frags;
	}

	for_all_tx_queues(adapter, txo, i) {
		const struct be_tx_stats *tx_stats = tx_stats(txo);
		do {
			start = u64_stats_fetch_begin_bh(&tx_stats->sync);
			pkts = tx_stats(txo)->tx_pkts;
			bytes = tx_stats(txo)->tx_bytes;
		} while (u64_stats_fetch_retry_bh(&tx_stats->sync, start));
		stats->tx_packets += pkts;
		stats->tx_bytes += bytes;
	}

	/* bad pkts received */
	stats->rx_errors = drvs->rx_crc_errors +
		drvs->rx_alignment_symbol_errors +
		drvs->rx_in_range_errors +
		drvs->rx_out_range_errors +
		drvs->rx_frame_too_long +
		drvs->rx_dropped_too_small +
		drvs->rx_dropped_too_short +
		drvs->rx_dropped_header_too_small +
		drvs->rx_dropped_tcp_length +
		drvs->rx_dropped_runt;

	/* detailed rx errors */
	stats->rx_length_errors = drvs->rx_in_range_errors +
		drvs->rx_out_range_errors +
		drvs->rx_frame_too_long;

	stats->rx_crc_errors = drvs->rx_crc_errors;

	/* frame alignment errors */
	stats->rx_frame_errors = drvs->rx_alignment_symbol_errors;

	/* receiver fifo overrun */
	/* drops_no_pbuf is not per i/f, it's per BE card */
	stats->rx_fifo_errors = drvs->rxpp_fifo_overflow_drop +
				drvs->rx_input_fifo_overflow_drop +
				drvs->rx_drops_no_pbuf;
	return stats;
}

void be_link_status_update(struct be_adapter *adapter, u32 link_status)
{
	struct net_device *netdev = adapter->netdev;

	/* when link status changes, link speed must be re-queried from card */
	adapter->link_speed = -1;
	if ((link_status & LINK_STATUS_MASK) == LINK_UP) {
		netif_carrier_on(netdev);
		dev_info(&adapter->pdev->dev, "%s: Link up\n", netdev->name);
	} else {
		netif_carrier_off(netdev);
		dev_info(&adapter->pdev->dev, "%s: Link down\n", netdev->name);
	}
}

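/* Account one transmit request in the per-TX-queue stats */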
static void be_tx_stats_update(struct be_tx_obj *txo,
			u32 wrb_cnt, u32 copied, u32 gso_segs, bool stopped)
{
	struct be_tx_stats *stats = tx_stats(txo);

	u64_stats_update_begin(&stats->sync);
	stats->tx_reqs++;
	stats->tx_wrbs += wrb_cnt;
	stats->tx_bytes += copied;
	stats->tx_pkts += (gso_segs ? gso_segs : 1);
	if (stopped)
		stats->tx_stops++;
	u64_stats_update_end(&stats->sync);
}

/* Determine number of WRB entries needed to xmit data in an skb */
static u32 wrb_cnt_for_skb(struct be_adapter *adapter, struct sk_buff *skb,
							bool *dummy)
{
	int cnt = (skb->len > skb->data_len);

	cnt += skb_shinfo(skb)->nr_frags;

	/* to account for hdr wrb */
	cnt++;
	if (lancer_chip(adapter) || !(cnt & 1)) {
		*dummy = false;
	} else {
		/* add a dummy to make it an even num */
		cnt++;
		*dummy = true;
	}
	BUG_ON(cnt > BE_MAX_TX_FRAG_COUNT);
	return cnt;
}

static inline void wrb_fill(struct be_eth_wrb *wrb, u64 addr, int len)
{
	wrb->frag_pa_hi = upper_32_bits(addr);
	wrb->frag_pa_lo = addr & 0xFFFFFFFF;
	wrb->frag_len = len & ETH_WRB_FRAG_LEN_MASK;
}

static void wrb_fill_hdr(struct be_adapter *adapter, struct be_eth_hdr_wrb *hdr,
		struct sk_buff *skb, u32 wrb_cnt, u32 len)
{
	u8 vlan_prio = 0;
	u16 vlan_tag = 0;

	memset(hdr, 0, sizeof(*hdr));

	AMAP_SET_BITS(struct amap_eth_hdr_wrb, crc, hdr, 1);

	if (skb_is_gso(skb)) {
		AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso, hdr, 1);
		AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso_mss,
			hdr, skb_shinfo(skb)->gso_size);
		if (skb_is_gso_v6(skb) && !lancer_chip(adapter))
			AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso6, hdr, 1);
		if (lancer_chip(adapter) && adapter->sli_family ==
							LANCER_A0_SLI_FAMILY) {
			AMAP_SET_BITS(struct amap_eth_hdr_wrb, ipcs, hdr, 1);
			if (is_tcp_pkt(skb))
				AMAP_SET_BITS(struct amap_eth_hdr_wrb,
								tcpcs, hdr, 1);
			else if (is_udp_pkt(skb))
				AMAP_SET_BITS(struct amap_eth_hdr_wrb,
								udpcs, hdr, 1);
		}
	} else if (skb->ip_summed == CHECKSUM_PARTIAL) {
		if (is_tcp_pkt(skb))
			AMAP_SET_BITS(struct amap_eth_hdr_wrb, tcpcs, hdr, 1);
		else if (is_udp_pkt(skb))
			AMAP_SET_BITS(struct amap_eth_hdr_wrb, udpcs, hdr, 1);
	}

	if (vlan_tx_tag_present(skb)) {
		AMAP_SET_BITS(struct amap_eth_hdr_wrb, vlan, hdr, 1);
		vlan_tag = vlan_tx_tag_get(skb);
		vlan_prio = (vlan_tag & VLAN_PRIO_MASK) >> VLAN_PRIO_SHIFT;
		/* If vlan priority provided by OS is NOT in available bmap */
		if (!(adapter->vlan_prio_bmap & (1 << vlan_prio)))
			vlan_tag = (vlan_tag & ~VLAN_PRIO_MASK) |
					adapter->recommended_prio;
		AMAP_SET_BITS(struct amap_eth_hdr_wrb, vlan_tag, hdr, vlan_tag);
	}

	AMAP_SET_BITS(struct amap_eth_hdr_wrb, event, hdr, 1);
	AMAP_SET_BITS(struct amap_eth_hdr_wrb, complete, hdr, 1);
	AMAP_SET_BITS(struct amap_eth_hdr_wrb, num_wrb, hdr, wrb_cnt);
	AMAP_SET_BITS(struct amap_eth_hdr_wrb, len, hdr, len);
}

static void unmap_tx_frag(struct device *dev, struct be_eth_wrb *wrb,
		bool unmap_single)
{
	dma_addr_t dma;

	be_dws_le_to_cpu(wrb, sizeof(*wrb));

	dma = (u64)wrb->frag_pa_hi << 32 | (u64)wrb->frag_pa_lo;
	if (wrb->frag_len) {
		if (unmap_single)
			dma_unmap_single(dev, dma, wrb->frag_len,
					DMA_TO_DEVICE);
		else
			dma_unmap_page(dev, dma, wrb->frag_len, DMA_TO_DEVICE);
	}
}

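/* Map the skb's linear data and page fragments for DMA and write one WRB
 * per mapping, plus an optional dummy WRB and the leading header WRB.
 * Returns the number of data bytes covered, or 0 after unwinding the
 * mappings on a DMA mapping error.
 */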
static int make_tx_wrbs(struct be_adapter *adapter, struct be_queue_info *txq,
		struct sk_buff *skb, u32 wrb_cnt, bool dummy_wrb)
{
	dma_addr_t busaddr;
	int i, copied = 0;
	struct device *dev = &adapter->pdev->dev;
	struct sk_buff *first_skb = skb;
	struct be_eth_wrb *wrb;
	struct be_eth_hdr_wrb *hdr;
	bool map_single = false;
	u16 map_head;

	hdr = queue_head_node(txq);
	queue_head_inc(txq);
	map_head = txq->head;

	if (skb->len > skb->data_len) {
		int len = skb_headlen(skb);
		busaddr = dma_map_single(dev, skb->data, len, DMA_TO_DEVICE);
		if (dma_mapping_error(dev, busaddr))
			goto dma_err;
		map_single = true;
		wrb = queue_head_node(txq);
		wrb_fill(wrb, busaddr, len);
		be_dws_cpu_to_le(wrb, sizeof(*wrb));
		queue_head_inc(txq);
		copied += len;
	}

	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		struct skb_frag_struct *frag =
			&skb_shinfo(skb)->frags[i];
		busaddr = dma_map_page(dev, frag->page, frag->page_offset,
				       frag->size, DMA_TO_DEVICE);
		if (dma_mapping_error(dev, busaddr))
			goto dma_err;
		wrb = queue_head_node(txq);
		wrb_fill(wrb, busaddr, frag->size);
		be_dws_cpu_to_le(wrb, sizeof(*wrb));
		queue_head_inc(txq);
		copied += frag->size;
	}

	if (dummy_wrb) {
		wrb = queue_head_node(txq);
		wrb_fill(wrb, 0, 0);
		be_dws_cpu_to_le(wrb, sizeof(*wrb));
		queue_head_inc(txq);
	}

	wrb_fill_hdr(adapter, hdr, first_skb, wrb_cnt, copied);
	be_dws_cpu_to_le(hdr, sizeof(*hdr));

	return copied;
dma_err:
	txq->head = map_head;
	while (copied) {
		wrb = queue_head_node(txq);
		unmap_tx_frag(dev, wrb, map_single);
		map_single = false;
		copied -= wrb->frag_len;
		queue_head_inc(txq);
	}
	return 0;
}

static netdev_tx_t be_xmit(struct sk_buff *skb,
			struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_tx_obj *txo = &adapter->tx_obj[skb_get_queue_mapping(skb)];
	struct be_queue_info *txq = &txo->q;
	u32 wrb_cnt = 0, copied = 0;
	u32 start = txq->head;
	bool dummy_wrb, stopped = false;

	wrb_cnt = wrb_cnt_for_skb(adapter, skb, &dummy_wrb);

	copied = make_tx_wrbs(adapter, txq, skb, wrb_cnt, dummy_wrb);
	if (copied) {
		/* record the sent skb in the sent_skb table */
		BUG_ON(txo->sent_skb_list[start]);
		txo->sent_skb_list[start] = skb;

		/* Ensure txq has space for the next skb; Else stop the queue
		 * *BEFORE* ringing the tx doorbell, so that we serialize the
		 * tx compls of the current transmit which'll wake up the queue
		 */
		atomic_add(wrb_cnt, &txq->used);
		if ((BE_MAX_TX_FRAG_COUNT + atomic_read(&txq->used)) >=
								txq->len) {
			netif_stop_subqueue(netdev, skb_get_queue_mapping(skb));
			stopped = true;
		}

		be_txq_notify(adapter, txq->id, wrb_cnt);

		be_tx_stats_update(txo, wrb_cnt, copied,
				skb_shinfo(skb)->gso_segs, stopped);
	} else {
		txq->head = start;
		dev_kfree_skb_any(skb);
	}
	return NETDEV_TX_OK;
}

static int be_change_mtu(struct net_device *netdev, int new_mtu)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	if (new_mtu < BE_MIN_MTU ||
			new_mtu > (BE_MAX_JUMBO_FRAME_SIZE -
					(ETH_HLEN + ETH_FCS_LEN))) {
		dev_info(&adapter->pdev->dev,
			"MTU must be between %d and %d bytes\n",
			BE_MIN_MTU,
			(BE_MAX_JUMBO_FRAME_SIZE - (ETH_HLEN + ETH_FCS_LEN)));
		return -EINVAL;
	}
	dev_info(&adapter->pdev->dev, "MTU changed from %d to %d bytes\n",
			netdev->mtu, new_mtu);
	netdev->mtu = new_mtu;
	return 0;
}

/*
 * A max of 64 (BE_NUM_VLANS_SUPPORTED) vlans can be configured in BE.
 * If the user configures more, place BE in vlan promiscuous mode.
 */
static int be_vid_config(struct be_adapter *adapter, bool vf, u32 vf_num)
{
	u16 vtag[BE_NUM_VLANS_SUPPORTED];
	u16 ntags = 0, i;
	int status = 0;
	u32 if_handle;

	if (vf) {
		if_handle = adapter->vf_cfg[vf_num].vf_if_handle;
		vtag[0] = cpu_to_le16(adapter->vf_cfg[vf_num].vf_vlan_tag);
		status = be_cmd_vlan_config(adapter, if_handle, vtag, 1, 1, 0);
	}

	/* No need to further configure vids if in promiscuous mode */
	if (adapter->promiscuous)
		return 0;

	if (adapter->vlans_added <= adapter->max_vlans) {
		/* Construct VLAN Table to give to HW */
		for (i = 0; i < VLAN_N_VID; i++) {
			if (adapter->vlan_tag[i]) {
				vtag[ntags] = cpu_to_le16(i);
				ntags++;
			}
		}
		status = be_cmd_vlan_config(adapter, adapter->if_handle,
					vtag, ntags, 1, 0);
	} else {
		status = be_cmd_vlan_config(adapter, adapter->if_handle,
					NULL, 0, 1, 1);
	}

	return status;
}

static void be_vlan_add_vid(struct net_device *netdev, u16 vid)
{
	struct be_adapter *adapter = netdev_priv(netdev);

	adapter->vlans_added++;
	if (!be_physfn(adapter))
		return;

	adapter->vlan_tag[vid] = 1;
	if (adapter->vlans_added <= (adapter->max_vlans + 1))
		be_vid_config(adapter, false, 0);
}

static void be_vlan_rem_vid(struct net_device *netdev, u16 vid)
{
	struct be_adapter *adapter = netdev_priv(netdev);

	adapter->vlans_added--;

	if (!be_physfn(adapter))
		return;

	adapter->vlan_tag[vid] = 0;
	if (adapter->vlans_added <= adapter->max_vlans)
		be_vid_config(adapter, false, 0);
}

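/* Program the adapter's RX filter for promiscuous, all-multicast or the
 * exact multicast list; re-applies the VLAN configuration when leaving
 * promiscuous mode.
 */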
static void be_set_multicast_list(struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);

	if (netdev->flags & IFF_PROMISC) {
		be_cmd_rx_filter(adapter, IFF_PROMISC, ON);
		adapter->promiscuous = true;
		goto done;
	}

	/* BE was previously in promiscuous mode; disable it */
	if (adapter->promiscuous) {
		adapter->promiscuous = false;
		be_cmd_rx_filter(adapter, IFF_PROMISC, OFF);

		if (adapter->vlans_added)
			be_vid_config(adapter, false, 0);
	}

	/* Enable multicast promisc if num configured exceeds what we support */
	if (netdev->flags & IFF_ALLMULTI ||
			netdev_mc_count(netdev) > BE_MAX_MC) {
		be_cmd_rx_filter(adapter, IFF_ALLMULTI, ON);
		goto done;
	}

	be_cmd_rx_filter(adapter, IFF_MULTICAST, ON);
done:
	return;
}

static int be_set_vf_mac(struct net_device *netdev, int vf, u8 *mac)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	int status;

	if (!adapter->sriov_enabled)
		return -EPERM;

	if (!is_valid_ether_addr(mac) || (vf >= num_vfs))
		return -EINVAL;

	if (adapter->vf_cfg[vf].vf_pmac_id != BE_INVALID_PMAC_ID)
		status = be_cmd_pmac_del(adapter,
					adapter->vf_cfg[vf].vf_if_handle,
					adapter->vf_cfg[vf].vf_pmac_id, vf + 1);

	status = be_cmd_pmac_add(adapter, mac,
				adapter->vf_cfg[vf].vf_if_handle,
				&adapter->vf_cfg[vf].vf_pmac_id, vf + 1);

	if (status)
		dev_err(&adapter->pdev->dev, "MAC %pM set on VF %d Failed\n",
				mac, vf);
	else
		memcpy(adapter->vf_cfg[vf].vf_mac_addr, mac, ETH_ALEN);

	return status;
}

static int be_get_vf_config(struct net_device *netdev, int vf,
			struct ifla_vf_info *vi)
{
	struct be_adapter *adapter = netdev_priv(netdev);

	if (!adapter->sriov_enabled)
		return -EPERM;

	if (vf >= num_vfs)
		return -EINVAL;

	vi->vf = vf;
	vi->tx_rate = adapter->vf_cfg[vf].vf_tx_rate;
	vi->vlan = adapter->vf_cfg[vf].vf_vlan_tag;
	vi->qos = 0;
	memcpy(&vi->mac, adapter->vf_cfg[vf].vf_mac_addr, ETH_ALEN);

	return 0;
}

static int be_set_vf_vlan(struct net_device *netdev,
			int vf, u16 vlan, u8 qos)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	int status = 0;

	if (!adapter->sriov_enabled)
		return -EPERM;

	if ((vf >= num_vfs) || (vlan > 4095))
		return -EINVAL;

	if (vlan) {
		adapter->vf_cfg[vf].vf_vlan_tag = vlan;
		adapter->vlans_added++;
	} else {
		adapter->vf_cfg[vf].vf_vlan_tag = 0;
		adapter->vlans_added--;
	}

	status = be_vid_config(adapter, true, vf);

	if (status)
		dev_info(&adapter->pdev->dev,
				"VLAN %d config on VF %d failed\n", vlan, vf);
	return status;
}

static int be_set_vf_tx_rate(struct net_device *netdev,
			int vf, int rate)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	int status = 0;

	if (!adapter->sriov_enabled)
		return -EPERM;

	if ((vf >= num_vfs) || (rate < 0))
		return -EINVAL;

	if (rate > 10000)
		rate = 10000;

	adapter->vf_cfg[vf].vf_tx_rate = rate;
	status = be_cmd_set_qos(adapter, rate / 10, vf + 1);

	if (status)
		dev_info(&adapter->pdev->dev,
				"tx rate %d on VF %d failed\n", rate, vf);
	return status;
}

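/* Adaptive interrupt coalescing: once a second, recompute the EQ delay
 * from the measured RX packet rate and reprogram it if it changed.
 */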
static void be_rx_eqd_update(struct be_adapter *adapter, struct be_rx_obj *rxo)
{
	struct be_eq_obj *rx_eq = &rxo->rx_eq;
	struct be_rx_stats *stats = rx_stats(rxo);
	ulong now = jiffies;
	ulong delta = now - stats->rx_jiffies;
	u64 pkts;
	unsigned int start, eqd;

	if (!rx_eq->enable_aic)
		return;

	/* Wrapped around */
	if (time_before(now, stats->rx_jiffies)) {
		stats->rx_jiffies = now;
		return;
	}

	/* Update once a second */
	if (delta < HZ)
		return;

	do {
		start = u64_stats_fetch_begin_bh(&stats->sync);
		pkts = stats->rx_pkts;
	} while (u64_stats_fetch_retry_bh(&stats->sync, start));

	stats->rx_pps = (pkts - stats->rx_pkts_prev) / (delta / HZ);
	stats->rx_pkts_prev = pkts;
	stats->rx_jiffies = now;
	eqd = stats->rx_pps / 110000;
	eqd = eqd << 3;
	if (eqd > rx_eq->max_eqd)
		eqd = rx_eq->max_eqd;
	if (eqd < rx_eq->min_eqd)
		eqd = rx_eq->min_eqd;
	if (eqd < 10)
		eqd = 0;
	if (eqd != rx_eq->cur_eqd) {
		be_cmd_modify_eqd(adapter, rx_eq->q.id, eqd);
		rx_eq->cur_eqd = eqd;
	}
}

static void be_rx_stats_update(struct be_rx_obj *rxo,
		struct be_rx_compl_info *rxcp)
{
	struct be_rx_stats *stats = rx_stats(rxo);

	u64_stats_update_begin(&stats->sync);
	stats->rx_compl++;
	stats->rx_bytes += rxcp->pkt_size;
	stats->rx_pkts++;
	if (rxcp->pkt_type == BE_MULTICAST_PACKET)
		stats->rx_mcast_pkts++;
	if (rxcp->err)
		stats->rx_compl_err++;
	u64_stats_update_end(&stats->sync);
}

static inline bool csum_passed(struct be_rx_compl_info *rxcp)
{
	/* L4 checksum is not reliable for non TCP/UDP packets.
	 * Also ignore ipcksm for ipv6 pkts */
	return (rxcp->tcpf || rxcp->udpf) && rxcp->l4_csum &&
				(rxcp->ip_csum || rxcp->ipv6);
}

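/* Return the page_info for the given RX frag index; the backing page is
 * unmapped from the device if this was its last user.
 */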
static struct be_rx_page_info *
get_rx_page_info(struct be_adapter *adapter,
		struct be_rx_obj *rxo,
		u16 frag_idx)
{
	struct be_rx_page_info *rx_page_info;
	struct be_queue_info *rxq = &rxo->q;

	rx_page_info = &rxo->page_info_tbl[frag_idx];
	BUG_ON(!rx_page_info->page);

	if (rx_page_info->last_page_user) {
		dma_unmap_page(&adapter->pdev->dev,
			       dma_unmap_addr(rx_page_info, bus),
			       adapter->big_page_size, DMA_FROM_DEVICE);
		rx_page_info->last_page_user = false;
	}

	atomic_dec(&rxq->used);
	return rx_page_info;
}

/* Throw away the data in the Rx completion */
static void be_rx_compl_discard(struct be_adapter *adapter,
		struct be_rx_obj *rxo,
		struct be_rx_compl_info *rxcp)
{
	struct be_queue_info *rxq = &rxo->q;
	struct be_rx_page_info *page_info;
	u16 i, num_rcvd = rxcp->num_rcvd;

	for (i = 0; i < num_rcvd; i++) {
		page_info = get_rx_page_info(adapter, rxo, rxcp->rxq_idx);
		put_page(page_info->page);
		memset(page_info, 0, sizeof(*page_info));
		index_inc(&rxcp->rxq_idx, rxq->len);
	}
}

/*
 * skb_fill_rx_data forms a complete skb for an ether frame
 * indicated by rxcp.
 */
static void skb_fill_rx_data(struct be_adapter *adapter, struct be_rx_obj *rxo,
			struct sk_buff *skb, struct be_rx_compl_info *rxcp)
{
	struct be_queue_info *rxq = &rxo->q;
	struct be_rx_page_info *page_info;
	u16 i, j;
	u16 hdr_len, curr_frag_len, remaining;
	u8 *start;

	page_info = get_rx_page_info(adapter, rxo, rxcp->rxq_idx);
	start = page_address(page_info->page) + page_info->page_offset;
	prefetch(start);

	/* Copy data in the first descriptor of this completion */
	curr_frag_len = min(rxcp->pkt_size, rx_frag_size);

	/* Copy the header portion into skb_data */
	hdr_len = min(BE_HDR_LEN, curr_frag_len);
	memcpy(skb->data, start, hdr_len);
	skb->len = curr_frag_len;
	if (curr_frag_len <= BE_HDR_LEN) { /* tiny packet */
		/* Complete packet has now been moved to data */
		put_page(page_info->page);
		skb->data_len = 0;
		skb->tail += curr_frag_len;
	} else {
		skb_shinfo(skb)->nr_frags = 1;
		skb_shinfo(skb)->frags[0].page = page_info->page;
		skb_shinfo(skb)->frags[0].page_offset =
					page_info->page_offset + hdr_len;
		skb_shinfo(skb)->frags[0].size = curr_frag_len - hdr_len;
		skb->data_len = curr_frag_len - hdr_len;
		skb->tail += hdr_len;
	}
	page_info->page = NULL;

	if (rxcp->pkt_size <= rx_frag_size) {
		BUG_ON(rxcp->num_rcvd != 1);
		return;
	}

	/* More frags present for this completion */
	index_inc(&rxcp->rxq_idx, rxq->len);
	remaining = rxcp->pkt_size - curr_frag_len;
	for (i = 1, j = 0; i < rxcp->num_rcvd; i++) {
		page_info = get_rx_page_info(adapter, rxo, rxcp->rxq_idx);
		curr_frag_len = min(remaining, rx_frag_size);

		/* Coalesce all frags from the same physical page in one slot */
		if (page_info->page_offset == 0) {
			/* Fresh page */
			j++;
			skb_shinfo(skb)->frags[j].page = page_info->page;
			skb_shinfo(skb)->frags[j].page_offset =
							page_info->page_offset;
			skb_shinfo(skb)->frags[j].size = 0;
			skb_shinfo(skb)->nr_frags++;
		} else {
			put_page(page_info->page);
		}

		skb_shinfo(skb)->frags[j].size += curr_frag_len;
		skb->len += curr_frag_len;
		skb->data_len += curr_frag_len;

		remaining -= curr_frag_len;
		index_inc(&rxcp->rxq_idx, rxq->len);
		page_info->page = NULL;
	}
	BUG_ON(j > MAX_SKB_FRAGS);
}

/* Process the RX completion indicated by rxcp when GRO is disabled */
static void be_rx_compl_process(struct be_adapter *adapter,
			struct be_rx_obj *rxo,
			struct be_rx_compl_info *rxcp)
{
	struct net_device *netdev = adapter->netdev;
	struct sk_buff *skb;

	skb = netdev_alloc_skb_ip_align(netdev, BE_HDR_LEN);
	if (unlikely(!skb)) {
		rx_stats(rxo)->rx_drops_no_skbs++;
		be_rx_compl_discard(adapter, rxo, rxcp);
		return;
	}

	skb_fill_rx_data(adapter, rxo, skb, rxcp);

	if (likely((netdev->features & NETIF_F_RXCSUM) && csum_passed(rxcp)))
		skb->ip_summed = CHECKSUM_UNNECESSARY;
	else
		skb_checksum_none_assert(skb);

	skb->truesize = skb->len + sizeof(struct sk_buff);
	skb->protocol = eth_type_trans(skb, netdev);
	if (adapter->netdev->features & NETIF_F_RXHASH)
		skb->rxhash = rxcp->rss_hash;

	if (unlikely(rxcp->vlanf))
		__vlan_hwaccel_put_tag(skb, rxcp->vlan_tag);

	netif_receive_skb(skb);
}

Ajit Khaparde5be93b92009-07-21 12:36:19 -07001129/* Process the RX completion indicated by rxcp when GRO is enabled */
1130static void be_rx_compl_process_gro(struct be_adapter *adapter,
Sathya Perla3abcded2010-10-03 22:12:27 -07001131 struct be_rx_obj *rxo,
Sathya Perla2e588f82011-03-11 02:49:26 +00001132 struct be_rx_compl_info *rxcp)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001133{
1134 struct be_rx_page_info *page_info;
Ajit Khaparde5be93b92009-07-21 12:36:19 -07001135 struct sk_buff *skb = NULL;
Sathya Perla3abcded2010-10-03 22:12:27 -07001136 struct be_queue_info *rxq = &rxo->q;
1137 struct be_eq_obj *eq_obj = &rxo->rx_eq;
Sathya Perla2e588f82011-03-11 02:49:26 +00001138 u16 remaining, curr_frag_len;
1139 u16 i, j;
Ajit Khaparde3968fa12011-02-20 11:41:53 +00001140
Ajit Khaparde5be93b92009-07-21 12:36:19 -07001141 skb = napi_get_frags(&eq_obj->napi);
1142 if (!skb) {
Sathya Perla3abcded2010-10-03 22:12:27 -07001143 be_rx_compl_discard(adapter, rxo, rxcp);
Ajit Khaparde5be93b92009-07-21 12:36:19 -07001144 return;
1145 }
1146
Sathya Perla2e588f82011-03-11 02:49:26 +00001147 remaining = rxcp->pkt_size;
1148 for (i = 0, j = -1; i < rxcp->num_rcvd; i++) {
1149 page_info = get_rx_page_info(adapter, rxo, rxcp->rxq_idx);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001150
1151 curr_frag_len = min(remaining, rx_frag_size);
1152
Ajit Khapardebd46cb62009-06-26 02:51:07 +00001153 /* Coalesce all frags from the same physical page in one slot */
1154 if (i == 0 || page_info->page_offset == 0) {
1155 /* First frag or Fresh page */
1156 j++;
Ajit Khaparde5be93b92009-07-21 12:36:19 -07001157 skb_shinfo(skb)->frags[j].page = page_info->page;
1158 skb_shinfo(skb)->frags[j].page_offset =
1159 page_info->page_offset;
1160 skb_shinfo(skb)->frags[j].size = 0;
Ajit Khapardebd46cb62009-06-26 02:51:07 +00001161 } else {
1162 put_page(page_info->page);
1163 }
Ajit Khaparde5be93b92009-07-21 12:36:19 -07001164 skb_shinfo(skb)->frags[j].size += curr_frag_len;
Ajit Khapardebd46cb62009-06-26 02:51:07 +00001165
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001166 remaining -= curr_frag_len;
Sathya Perla2e588f82011-03-11 02:49:26 +00001167 index_inc(&rxcp->rxq_idx, rxq->len);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001168 memset(page_info, 0, sizeof(*page_info));
1169 }
Ajit Khapardebd46cb62009-06-26 02:51:07 +00001170	BUG_ON(j >= MAX_SKB_FRAGS);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001171
Ajit Khaparde5be93b92009-07-21 12:36:19 -07001172 skb_shinfo(skb)->nr_frags = j + 1;
Sathya Perla2e588f82011-03-11 02:49:26 +00001173 skb->len = rxcp->pkt_size;
1174 skb->data_len = rxcp->pkt_size;
1175 skb->truesize += rxcp->pkt_size;
Ajit Khaparde5be93b92009-07-21 12:36:19 -07001176 skb->ip_summed = CHECKSUM_UNNECESSARY;
Ajit Khaparde4b972912011-04-06 18:07:43 +00001177 if (adapter->netdev->features & NETIF_F_RXHASH)
1178 skb->rxhash = rxcp->rss_hash;
Ajit Khaparde5be93b92009-07-21 12:36:19 -07001179
Ajit Khaparde4c5102f2011-07-12 22:10:01 -07001180 if (unlikely(rxcp->vlanf))
1181 __vlan_hwaccel_put_tag(skb, rxcp->vlan_tag);
1182
1183 napi_gro_frags(&eq_obj->napi);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001184}
1185
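/* BE3 in native mode DMAs rx completions in the v1 layout; other
 * functions use v0. The two parsers below extract the same fields and
 * differ only in the AMAP bit offsets.
 */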
Sathya Perla2e588f82011-03-11 02:49:26 +00001186static void be_parse_rx_compl_v1(struct be_adapter *adapter,
1187 struct be_eth_rx_compl *compl,
1188 struct be_rx_compl_info *rxcp)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001189{
Sathya Perla2e588f82011-03-11 02:49:26 +00001190 rxcp->pkt_size =
1191 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, pktsize, compl);
1192 rxcp->vlanf = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, vtp, compl);
1193 rxcp->err = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, err, compl);
1194 rxcp->tcpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, tcpf, compl);
Padmanabh Ratnakar9ecb42f2011-03-15 14:57:09 -07001195 rxcp->udpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, udpf, compl);
Sathya Perla2e588f82011-03-11 02:49:26 +00001196 rxcp->ip_csum =
1197 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, ipcksm, compl);
1198 rxcp->l4_csum =
1199 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, l4_cksm, compl);
1200 rxcp->ipv6 =
1201 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, ip_version, compl);
1202 rxcp->rxq_idx =
1203 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, fragndx, compl);
1204 rxcp->num_rcvd =
1205 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, numfrags, compl);
1206 rxcp->pkt_type =
1207 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, cast_enc, compl);
Ajit Khaparde4b972912011-04-06 18:07:43 +00001208 rxcp->rss_hash =
 1209		AMAP_GET_BITS(struct amap_eth_rx_compl_v1, rsshash, compl);
Sathya Perla15d72182011-03-21 20:49:26 +00001210 if (rxcp->vlanf) {
1211 rxcp->vtm = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, vtm,
David S. Miller3c709f82011-05-11 14:26:15 -04001212 compl);
1213 rxcp->vlan_tag = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, vlan_tag,
1214 compl);
Sathya Perla15d72182011-03-21 20:49:26 +00001215 }
Sathya Perla2e588f82011-03-11 02:49:26 +00001216}
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001217
Sathya Perla2e588f82011-03-11 02:49:26 +00001218static void be_parse_rx_compl_v0(struct be_adapter *adapter,
1219 struct be_eth_rx_compl *compl,
1220 struct be_rx_compl_info *rxcp)
1221{
1222 rxcp->pkt_size =
1223 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, pktsize, compl);
1224 rxcp->vlanf = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, vtp, compl);
1225 rxcp->err = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, err, compl);
1226 rxcp->tcpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, tcpf, compl);
Padmanabh Ratnakar9ecb42f2011-03-15 14:57:09 -07001227 rxcp->udpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, udpf, compl);
Sathya Perla2e588f82011-03-11 02:49:26 +00001228 rxcp->ip_csum =
1229 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, ipcksm, compl);
1230 rxcp->l4_csum =
1231 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, l4_cksm, compl);
1232 rxcp->ipv6 =
1233 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, ip_version, compl);
1234 rxcp->rxq_idx =
1235 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, fragndx, compl);
1236 rxcp->num_rcvd =
1237 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, numfrags, compl);
1238 rxcp->pkt_type =
1239 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, cast_enc, compl);
Ajit Khaparde4b972912011-04-06 18:07:43 +00001240 rxcp->rss_hash =
 1241		AMAP_GET_BITS(struct amap_eth_rx_compl_v0, rsshash, compl);
Sathya Perla15d72182011-03-21 20:49:26 +00001242 if (rxcp->vlanf) {
1243 rxcp->vtm = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, vtm,
David S. Miller3c709f82011-05-11 14:26:15 -04001244 compl);
1245 rxcp->vlan_tag = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, vlan_tag,
1246 compl);
Sathya Perla15d72182011-03-21 20:49:26 +00001247 }
Sathya Perla2e588f82011-03-11 02:49:26 +00001248}
1249
1250static struct be_rx_compl_info *be_rx_compl_get(struct be_rx_obj *rxo)
1251{
1252 struct be_eth_rx_compl *compl = queue_tail_node(&rxo->cq);
1253 struct be_rx_compl_info *rxcp = &rxo->rxcp;
1254 struct be_adapter *adapter = rxo->adapter;
1255
 1256	/* For checking the valid bit, it is OK to use either definition, as the
 1257	 * valid bit is at the same position in both v0 and v1 Rx compls */
1258 if (compl->dw[offsetof(struct amap_eth_rx_compl_v1, valid) / 32] == 0)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001259 return NULL;
1260
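	/* rmb() ensures the rest of the completion is read only after the
	 * valid bit has been observed set.
	 */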
Sathya Perlaf3eb62d2010-06-29 00:11:17 +00001261 rmb();
Sathya Perla2e588f82011-03-11 02:49:26 +00001262 be_dws_le_to_cpu(compl, sizeof(*compl));
1263
1264 if (adapter->be3_native)
1265 be_parse_rx_compl_v1(adapter, compl, rxcp);
1266 else
1267 be_parse_rx_compl_v0(adapter, compl, rxcp);
1268
Sathya Perla15d72182011-03-21 20:49:26 +00001269 if (rxcp->vlanf) {
 1270		/* vlanf could be wrongly set in some cards;
 1271		 * ignore it if vtm is not set */
1272 if ((adapter->function_mode & 0x400) && !rxcp->vtm)
1273 rxcp->vlanf = 0;
Sathya Perla2e588f82011-03-11 02:49:26 +00001274
Sathya Perla15d72182011-03-21 20:49:26 +00001275 if (!lancer_chip(adapter))
David S. Miller3c709f82011-05-11 14:26:15 -04001276 rxcp->vlan_tag = swab16(rxcp->vlan_tag);
Sathya Perla2e588f82011-03-11 02:49:26 +00001277
David S. Miller3c709f82011-05-11 14:26:15 -04001278 if (((adapter->pvid & VLAN_VID_MASK) ==
1279 (rxcp->vlan_tag & VLAN_VID_MASK)) &&
1280 !adapter->vlan_tag[rxcp->vlan_tag])
Sathya Perla15d72182011-03-21 20:49:26 +00001281 rxcp->vlanf = 0;
1282 }
Sathya Perla2e588f82011-03-11 02:49:26 +00001283
1284 /* As the compl has been parsed, reset it; we wont touch it again */
1285 compl->dw[offsetof(struct amap_eth_rx_compl_v1, valid) / 32] = 0;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001286
Sathya Perla3abcded2010-10-03 22:12:27 -07001287 queue_tail_inc(&rxo->cq);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001288 return rxcp;
1289}
1290
Eric Dumazet1829b082011-03-01 05:48:12 +00001291static inline struct page *be_alloc_pages(u32 size, gfp_t gfp)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001292{
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001293 u32 order = get_order(size);
Eric Dumazet1829b082011-03-01 05:48:12 +00001294
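	/* Multi-order allocations must be compound pages (__GFP_COMP) so
	 * that the per-fragment get_page()/put_page() done in the rx path
	 * operate on them correctly.
	 */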
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001295 if (order > 0)
Eric Dumazet1829b082011-03-01 05:48:12 +00001296 gfp |= __GFP_COMP;
1297 return alloc_pages(gfp, order);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001298}
1299
1300/*
 1301 * Allocate a page, split it into fragments of size rx_frag_size and post
 1302 * them as receive buffers to BE
1303 */
Eric Dumazet1829b082011-03-01 05:48:12 +00001304static void be_post_rx_frags(struct be_rx_obj *rxo, gfp_t gfp)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001305{
Sathya Perla3abcded2010-10-03 22:12:27 -07001306 struct be_adapter *adapter = rxo->adapter;
1307 struct be_rx_page_info *page_info_tbl = rxo->page_info_tbl;
Sathya Perla26d92f92010-01-21 22:52:08 -08001308 struct be_rx_page_info *page_info = NULL, *prev_page_info = NULL;
Sathya Perla3abcded2010-10-03 22:12:27 -07001309 struct be_queue_info *rxq = &rxo->q;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001310 struct page *pagep = NULL;
1311 struct be_eth_rx_d *rxd;
1312 u64 page_dmaaddr = 0, frag_dmaaddr;
1313 u32 posted, page_offset = 0;
1314
Sathya Perla3abcded2010-10-03 22:12:27 -07001315 page_info = &rxo->page_info_tbl[rxq->head];
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001316 for (posted = 0; posted < MAX_RX_POST && !page_info->page; posted++) {
1317 if (!pagep) {
Eric Dumazet1829b082011-03-01 05:48:12 +00001318 pagep = be_alloc_pages(adapter->big_page_size, gfp);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001319 if (unlikely(!pagep)) {
Sathya Perlaac124ff2011-07-25 19:10:14 +00001320 rx_stats(rxo)->rx_post_fail++;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001321 break;
1322 }
Ivan Vecera2b7bceb2011-02-02 08:05:12 +00001323 page_dmaaddr = dma_map_page(&adapter->pdev->dev, pagep,
1324 0, adapter->big_page_size,
1325 DMA_FROM_DEVICE);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001326 page_info->page_offset = 0;
1327 } else {
1328 get_page(pagep);
1329 page_info->page_offset = page_offset + rx_frag_size;
1330 }
1331 page_offset = page_info->page_offset;
1332 page_info->page = pagep;
FUJITA Tomonorifac6da52010-04-01 16:53:22 +00001333 dma_unmap_addr_set(page_info, bus, page_dmaaddr);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001334 frag_dmaaddr = page_dmaaddr + page_info->page_offset;
1335
1336 rxd = queue_head_node(rxq);
1337 rxd->fragpa_lo = cpu_to_le32(frag_dmaaddr & 0xFFFFFFFF);
1338 rxd->fragpa_hi = cpu_to_le32(upper_32_bits(frag_dmaaddr));
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001339
1340 /* Any space left in the current big page for another frag? */
1341 if ((page_offset + rx_frag_size + rx_frag_size) >
1342 adapter->big_page_size) {
1343 pagep = NULL;
1344 page_info->last_page_user = true;
1345 }
Sathya Perla26d92f92010-01-21 22:52:08 -08001346
1347 prev_page_info = page_info;
1348 queue_head_inc(rxq);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001349 page_info = &page_info_tbl[rxq->head];
1350 }
1351 if (pagep)
Sathya Perla26d92f92010-01-21 22:52:08 -08001352 prev_page_info->last_page_user = true;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001353
1354 if (posted) {
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001355 atomic_add(posted, &rxq->used);
Sathya Perla8788fdc2009-07-27 22:52:03 +00001356 be_rxq_notify(adapter, rxq->id, posted);
Sathya Perlaea1dae12009-03-19 23:56:20 -07001357 } else if (atomic_read(&rxq->used) == 0) {
1358 /* Let be_worker replenish when memory is available */
Sathya Perla3abcded2010-10-03 22:12:27 -07001359 rxo->rx_post_starved = true;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001360 }
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001361}
1362
Sathya Perla5fb379e2009-06-18 00:02:59 +00001363static struct be_eth_tx_compl *be_tx_compl_get(struct be_queue_info *tx_cq)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001364{
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001365 struct be_eth_tx_compl *txcp = queue_tail_node(tx_cq);
1366
1367 if (txcp->dw[offsetof(struct amap_eth_tx_compl, valid) / 32] == 0)
1368 return NULL;
1369
Sathya Perlaf3eb62d2010-06-29 00:11:17 +00001370 rmb();
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001371 be_dws_le_to_cpu(txcp, sizeof(*txcp));
1372
1373 txcp->dw[offsetof(struct amap_eth_tx_compl, valid) / 32] = 0;
1374
1375 queue_tail_inc(tx_cq);
1376 return txcp;
1377}
1378
Sathya Perla3c8def92011-06-12 20:01:58 +00001379static u16 be_tx_compl_process(struct be_adapter *adapter,
1380 struct be_tx_obj *txo, u16 last_index)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001381{
Sathya Perla3c8def92011-06-12 20:01:58 +00001382 struct be_queue_info *txq = &txo->q;
Alexander Duycka73b7962009-12-02 16:48:18 +00001383 struct be_eth_wrb *wrb;
Sathya Perla3c8def92011-06-12 20:01:58 +00001384 struct sk_buff **sent_skbs = txo->sent_skb_list;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001385 struct sk_buff *sent_skb;
Sathya Perlaec43b1a2010-03-22 20:41:34 +00001386 u16 cur_index, num_wrbs = 1; /* account for hdr wrb */
1387 bool unmap_skb_hdr = true;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001388
Sathya Perlaec43b1a2010-03-22 20:41:34 +00001389 sent_skb = sent_skbs[txq->tail];
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001390 BUG_ON(!sent_skb);
Sathya Perlaec43b1a2010-03-22 20:41:34 +00001391 sent_skbs[txq->tail] = NULL;
1392
1393 /* skip header wrb */
Alexander Duycka73b7962009-12-02 16:48:18 +00001394 queue_tail_inc(txq);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001395
Sathya Perlaec43b1a2010-03-22 20:41:34 +00001396 do {
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001397 cur_index = txq->tail;
Alexander Duycka73b7962009-12-02 16:48:18 +00001398 wrb = queue_tail_node(txq);
Ivan Vecera2b7bceb2011-02-02 08:05:12 +00001399 unmap_tx_frag(&adapter->pdev->dev, wrb,
1400 (unmap_skb_hdr && skb_headlen(sent_skb)));
Sathya Perlaec43b1a2010-03-22 20:41:34 +00001401 unmap_skb_hdr = false;
1402
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001403 num_wrbs++;
1404 queue_tail_inc(txq);
Sathya Perlaec43b1a2010-03-22 20:41:34 +00001405 } while (cur_index != last_index);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001406
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001407 kfree_skb(sent_skb);
Padmanabh Ratnakar4d586b82011-05-10 05:13:57 +00001408 return num_wrbs;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001409}
1410
Sathya Perla859b1e42009-08-10 03:43:51 +00001411static inline struct be_eq_entry *event_get(struct be_eq_obj *eq_obj)
1412{
1413 struct be_eq_entry *eqe = queue_tail_node(&eq_obj->q);
1414
1415 if (!eqe->evt)
1416 return NULL;
1417
Sathya Perlaf3eb62d2010-06-29 00:11:17 +00001418 rmb();
Sathya Perla859b1e42009-08-10 03:43:51 +00001419 eqe->evt = le32_to_cpu(eqe->evt);
1420 queue_tail_inc(&eq_obj->q);
1421 return eqe;
1422}
1423
1424static int event_handle(struct be_adapter *adapter,
Sathya Perla3c8def92011-06-12 20:01:58 +00001425 struct be_eq_obj *eq_obj,
1426 bool rearm)
Sathya Perla859b1e42009-08-10 03:43:51 +00001427{
1428 struct be_eq_entry *eqe;
1429 u16 num = 0;
1430
1431 while ((eqe = event_get(eq_obj)) != NULL) {
1432 eqe->evt = 0;
1433 num++;
1434 }
1435
1436 /* Deal with any spurious interrupts that come
1437 * without events
1438 */
Sathya Perla3c8def92011-06-12 20:01:58 +00001439 if (!num)
1440 rearm = true;
1441
1442 be_eq_notify(adapter, eq_obj->q.id, rearm, true, num);
Sathya Perla859b1e42009-08-10 03:43:51 +00001443 if (num)
1444 napi_schedule(&eq_obj->napi);
1445
1446 return num;
1447}
1448
1449/* Just read and notify events without processing them.
1450 * Used at the time of destroying event queues */
1451static void be_eq_clean(struct be_adapter *adapter,
1452 struct be_eq_obj *eq_obj)
1453{
1454 struct be_eq_entry *eqe;
1455 u16 num = 0;
1456
1457 while ((eqe = event_get(eq_obj)) != NULL) {
1458 eqe->evt = 0;
1459 num++;
1460 }
1461
1462 if (num)
1463 be_eq_notify(adapter, eq_obj->q.id, false, true, num);
1464}
1465
Sathya Perla3abcded2010-10-03 22:12:27 -07001466static void be_rx_q_clean(struct be_adapter *adapter, struct be_rx_obj *rxo)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001467{
1468 struct be_rx_page_info *page_info;
Sathya Perla3abcded2010-10-03 22:12:27 -07001469 struct be_queue_info *rxq = &rxo->q;
1470 struct be_queue_info *rx_cq = &rxo->cq;
Sathya Perla2e588f82011-03-11 02:49:26 +00001471 struct be_rx_compl_info *rxcp;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001472 u16 tail;
1473
1474 /* First cleanup pending rx completions */
Sathya Perla3abcded2010-10-03 22:12:27 -07001475 while ((rxcp = be_rx_compl_get(rxo)) != NULL) {
1476 be_rx_compl_discard(adapter, rxo, rxcp);
Sathya Perla64642812010-12-01 01:04:17 +00001477 be_cq_notify(adapter, rx_cq->id, false, 1);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001478 }
1479
 1480	/* Then free posted rx buffers that were not used */
1481 tail = (rxq->head + rxq->len - atomic_read(&rxq->used)) % rxq->len;
Sathya Perlacdab23b2009-08-10 03:43:23 +00001482 for (; atomic_read(&rxq->used) > 0; index_inc(&tail, rxq->len)) {
Sathya Perla3abcded2010-10-03 22:12:27 -07001483 page_info = get_rx_page_info(adapter, rxo, tail);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001484 put_page(page_info->page);
1485 memset(page_info, 0, sizeof(*page_info));
1486 }
1487 BUG_ON(atomic_read(&rxq->used));
Sathya Perla482c9e72011-06-29 23:33:17 +00001488 rxq->tail = rxq->head = 0;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001489}
1490
Sathya Perla3c8def92011-06-12 20:01:58 +00001491static void be_tx_compl_clean(struct be_adapter *adapter,
1492 struct be_tx_obj *txo)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001493{
Sathya Perla3c8def92011-06-12 20:01:58 +00001494 struct be_queue_info *tx_cq = &txo->cq;
1495 struct be_queue_info *txq = &txo->q;
Sathya Perlaa8e91792009-08-10 03:42:43 +00001496 struct be_eth_tx_compl *txcp;
Padmanabh Ratnakar4d586b82011-05-10 05:13:57 +00001497 u16 end_idx, cmpl = 0, timeo = 0, num_wrbs = 0;
Sathya Perla3c8def92011-06-12 20:01:58 +00001498 struct sk_buff **sent_skbs = txo->sent_skb_list;
Sathya Perlab03388d2010-02-18 00:37:17 +00001499 struct sk_buff *sent_skb;
1500 bool dummy_wrb;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001501
Sathya Perlaa8e91792009-08-10 03:42:43 +00001502 /* Wait for a max of 200ms for all the tx-completions to arrive. */
1503 do {
1504 while ((txcp = be_tx_compl_get(tx_cq))) {
1505 end_idx = AMAP_GET_BITS(struct amap_eth_tx_compl,
1506 wrb_index, txcp);
Sathya Perla3c8def92011-06-12 20:01:58 +00001507 num_wrbs += be_tx_compl_process(adapter, txo, end_idx);
Sathya Perlaa8e91792009-08-10 03:42:43 +00001508 cmpl++;
1509 }
1510 if (cmpl) {
1511 be_cq_notify(adapter, tx_cq->id, false, cmpl);
Padmanabh Ratnakar4d586b82011-05-10 05:13:57 +00001512 atomic_sub(num_wrbs, &txq->used);
Sathya Perlaa8e91792009-08-10 03:42:43 +00001513 cmpl = 0;
Padmanabh Ratnakar4d586b82011-05-10 05:13:57 +00001514 num_wrbs = 0;
Sathya Perlaa8e91792009-08-10 03:42:43 +00001515 }
1516
1517 if (atomic_read(&txq->used) == 0 || ++timeo > 200)
1518 break;
1519
1520 mdelay(1);
1521 } while (true);
1522
1523 if (atomic_read(&txq->used))
1524 dev_err(&adapter->pdev->dev, "%d pending tx-completions\n",
1525 atomic_read(&txq->used));
Sathya Perlab03388d2010-02-18 00:37:17 +00001526
 1527	/* Free posted tx skbs for which completions will never arrive */
1528 while (atomic_read(&txq->used)) {
1529 sent_skb = sent_skbs[txq->tail];
1530 end_idx = txq->tail;
1531 index_adv(&end_idx,
Sathya Perlafe6d2a32010-11-21 23:25:50 +00001532 wrb_cnt_for_skb(adapter, sent_skb, &dummy_wrb) - 1,
1533 txq->len);
Sathya Perla3c8def92011-06-12 20:01:58 +00001534 num_wrbs = be_tx_compl_process(adapter, txo, end_idx);
Padmanabh Ratnakar4d586b82011-05-10 05:13:57 +00001535 atomic_sub(num_wrbs, &txq->used);
Sathya Perlab03388d2010-02-18 00:37:17 +00001536 }
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001537}
1538
Sathya Perla5fb379e2009-06-18 00:02:59 +00001539static void be_mcc_queues_destroy(struct be_adapter *adapter)
1540{
1541 struct be_queue_info *q;
Sathya Perla5fb379e2009-06-18 00:02:59 +00001542
Sathya Perla8788fdc2009-07-27 22:52:03 +00001543 q = &adapter->mcc_obj.q;
Sathya Perla5fb379e2009-06-18 00:02:59 +00001544 if (q->created)
Sathya Perla8788fdc2009-07-27 22:52:03 +00001545 be_cmd_q_destroy(adapter, q, QTYPE_MCCQ);
Sathya Perla5fb379e2009-06-18 00:02:59 +00001546 be_queue_free(adapter, q);
1547
Sathya Perla8788fdc2009-07-27 22:52:03 +00001548 q = &adapter->mcc_obj.cq;
Sathya Perla5fb379e2009-06-18 00:02:59 +00001549 if (q->created)
Sathya Perla8788fdc2009-07-27 22:52:03 +00001550 be_cmd_q_destroy(adapter, q, QTYPE_CQ);
Sathya Perla5fb379e2009-06-18 00:02:59 +00001551 be_queue_free(adapter, q);
1552}
1553
1554/* Must be called only after TX qs are created as MCC shares TX EQ */
1555static int be_mcc_queues_create(struct be_adapter *adapter)
1556{
1557 struct be_queue_info *q, *cq;
Sathya Perla5fb379e2009-06-18 00:02:59 +00001558
1559 /* Alloc MCC compl queue */
Sathya Perla8788fdc2009-07-27 22:52:03 +00001560 cq = &adapter->mcc_obj.cq;
Sathya Perla5fb379e2009-06-18 00:02:59 +00001561 if (be_queue_alloc(adapter, cq, MCC_CQ_LEN,
Sathya Perlaefd2e402009-07-27 22:53:10 +00001562 sizeof(struct be_mcc_compl)))
Sathya Perla5fb379e2009-06-18 00:02:59 +00001563 goto err;
1564
1565 /* Ask BE to create MCC compl queue; share TX's eq */
Sathya Perla8788fdc2009-07-27 22:52:03 +00001566 if (be_cmd_cq_create(adapter, cq, &adapter->tx_eq.q, false, true, 0))
Sathya Perla5fb379e2009-06-18 00:02:59 +00001567 goto mcc_cq_free;
1568
1569 /* Alloc MCC queue */
Sathya Perla8788fdc2009-07-27 22:52:03 +00001570 q = &adapter->mcc_obj.q;
Sathya Perla5fb379e2009-06-18 00:02:59 +00001571 if (be_queue_alloc(adapter, q, MCC_Q_LEN, sizeof(struct be_mcc_wrb)))
1572 goto mcc_cq_destroy;
1573
1574 /* Ask BE to create MCC queue */
Sathya Perla8788fdc2009-07-27 22:52:03 +00001575 if (be_cmd_mccq_create(adapter, q, cq))
Sathya Perla5fb379e2009-06-18 00:02:59 +00001576 goto mcc_q_free;
1577
1578 return 0;
1579
1580mcc_q_free:
1581 be_queue_free(adapter, q);
1582mcc_cq_destroy:
Sathya Perla8788fdc2009-07-27 22:52:03 +00001583 be_cmd_q_destroy(adapter, cq, QTYPE_CQ);
Sathya Perla5fb379e2009-06-18 00:02:59 +00001584mcc_cq_free:
1585 be_queue_free(adapter, cq);
1586err:
1587 return -1;
1588}
1589
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001590static void be_tx_queues_destroy(struct be_adapter *adapter)
1591{
1592 struct be_queue_info *q;
Sathya Perla3c8def92011-06-12 20:01:58 +00001593 struct be_tx_obj *txo;
1594 u8 i;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001595
Sathya Perla3c8def92011-06-12 20:01:58 +00001596 for_all_tx_queues(adapter, txo, i) {
1597 q = &txo->q;
1598 if (q->created)
1599 be_cmd_q_destroy(adapter, q, QTYPE_TXQ);
1600 be_queue_free(adapter, q);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001601
Sathya Perla3c8def92011-06-12 20:01:58 +00001602 q = &txo->cq;
1603 if (q->created)
1604 be_cmd_q_destroy(adapter, q, QTYPE_CQ);
1605 be_queue_free(adapter, q);
1606 }
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001607
Sathya Perla859b1e42009-08-10 03:43:51 +00001608 /* Clear any residual events */
1609 be_eq_clean(adapter, &adapter->tx_eq);
1610
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001611 q = &adapter->tx_eq.q;
1612 if (q->created)
Sathya Perla8788fdc2009-07-27 22:52:03 +00001613 be_cmd_q_destroy(adapter, q, QTYPE_EQ);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001614 be_queue_free(adapter, q);
1615}
1616
Sathya Perla3c8def92011-06-12 20:01:58 +00001617/* One TX event queue is shared by all TX compl qs */
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001618static int be_tx_queues_create(struct be_adapter *adapter)
1619{
1620 struct be_queue_info *eq, *q, *cq;
Sathya Perla3c8def92011-06-12 20:01:58 +00001621 struct be_tx_obj *txo;
1622 u8 i;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001623
1624 adapter->tx_eq.max_eqd = 0;
1625 adapter->tx_eq.min_eqd = 0;
1626 adapter->tx_eq.cur_eqd = 96;
1627 adapter->tx_eq.enable_aic = false;
Sathya Perla3c8def92011-06-12 20:01:58 +00001628
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001629 eq = &adapter->tx_eq.q;
Sathya Perla3c8def92011-06-12 20:01:58 +00001630 if (be_queue_alloc(adapter, eq, EVNT_Q_LEN,
1631 sizeof(struct be_eq_entry)))
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001632 return -1;
1633
Sathya Perla8788fdc2009-07-27 22:52:03 +00001634 if (be_cmd_eq_create(adapter, eq, adapter->tx_eq.cur_eqd))
Sathya Perla3c8def92011-06-12 20:01:58 +00001635 goto err;
Padmanabh Ratnakarecd62102011-04-03 01:54:11 +00001636 adapter->tx_eq.eq_idx = adapter->eq_next_idx++;
Sathya Perlafe6d2a32010-11-21 23:25:50 +00001637
Sathya Perla3c8def92011-06-12 20:01:58 +00001638 for_all_tx_queues(adapter, txo, i) {
1639 cq = &txo->cq;
1640 if (be_queue_alloc(adapter, cq, TX_CQ_LEN,
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001641 sizeof(struct be_eth_tx_compl)))
Sathya Perla3c8def92011-06-12 20:01:58 +00001642 goto err;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001643
Sathya Perla3c8def92011-06-12 20:01:58 +00001644 if (be_cmd_cq_create(adapter, cq, eq, false, false, 3))
1645 goto err;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001646
Sathya Perla3c8def92011-06-12 20:01:58 +00001647 q = &txo->q;
1648 if (be_queue_alloc(adapter, q, TX_Q_LEN,
1649 sizeof(struct be_eth_wrb)))
1650 goto err;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001651
Sathya Perla3c8def92011-06-12 20:01:58 +00001652 if (be_cmd_txq_create(adapter, q, cq))
1653 goto err;
1654 }
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001655 return 0;
1656
Sathya Perla3c8def92011-06-12 20:01:58 +00001657err:
1658 be_tx_queues_destroy(adapter);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001659 return -1;
1660}
1661
1662static void be_rx_queues_destroy(struct be_adapter *adapter)
1663{
1664 struct be_queue_info *q;
Sathya Perla3abcded2010-10-03 22:12:27 -07001665 struct be_rx_obj *rxo;
1666 int i;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001667
Sathya Perla3abcded2010-10-03 22:12:27 -07001668 for_all_rx_queues(adapter, rxo, i) {
Sathya Perla482c9e72011-06-29 23:33:17 +00001669 be_queue_free(adapter, &rxo->q);
Sathya Perla89420422010-02-17 01:35:26 +00001670
Sathya Perla3abcded2010-10-03 22:12:27 -07001671 q = &rxo->cq;
1672 if (q->created)
1673 be_cmd_q_destroy(adapter, q, QTYPE_CQ);
1674 be_queue_free(adapter, q);
1675
Sathya Perla3abcded2010-10-03 22:12:27 -07001676 q = &rxo->rx_eq.q;
Sathya Perla482c9e72011-06-29 23:33:17 +00001677 if (q->created)
Sathya Perla3abcded2010-10-03 22:12:27 -07001678 be_cmd_q_destroy(adapter, q, QTYPE_EQ);
Sathya Perla3abcded2010-10-03 22:12:27 -07001679 be_queue_free(adapter, q);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001680 }
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001681}
1682
Sathya Perlaac6a0c42011-03-21 20:49:25 +00001683static u32 be_num_rxqs_want(struct be_adapter *adapter)
1684{
Sathya Perlac814fd32011-06-26 20:41:25 +00001685 if ((adapter->function_caps & BE_FUNCTION_CAPS_RSS) &&
Sathya Perlaac6a0c42011-03-21 20:49:25 +00001686 !adapter->sriov_enabled && !(adapter->function_mode & 0x400)) {
 1687		return 1 + MAX_RSS_QS; /* 1 default (non-RSS) queue + MAX_RSS_QS RSS queues */
1688 } else {
1689 dev_warn(&adapter->pdev->dev,
1690 "No support for multiple RX queues\n");
1691 return 1;
1692 }
1693}
1694
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001695static int be_rx_queues_create(struct be_adapter *adapter)
1696{
1697 struct be_queue_info *eq, *q, *cq;
Sathya Perla3abcded2010-10-03 22:12:27 -07001698 struct be_rx_obj *rxo;
1699 int rc, i;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001700
Sathya Perlaac6a0c42011-03-21 20:49:25 +00001701 adapter->num_rx_qs = min(be_num_rxqs_want(adapter),
1702 msix_enabled(adapter) ?
1703 adapter->num_msix_vec - 1 : 1);
1704 if (adapter->num_rx_qs != MAX_RX_QS)
1705 dev_warn(&adapter->pdev->dev,
 1706			"Can create only %d RX queues\n", adapter->num_rx_qs);
1707
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001708 adapter->big_page_size = (1 << get_order(rx_frag_size)) * PAGE_SIZE;
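	/* e.g. with 4K pages and the default rx_frag_size of 2048,
	 * big_page_size works out to 4096 and each page yields two frags.
	 */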
Sathya Perla3abcded2010-10-03 22:12:27 -07001709 for_all_rx_queues(adapter, rxo, i) {
1710 rxo->adapter = adapter;
1711 rxo->rx_eq.max_eqd = BE_MAX_EQD;
1712 rxo->rx_eq.enable_aic = true;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001713
Sathya Perla3abcded2010-10-03 22:12:27 -07001714 /* EQ */
1715 eq = &rxo->rx_eq.q;
1716 rc = be_queue_alloc(adapter, eq, EVNT_Q_LEN,
1717 sizeof(struct be_eq_entry));
1718 if (rc)
1719 goto err;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001720
Sathya Perla3abcded2010-10-03 22:12:27 -07001721 rc = be_cmd_eq_create(adapter, eq, rxo->rx_eq.cur_eqd);
1722 if (rc)
1723 goto err;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001724
Padmanabh Ratnakarecd62102011-04-03 01:54:11 +00001725 rxo->rx_eq.eq_idx = adapter->eq_next_idx++;
Sathya Perlafe6d2a32010-11-21 23:25:50 +00001726
Sathya Perla3abcded2010-10-03 22:12:27 -07001727 /* CQ */
1728 cq = &rxo->cq;
1729 rc = be_queue_alloc(adapter, cq, RX_CQ_LEN,
1730 sizeof(struct be_eth_rx_compl));
1731 if (rc)
1732 goto err;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001733
Sathya Perla3abcded2010-10-03 22:12:27 -07001734 rc = be_cmd_cq_create(adapter, cq, eq, false, false, 3);
1735 if (rc)
1736 goto err;
Sathya Perla482c9e72011-06-29 23:33:17 +00001737
1738 /* Rx Q - will be created in be_open() */
Sathya Perla3abcded2010-10-03 22:12:27 -07001739 q = &rxo->q;
1740 rc = be_queue_alloc(adapter, q, RX_Q_LEN,
1741 sizeof(struct be_eth_rx_d));
1742 if (rc)
1743 goto err;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001744
Sathya Perla3abcded2010-10-03 22:12:27 -07001745 }
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001746
1747 return 0;
Sathya Perla3abcded2010-10-03 22:12:27 -07001748err:
1749 be_rx_queues_destroy(adapter);
1750 return -1;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001751}
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001752
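/* Peek at the tail EQ entry without consuming it; be_intx() uses this on
 * Lancer to decide whether the shared interrupt belongs to this function.
 */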
Sathya Perlafe6d2a32010-11-21 23:25:50 +00001753static bool event_peek(struct be_eq_obj *eq_obj)
Sathya Perlab628bde2009-08-17 00:58:26 +00001754{
Sathya Perlafe6d2a32010-11-21 23:25:50 +00001755 struct be_eq_entry *eqe = queue_tail_node(&eq_obj->q);
 1756	return eqe->evt != 0;
Sathya Perlab628bde2009-08-17 00:58:26 +00001760}
1761
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001762static irqreturn_t be_intx(int irq, void *dev)
1763{
1764 struct be_adapter *adapter = dev;
Sathya Perla3abcded2010-10-03 22:12:27 -07001765 struct be_rx_obj *rxo;
Sathya Perlafe6d2a32010-11-21 23:25:50 +00001766	int isr, i, tx = 0, rx = 0;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001767
Sathya Perlafe6d2a32010-11-21 23:25:50 +00001768 if (lancer_chip(adapter)) {
1769 if (event_peek(&adapter->tx_eq))
Sathya Perla3c8def92011-06-12 20:01:58 +00001770 tx = event_handle(adapter, &adapter->tx_eq, false);
Sathya Perlafe6d2a32010-11-21 23:25:50 +00001771 for_all_rx_queues(adapter, rxo, i) {
1772 if (event_peek(&rxo->rx_eq))
Sathya Perla3c8def92011-06-12 20:01:58 +00001773 rx |= event_handle(adapter, &rxo->rx_eq, true);
Sathya Perlafe6d2a32010-11-21 23:25:50 +00001774 }
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001775
Sathya Perlafe6d2a32010-11-21 23:25:50 +00001776 if (!(tx || rx))
1777 return IRQ_NONE;
Sathya Perla3abcded2010-10-03 22:12:27 -07001778
Sathya Perlafe6d2a32010-11-21 23:25:50 +00001779 } else {
1780 isr = ioread32(adapter->csr + CEV_ISR0_OFFSET +
1781 (adapter->tx_eq.q.id / 8) * CEV_ISR_SIZE);
1782 if (!isr)
1783 return IRQ_NONE;
1784
Padmanabh Ratnakarecd62102011-04-03 01:54:11 +00001785 if ((1 << adapter->tx_eq.eq_idx & isr))
Sathya Perla3c8def92011-06-12 20:01:58 +00001786 event_handle(adapter, &adapter->tx_eq, false);
Sathya Perlafe6d2a32010-11-21 23:25:50 +00001787
1788 for_all_rx_queues(adapter, rxo, i) {
Padmanabh Ratnakarecd62102011-04-03 01:54:11 +00001789 if ((1 << rxo->rx_eq.eq_idx & isr))
Sathya Perla3c8def92011-06-12 20:01:58 +00001790 event_handle(adapter, &rxo->rx_eq, true);
Sathya Perlafe6d2a32010-11-21 23:25:50 +00001791 }
Sathya Perla3abcded2010-10-03 22:12:27 -07001792 }
Sathya Perlac001c212009-07-01 01:06:07 +00001793
Sathya Perla8788fdc2009-07-27 22:52:03 +00001794 return IRQ_HANDLED;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001795}
1796
1797static irqreturn_t be_msix_rx(int irq, void *dev)
1798{
Sathya Perla3abcded2010-10-03 22:12:27 -07001799 struct be_rx_obj *rxo = dev;
1800 struct be_adapter *adapter = rxo->adapter;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001801
Sathya Perla3c8def92011-06-12 20:01:58 +00001802 event_handle(adapter, &rxo->rx_eq, true);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001803
1804 return IRQ_HANDLED;
1805}
1806
Sathya Perla5fb379e2009-06-18 00:02:59 +00001807static irqreturn_t be_msix_tx_mcc(int irq, void *dev)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001808{
1809 struct be_adapter *adapter = dev;
1810
Sathya Perla3c8def92011-06-12 20:01:58 +00001811 event_handle(adapter, &adapter->tx_eq, false);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001812
1813 return IRQ_HANDLED;
1814}
1815
Sathya Perla2e588f82011-03-11 02:49:26 +00001816static inline bool do_gro(struct be_rx_compl_info *rxcp)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001817{
Sathya Perla2e588f82011-03-11 02:49:26 +00001818	return rxcp->tcpf && !rxcp->err;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001819}
1820
stephen hemminger49b05222010-10-21 07:50:48 +00001821static int be_poll_rx(struct napi_struct *napi, int budget)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001822{
1823 struct be_eq_obj *rx_eq = container_of(napi, struct be_eq_obj, napi);
Sathya Perla3abcded2010-10-03 22:12:27 -07001824 struct be_rx_obj *rxo = container_of(rx_eq, struct be_rx_obj, rx_eq);
1825 struct be_adapter *adapter = rxo->adapter;
1826 struct be_queue_info *rx_cq = &rxo->cq;
Sathya Perla2e588f82011-03-11 02:49:26 +00001827 struct be_rx_compl_info *rxcp;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001828 u32 work_done;
1829
Sathya Perlaac124ff2011-07-25 19:10:14 +00001830 rx_stats(rxo)->rx_polls++;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001831 for (work_done = 0; work_done < budget; work_done++) {
Sathya Perla3abcded2010-10-03 22:12:27 -07001832 rxcp = be_rx_compl_get(rxo);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001833 if (!rxcp)
1834 break;
1835
Padmanabh Ratnakare80d9da2011-03-07 03:07:58 +00001836 /* Ignore flush completions */
Padmanabh Ratnakar009dd872011-05-10 05:12:17 +00001837 if (rxcp->num_rcvd && rxcp->pkt_size) {
Sathya Perla2e588f82011-03-11 02:49:26 +00001838 if (do_gro(rxcp))
Sathya Perla64642812010-12-01 01:04:17 +00001839 be_rx_compl_process_gro(adapter, rxo, rxcp);
1840 else
1841 be_rx_compl_process(adapter, rxo, rxcp);
Padmanabh Ratnakar009dd872011-05-10 05:12:17 +00001842 } else if (rxcp->pkt_size == 0) {
1843 be_rx_compl_discard(adapter, rxo, rxcp);
Sathya Perla64642812010-12-01 01:04:17 +00001844 }
Padmanabh Ratnakar009dd872011-05-10 05:12:17 +00001845
Sathya Perla2e588f82011-03-11 02:49:26 +00001846 be_rx_stats_update(rxo, rxcp);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001847 }
1848
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001849 /* Refill the queue */
Sathya Perla3abcded2010-10-03 22:12:27 -07001850 if (atomic_read(&rxo->q.used) < RX_FRAGS_REFILL_WM)
Eric Dumazet1829b082011-03-01 05:48:12 +00001851 be_post_rx_frags(rxo, GFP_ATOMIC);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001852
1853 /* All consumed */
1854 if (work_done < budget) {
1855 napi_complete(napi);
Sathya Perla8788fdc2009-07-27 22:52:03 +00001856 be_cq_notify(adapter, rx_cq->id, true, work_done);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001857 } else {
1858 /* More to be consumed; continue with interrupts disabled */
Sathya Perla8788fdc2009-07-27 22:52:03 +00001859 be_cq_notify(adapter, rx_cq->id, false, work_done);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001860 }
1861 return work_done;
1862}
1863
Sathya Perlaf31e50a2010-03-02 03:56:39 -08001864/* As TX and MCC share the same EQ, check for both TX and MCC completions.
1865 * For TX/MCC we don't honour budget; consume everything
1866 */
1867static int be_poll_tx_mcc(struct napi_struct *napi, int budget)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001868{
Sathya Perlaf31e50a2010-03-02 03:56:39 -08001869 struct be_eq_obj *tx_eq = container_of(napi, struct be_eq_obj, napi);
1870 struct be_adapter *adapter =
1871 container_of(tx_eq, struct be_adapter, tx_eq);
Sathya Perla3c8def92011-06-12 20:01:58 +00001872 struct be_tx_obj *txo;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001873 struct be_eth_tx_compl *txcp;
Sathya Perla3c8def92011-06-12 20:01:58 +00001874 int tx_compl, mcc_compl, status = 0;
1875 u8 i;
1876 u16 num_wrbs;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001877
Sathya Perla3c8def92011-06-12 20:01:58 +00001878 for_all_tx_queues(adapter, txo, i) {
1879 tx_compl = 0;
1880 num_wrbs = 0;
1881 while ((txcp = be_tx_compl_get(&txo->cq))) {
1882 num_wrbs += be_tx_compl_process(adapter, txo,
1883 AMAP_GET_BITS(struct amap_eth_tx_compl,
1884 wrb_index, txcp));
1885 tx_compl++;
1886 }
1887 if (tx_compl) {
1888 be_cq_notify(adapter, txo->cq.id, true, tx_compl);
1889
1890 atomic_sub(num_wrbs, &txo->q.used);
1891
1892 /* As Tx wrbs have been freed up, wake up netdev queue
1893 * if it was stopped due to lack of tx wrbs. */
1894 if (__netif_subqueue_stopped(adapter->netdev, i) &&
1895 atomic_read(&txo->q.used) < txo->q.len / 2) {
1896 netif_wake_subqueue(adapter->netdev, i);
1897 }
1898
Sathya Perlaab1594e2011-07-25 19:10:15 +00001899 u64_stats_update_begin(&tx_stats(txo)->sync_compl);
Sathya Perlaac124ff2011-07-25 19:10:14 +00001900 tx_stats(txo)->tx_compl += tx_compl;
Sathya Perlaab1594e2011-07-25 19:10:15 +00001901 u64_stats_update_end(&tx_stats(txo)->sync_compl);
Sathya Perla3c8def92011-06-12 20:01:58 +00001902 }
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001903 }
1904
Sathya Perlaf31e50a2010-03-02 03:56:39 -08001905 mcc_compl = be_process_mcc(adapter, &status);
1906
Sathya Perlaf31e50a2010-03-02 03:56:39 -08001907 if (mcc_compl) {
1908 struct be_mcc_obj *mcc_obj = &adapter->mcc_obj;
1909 be_cq_notify(adapter, mcc_obj->cq.id, true, mcc_compl);
1910 }
1911
Sathya Perla3c8def92011-06-12 20:01:58 +00001912 napi_complete(napi);
Sathya Perla5fb379e2009-06-18 00:02:59 +00001913
Sathya Perla3c8def92011-06-12 20:01:58 +00001914 be_eq_notify(adapter, tx_eq->q.id, true, false, 0);
Sathya Perlaab1594e2011-07-25 19:10:15 +00001915 adapter->drv_stats.tx_events++;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001916 return 1;
1917}
1918
Ajit Khaparded053de92010-09-03 06:23:30 +00001919void be_detect_dump_ue(struct be_adapter *adapter)
Ajit Khaparde7c185272010-07-29 06:16:33 +00001920{
1921 u32 ue_status_lo, ue_status_hi, ue_status_lo_mask, ue_status_hi_mask;
1922 u32 i;
1923
1924 pci_read_config_dword(adapter->pdev,
1925 PCICFG_UE_STATUS_LOW, &ue_status_lo);
1926 pci_read_config_dword(adapter->pdev,
1927 PCICFG_UE_STATUS_HIGH, &ue_status_hi);
1928 pci_read_config_dword(adapter->pdev,
1929 PCICFG_UE_STATUS_LOW_MASK, &ue_status_lo_mask);
1930 pci_read_config_dword(adapter->pdev,
1931 PCICFG_UE_STATUS_HI_MASK, &ue_status_hi_mask);
1932
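	/* Bits set in the mask registers are expected/ignorable errors;
	 * clear them before checking for a true unrecoverable error.
	 */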
1933 ue_status_lo = (ue_status_lo & (~ue_status_lo_mask));
1934 ue_status_hi = (ue_status_hi & (~ue_status_hi_mask));
1935
Ajit Khaparded053de92010-09-03 06:23:30 +00001936 if (ue_status_lo || ue_status_hi) {
1937 adapter->ue_detected = true;
Ajit Khaparde7acc2082011-02-11 13:38:17 +00001938 adapter->eeh_err = true;
Ajit Khaparded053de92010-09-03 06:23:30 +00001939 dev_err(&adapter->pdev->dev, "UE Detected!!\n");
1940 }
1941
Ajit Khaparde7c185272010-07-29 06:16:33 +00001942 if (ue_status_lo) {
1943 for (i = 0; ue_status_lo; ue_status_lo >>= 1, i++) {
1944 if (ue_status_lo & 1)
1945 dev_err(&adapter->pdev->dev,
1946 "UE: %s bit set\n", ue_status_low_desc[i]);
1947 }
1948 }
1949 if (ue_status_hi) {
1950 for (i = 0; ue_status_hi; ue_status_hi >>= 1, i++) {
1951 if (ue_status_hi & 1)
1952 dev_err(&adapter->pdev->dev,
1953 "UE: %s bit set\n", ue_status_hi_desc[i]);
1954 }
1955 }
1956
1957}
1958
Sathya Perlaea1dae12009-03-19 23:56:20 -07001959static void be_worker(struct work_struct *work)
1960{
1961 struct be_adapter *adapter =
1962 container_of(work, struct be_adapter, work.work);
Sathya Perla3abcded2010-10-03 22:12:27 -07001963 struct be_rx_obj *rxo;
1964 int i;
Sathya Perlaea1dae12009-03-19 23:56:20 -07001965
Sathya Perla16da8252011-03-21 20:49:27 +00001966 if (!adapter->ue_detected && !lancer_chip(adapter))
1967 be_detect_dump_ue(adapter);
1968
Somnath Koturf203af72010-10-25 23:01:03 +00001969	/* When interrupts are not yet enabled, just reap any pending
 1970	 * MCC completions */
1971 if (!netif_running(adapter->netdev)) {
1972 int mcc_compl, status = 0;
1973
1974 mcc_compl = be_process_mcc(adapter, &status);
1975
1976 if (mcc_compl) {
1977 struct be_mcc_obj *mcc_obj = &adapter->mcc_obj;
1978 be_cq_notify(adapter, mcc_obj->cq.id, false, mcc_compl);
1979 }
Ajit Khaparde9b037f32011-02-11 13:38:29 +00001980
Somnath Koturf203af72010-10-25 23:01:03 +00001981 goto reschedule;
1982 }
1983
Selvin Xavier005d5692011-05-16 07:36:35 +00001984 if (!adapter->stats_cmd_sent) {
1985 if (lancer_chip(adapter))
1986 lancer_cmd_get_pport_stats(adapter,
1987 &adapter->stats_cmd);
1988 else
1989 be_cmd_get_stats(adapter, &adapter->stats_cmd);
1990 }
Sathya Perla3c8def92011-06-12 20:01:58 +00001991
Sathya Perla3abcded2010-10-03 22:12:27 -07001992 for_all_rx_queues(adapter, rxo, i) {
Sathya Perla3abcded2010-10-03 22:12:27 -07001993 be_rx_eqd_update(adapter, rxo);
1994
1995 if (rxo->rx_post_starved) {
1996 rxo->rx_post_starved = false;
Eric Dumazet1829b082011-03-01 05:48:12 +00001997 be_post_rx_frags(rxo, GFP_KERNEL);
Sathya Perla3abcded2010-10-03 22:12:27 -07001998 }
Sathya Perlaea1dae12009-03-19 23:56:20 -07001999 }
2000
Somnath Koturf203af72010-10-25 23:01:03 +00002001reschedule:
Ivan Vecerae74fbd032011-04-21 00:20:04 +00002002 adapter->work_counter++;
Sathya Perlaea1dae12009-03-19 23:56:20 -07002003 schedule_delayed_work(&adapter->work, msecs_to_jiffies(1000));
2004}
2005
Sathya Perla8d56ff12009-11-22 22:02:26 +00002006static void be_msix_disable(struct be_adapter *adapter)
2007{
Sathya Perlaac6a0c42011-03-21 20:49:25 +00002008 if (msix_enabled(adapter)) {
Sathya Perla8d56ff12009-11-22 22:02:26 +00002009 pci_disable_msix(adapter->pdev);
Sathya Perlaac6a0c42011-03-21 20:49:25 +00002010 adapter->num_msix_vec = 0;
Sathya Perla3abcded2010-10-03 22:12:27 -07002011 }
2012}
2013
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002014static void be_msix_enable(struct be_adapter *adapter)
2015{
Sathya Perla3abcded2010-10-03 22:12:27 -07002016#define BE_MIN_MSIX_VECTORS (1 + 1) /* Rx + Tx */
Sathya Perlaac6a0c42011-03-21 20:49:25 +00002017 int i, status, num_vec;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002018
Sathya Perlaac6a0c42011-03-21 20:49:25 +00002019 num_vec = be_num_rxqs_want(adapter) + 1;
Sathya Perla3abcded2010-10-03 22:12:27 -07002020
Sathya Perlaac6a0c42011-03-21 20:49:25 +00002021 for (i = 0; i < num_vec; i++)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002022 adapter->msix_entries[i].entry = i;
2023
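	/* pci_enable_msix() returns 0 on success or, on failure, the number
	 * of vectors that could have been allocated; retry with that count
	 * as long as it covers the minimum (Rx + Tx).
	 */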
Sathya Perlaac6a0c42011-03-21 20:49:25 +00002024 status = pci_enable_msix(adapter->pdev, adapter->msix_entries, num_vec);
Sathya Perla3abcded2010-10-03 22:12:27 -07002025 if (status == 0) {
2026 goto done;
2027 } else if (status >= BE_MIN_MSIX_VECTORS) {
Sathya Perlaac6a0c42011-03-21 20:49:25 +00002028 num_vec = status;
Sathya Perla3abcded2010-10-03 22:12:27 -07002029 if (pci_enable_msix(adapter->pdev, adapter->msix_entries,
Sathya Perlaac6a0c42011-03-21 20:49:25 +00002030 num_vec) == 0)
Sathya Perla3abcded2010-10-03 22:12:27 -07002031 goto done;
Sathya Perla3abcded2010-10-03 22:12:27 -07002032 }
2033 return;
2034done:
Sathya Perlaac6a0c42011-03-21 20:49:25 +00002035 adapter->num_msix_vec = num_vec;
2036 return;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002037}
2038
Sarveshwar Bandiba343c72010-03-31 02:56:12 +00002039static void be_sriov_enable(struct be_adapter *adapter)
2040{
Sarveshwar Bandi344dbf12010-07-09 01:43:55 +00002041 be_check_sriov_fn_type(adapter);
Ajit Khaparde6dedec82010-07-29 06:15:32 +00002042#ifdef CONFIG_PCI_IOV
Sarveshwar Bandiba343c72010-03-31 02:56:12 +00002043 if (be_physfn(adapter) && num_vfs) {
Ajit Khaparde81be8f02011-04-06 18:08:17 +00002044 int status, pos;
2045 u16 nvfs;
2046
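		/* Cap the requested num_vfs at the TotalVFs value advertised
		 * in the device's SR-IOV extended capability.
		 */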
2047 pos = pci_find_ext_capability(adapter->pdev,
2048 PCI_EXT_CAP_ID_SRIOV);
2049 pci_read_config_word(adapter->pdev,
2050 pos + PCI_SRIOV_TOTAL_VF, &nvfs);
2051
2052 if (num_vfs > nvfs) {
2053 dev_info(&adapter->pdev->dev,
 2054				"Device supports only %d VFs, not %d\n",
2055 nvfs, num_vfs);
2056 num_vfs = nvfs;
2057 }
Ajit Khaparde6dedec82010-07-29 06:15:32 +00002058
Sarveshwar Bandiba343c72010-03-31 02:56:12 +00002059 status = pci_enable_sriov(adapter->pdev, num_vfs);
 2060		adapter->sriov_enabled = !status;
2061 }
2062#endif
Sarveshwar Bandiba343c72010-03-31 02:56:12 +00002063}
2064
2065static void be_sriov_disable(struct be_adapter *adapter)
2066{
2067#ifdef CONFIG_PCI_IOV
2068 if (adapter->sriov_enabled) {
2069 pci_disable_sriov(adapter->pdev);
2070 adapter->sriov_enabled = false;
2071 }
2072#endif
2073}
2074
Sathya Perlafe6d2a32010-11-21 23:25:50 +00002075static inline int be_msix_vec_get(struct be_adapter *adapter,
2076 struct be_eq_obj *eq_obj)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002077{
Padmanabh Ratnakarecd62102011-04-03 01:54:11 +00002078 return adapter->msix_entries[eq_obj->eq_idx].vector;
Sathya Perlab628bde2009-08-17 00:58:26 +00002079}
2080
2081static int be_request_irq(struct be_adapter *adapter,
2082 struct be_eq_obj *eq_obj,
Sathya Perla3abcded2010-10-03 22:12:27 -07002083 void *handler, char *desc, void *context)
Sathya Perlab628bde2009-08-17 00:58:26 +00002084{
2085 struct net_device *netdev = adapter->netdev;
2086 int vec;
2087
2088 sprintf(eq_obj->desc, "%s-%s", netdev->name, desc);
Sathya Perlafe6d2a32010-11-21 23:25:50 +00002089 vec = be_msix_vec_get(adapter, eq_obj);
Sathya Perla3abcded2010-10-03 22:12:27 -07002090 return request_irq(vec, handler, 0, eq_obj->desc, context);
Sathya Perlab628bde2009-08-17 00:58:26 +00002091}
2092
Sathya Perla3abcded2010-10-03 22:12:27 -07002093static void be_free_irq(struct be_adapter *adapter, struct be_eq_obj *eq_obj,
2094 void *context)
Sathya Perlab628bde2009-08-17 00:58:26 +00002095{
Sathya Perlafe6d2a32010-11-21 23:25:50 +00002096 int vec = be_msix_vec_get(adapter, eq_obj);
Sathya Perla3abcded2010-10-03 22:12:27 -07002097 free_irq(vec, context);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002098}
2099
2100static int be_msix_register(struct be_adapter *adapter)
2101{
Sathya Perla3abcded2010-10-03 22:12:27 -07002102 struct be_rx_obj *rxo;
2103 int status, i;
2104 char qname[10];
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002105
Sathya Perla3abcded2010-10-03 22:12:27 -07002106 status = be_request_irq(adapter, &adapter->tx_eq, be_msix_tx_mcc, "tx",
2107 adapter);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002108 if (status)
2109 goto err;
2110
Sathya Perla3abcded2010-10-03 22:12:27 -07002111 for_all_rx_queues(adapter, rxo, i) {
2112 sprintf(qname, "rxq%d", i);
2113 status = be_request_irq(adapter, &rxo->rx_eq, be_msix_rx,
2114 qname, rxo);
2115 if (status)
2116 goto err_msix;
2117 }
Sathya Perlab628bde2009-08-17 00:58:26 +00002118
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002119 return 0;
Sathya Perlab628bde2009-08-17 00:58:26 +00002120
Sathya Perla3abcded2010-10-03 22:12:27 -07002121err_msix:
2122 be_free_irq(adapter, &adapter->tx_eq, adapter);
2123
2124 for (i--, rxo = &adapter->rx_obj[i]; i >= 0; i--, rxo--)
2125 be_free_irq(adapter, &rxo->rx_eq, rxo);
2126
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002127err:
2128 dev_warn(&adapter->pdev->dev,
2129 "MSIX Request IRQ failed - err %d\n", status);
Sathya Perlaac6a0c42011-03-21 20:49:25 +00002130 be_msix_disable(adapter);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002131 return status;
2132}
2133
2134static int be_irq_register(struct be_adapter *adapter)
2135{
2136 struct net_device *netdev = adapter->netdev;
2137 int status;
2138
Sathya Perlaac6a0c42011-03-21 20:49:25 +00002139 if (msix_enabled(adapter)) {
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002140 status = be_msix_register(adapter);
2141 if (status == 0)
2142 goto done;
Sarveshwar Bandiba343c72010-03-31 02:56:12 +00002143 /* INTx is not supported for VF */
2144 if (!be_physfn(adapter))
2145 return status;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002146 }
2147
2148 /* INTx */
2149 netdev->irq = adapter->pdev->irq;
2150 status = request_irq(netdev->irq, be_intx, IRQF_SHARED, netdev->name,
2151 adapter);
2152 if (status) {
2153 dev_err(&adapter->pdev->dev,
2154 "INTx request IRQ failed - err %d\n", status);
2155 return status;
2156 }
2157done:
2158 adapter->isr_registered = true;
2159 return 0;
2160}
2161
2162static void be_irq_unregister(struct be_adapter *adapter)
2163{
2164 struct net_device *netdev = adapter->netdev;
Sathya Perla3abcded2010-10-03 22:12:27 -07002165 struct be_rx_obj *rxo;
2166 int i;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002167
2168 if (!adapter->isr_registered)
2169 return;
2170
2171 /* INTx */
Sathya Perlaac6a0c42011-03-21 20:49:25 +00002172 if (!msix_enabled(adapter)) {
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002173 free_irq(netdev->irq, adapter);
2174 goto done;
2175 }
2176
2177 /* MSIx */
Sathya Perla3abcded2010-10-03 22:12:27 -07002178 be_free_irq(adapter, &adapter->tx_eq, adapter);
2179
2180 for_all_rx_queues(adapter, rxo, i)
2181 be_free_irq(adapter, &rxo->rx_eq, rxo);
2182
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002183done:
2184 adapter->isr_registered = false;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002185}
2186
Sathya Perla482c9e72011-06-29 23:33:17 +00002187static void be_rx_queues_clear(struct be_adapter *adapter)
2188{
2189 struct be_queue_info *q;
2190 struct be_rx_obj *rxo;
2191 int i;
2192
2193 for_all_rx_queues(adapter, rxo, i) {
2194 q = &rxo->q;
2195 if (q->created) {
2196 be_cmd_rxq_destroy(adapter, q);
2197 /* After the rxq is invalidated, wait for a grace time
 2198			 * of 1ms for all DMA to end and the flush compl to
2199 * arrive
2200 */
2201 mdelay(1);
2202 be_rx_q_clean(adapter, rxo);
2203 }
2204
2205 /* Clear any residual events */
2206 q = &rxo->rx_eq.q;
2207 if (q->created)
2208 be_eq_clean(adapter, &rxo->rx_eq);
2209 }
2210}
2211
Sathya Perla889cd4b2010-05-30 23:33:45 +00002212static int be_close(struct net_device *netdev)
2213{
2214 struct be_adapter *adapter = netdev_priv(netdev);
Sathya Perla3abcded2010-10-03 22:12:27 -07002215 struct be_rx_obj *rxo;
Sathya Perla3c8def92011-06-12 20:01:58 +00002216 struct be_tx_obj *txo;
Sathya Perla889cd4b2010-05-30 23:33:45 +00002217 struct be_eq_obj *tx_eq = &adapter->tx_eq;
Sathya Perla3abcded2010-10-03 22:12:27 -07002218 int vec, i;
Sathya Perla889cd4b2010-05-30 23:33:45 +00002219
Sathya Perla889cd4b2010-05-30 23:33:45 +00002220 be_async_mcc_disable(adapter);
2221
Sathya Perlafe6d2a32010-11-21 23:25:50 +00002222 if (!lancer_chip(adapter))
2223 be_intr_set(adapter, false);
Sathya Perla889cd4b2010-05-30 23:33:45 +00002224
Padmanabh Ratnakar63fcb272011-03-07 03:09:17 +00002225 for_all_rx_queues(adapter, rxo, i)
2226 napi_disable(&rxo->rx_eq.napi);
2227
2228 napi_disable(&tx_eq->napi);
2229
2230 if (lancer_chip(adapter)) {
Padmanabh Ratnakar63fcb272011-03-07 03:09:17 +00002231 be_cq_notify(adapter, adapter->mcc_obj.cq.id, false, 0);
2232 for_all_rx_queues(adapter, rxo, i)
2233 be_cq_notify(adapter, rxo->cq.id, false, 0);
Sathya Perla3c8def92011-06-12 20:01:58 +00002234 for_all_tx_queues(adapter, txo, i)
2235 be_cq_notify(adapter, txo->cq.id, false, 0);
Padmanabh Ratnakar63fcb272011-03-07 03:09:17 +00002236 }
2237
Sathya Perlaac6a0c42011-03-21 20:49:25 +00002238 if (msix_enabled(adapter)) {
Sathya Perlafe6d2a32010-11-21 23:25:50 +00002239 vec = be_msix_vec_get(adapter, tx_eq);
Sathya Perla889cd4b2010-05-30 23:33:45 +00002240 synchronize_irq(vec);
Sathya Perla3abcded2010-10-03 22:12:27 -07002241
2242 for_all_rx_queues(adapter, rxo, i) {
Sathya Perlafe6d2a32010-11-21 23:25:50 +00002243 vec = be_msix_vec_get(adapter, &rxo->rx_eq);
Sathya Perla3abcded2010-10-03 22:12:27 -07002244 synchronize_irq(vec);
2245 }
Sathya Perla889cd4b2010-05-30 23:33:45 +00002246 } else {
2247 synchronize_irq(netdev->irq);
2248 }
2249 be_irq_unregister(adapter);
2250
Sathya Perla889cd4b2010-05-30 23:33:45 +00002251 /* Wait for all pending tx completions to arrive so that
2252 * all tx skbs are freed.
2253 */
Sathya Perla3c8def92011-06-12 20:01:58 +00002254 for_all_tx_queues(adapter, txo, i)
2255 be_tx_compl_clean(adapter, txo);
Sathya Perla889cd4b2010-05-30 23:33:45 +00002256
Sathya Perla482c9e72011-06-29 23:33:17 +00002257 be_rx_queues_clear(adapter);
2258 return 0;
2259}
2260
2261static int be_rx_queues_setup(struct be_adapter *adapter)
2262{
2263 struct be_rx_obj *rxo;
2264 int rc, i;
2265 u8 rsstable[MAX_RSS_QS];
2266
2267 for_all_rx_queues(adapter, rxo, i) {
2268 rc = be_cmd_rxq_create(adapter, &rxo->q, rxo->cq.id,
2269 rx_frag_size, BE_MAX_JUMBO_FRAME_SIZE,
2270 adapter->if_handle,
 2271			(i > 0) ? 1 : 0 /* rss enable */, &rxo->rss_id);
2272 if (rc)
2273 return rc;
2274 }
2275
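	/* Queue 0 is the default non-RSS queue; program the ids of the
	 * RSS-enabled queues into the RSS indirection table.
	 */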
2276 if (be_multi_rxq(adapter)) {
2277 for_all_rss_queues(adapter, rxo, i)
2278 rsstable[i] = rxo->rss_id;
2279
2280 rc = be_cmd_rss_config(adapter, rsstable,
2281 adapter->num_rx_qs - 1);
2282 if (rc)
2283 return rc;
2284 }
2285
2286 /* First time posting */
2287 for_all_rx_queues(adapter, rxo, i) {
2288 be_post_rx_frags(rxo, GFP_KERNEL);
2289 napi_enable(&rxo->rx_eq.napi);
2290 }
Sathya Perla889cd4b2010-05-30 23:33:45 +00002291 return 0;
2292}
2293
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002294static int be_open(struct net_device *netdev)
2295{
2296 struct be_adapter *adapter = netdev_priv(netdev);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002297 struct be_eq_obj *tx_eq = &adapter->tx_eq;
Sathya Perla3abcded2010-10-03 22:12:27 -07002298 struct be_rx_obj *rxo;
Sathya Perla3abcded2010-10-03 22:12:27 -07002299 int status, i;
Sathya Perla5fb379e2009-06-18 00:02:59 +00002300
Sathya Perla482c9e72011-06-29 23:33:17 +00002301 status = be_rx_queues_setup(adapter);
2302 if (status)
2303 goto err;
2304
Sathya Perla5fb379e2009-06-18 00:02:59 +00002305 napi_enable(&tx_eq->napi);
2306
2307 be_irq_register(adapter);
2308
Sathya Perlafe6d2a32010-11-21 23:25:50 +00002309 if (!lancer_chip(adapter))
2310 be_intr_set(adapter, true);
Sathya Perla5fb379e2009-06-18 00:02:59 +00002311
2312 /* The evt queues are created in unarmed state; arm them */
Sathya Perla3abcded2010-10-03 22:12:27 -07002313 for_all_rx_queues(adapter, rxo, i) {
2314 be_eq_notify(adapter, rxo->rx_eq.q.id, true, false, 0);
2315 be_cq_notify(adapter, rxo->cq.id, true, 0);
2316 }
Sathya Perla8788fdc2009-07-27 22:52:03 +00002317 be_eq_notify(adapter, tx_eq->q.id, true, false, 0);
Sathya Perla5fb379e2009-06-18 00:02:59 +00002318
Sathya Perla7a1e9b22010-02-17 01:35:11 +00002319 /* Now that interrupts are on we can process async mcc */
2320 be_async_mcc_enable(adapter);
2321
Sarveshwar Bandiba343c72010-03-31 02:56:12 +00002322 if (be_physfn(adapter)) {
Ajit Khaparde1da87b72010-07-23 01:51:22 +00002323 status = be_vid_config(adapter, false, 0);
Sathya Perla889cd4b2010-05-30 23:33:45 +00002324 if (status)
2325 goto err;
2326
Sarveshwar Bandiba343c72010-03-31 02:56:12 +00002327 status = be_cmd_set_flow_control(adapter,
2328 adapter->tx_fc, adapter->rx_fc);
2329 if (status)
Sathya Perla889cd4b2010-05-30 23:33:45 +00002330 goto err;
Sarveshwar Bandiba343c72010-03-31 02:56:12 +00002331 }
Ajit Khaparde4f2aa892009-11-06 02:07:32 +00002332
Sathya Perla889cd4b2010-05-30 23:33:45 +00002333 return 0;
2334err:
2335 be_close(adapter->netdev);
2336 return -EIO;
Sathya Perla5fb379e2009-06-18 00:02:59 +00002337}
2338
Ajit Khaparde71d8d1b2009-12-03 06:16:59 +00002339static int be_setup_wol(struct be_adapter *adapter, bool enable)
2340{
2341 struct be_dma_mem cmd;
2342 int status = 0;
2343 u8 mac[ETH_ALEN];
2344
2345 memset(mac, 0, ETH_ALEN);
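	/* The all-zero MAC is used in the disable path below, which clears
	 * the magic-packet filter in the firmware.
	 */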
2346
2347 cmd.size = sizeof(struct be_cmd_req_acpi_wol_magic_config);
Ivan Vecera2b7bceb2011-02-02 08:05:12 +00002348 cmd.va = dma_alloc_coherent(&adapter->pdev->dev, cmd.size, &cmd.dma,
2349 GFP_KERNEL);
Ajit Khaparde71d8d1b2009-12-03 06:16:59 +00002350 if (cmd.va == NULL)
2351 return -1;
2352 memset(cmd.va, 0, cmd.size);
2353
2354 if (enable) {
2355 status = pci_write_config_dword(adapter->pdev,
2356 PCICFG_PM_CONTROL_OFFSET, PCICFG_PM_CONTROL_MASK);
2357 if (status) {
2358 dev_err(&adapter->pdev->dev,
Frans Pop2381a552010-03-24 07:57:36 +00002359 "Could not enable Wake-on-lan\n");
Ivan Vecera2b7bceb2011-02-02 08:05:12 +00002360 dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va,
2361 cmd.dma);
Ajit Khaparde71d8d1b2009-12-03 06:16:59 +00002362 return status;
2363 }
2364 status = be_cmd_enable_magic_wol(adapter,
2365 adapter->netdev->dev_addr, &cmd);
2366 pci_enable_wake(adapter->pdev, PCI_D3hot, 1);
2367 pci_enable_wake(adapter->pdev, PCI_D3cold, 1);
2368 } else {
2369 status = be_cmd_enable_magic_wol(adapter, mac, &cmd);
2370 pci_enable_wake(adapter->pdev, PCI_D3hot, 0);
2371 pci_enable_wake(adapter->pdev, PCI_D3cold, 0);
2372 }
2373
Ivan Vecera2b7bceb2011-02-02 08:05:12 +00002374 dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va, cmd.dma);
Ajit Khaparde71d8d1b2009-12-03 06:16:59 +00002375 return status;
2376}
2377
Ajit Khaparde6d87f5c2010-08-25 00:32:33 +00002378/*
2379 * Generate a seed MAC address from the PF MAC Address using jhash.
2380 * MAC Address for VFs are assigned incrementally starting from the seed.
2381 * These addresses are programmed in the ASIC by the PF and the VF driver
2382 * queries for the MAC address during its probe.
2383 */
2384static inline int be_vf_eth_addr_config(struct be_adapter *adapter)
2385{
2386 u32 vf = 0;
Sathya Perla3abcded2010-10-03 22:12:27 -07002387 int status = 0;
Ajit Khaparde6d87f5c2010-08-25 00:32:33 +00002388 u8 mac[ETH_ALEN];
2389
2390 be_vf_eth_addr_generate(adapter, mac);
2391
2392 for (vf = 0; vf < num_vfs; vf++) {
2393 status = be_cmd_pmac_add(adapter, mac,
2394 adapter->vf_cfg[vf].vf_if_handle,
Ajit Khapardef8617e02011-02-11 13:36:37 +00002395 &adapter->vf_cfg[vf].vf_pmac_id,
2396 vf + 1);
Ajit Khaparde6d87f5c2010-08-25 00:32:33 +00002397 if (status)
2398 dev_err(&adapter->pdev->dev,
2399 "Mac address add failed for VF %d\n", vf);
2400 else
2401 memcpy(adapter->vf_cfg[vf].vf_mac_addr, mac, ETH_ALEN);
2402
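		/* Each subsequent VF gets the previous MAC + 1; only the
		 * last octet is bumped, so a wrap past 0xff is not handled.
		 */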
2403 mac[5] += 1;
2404 }
2405 return status;
2406}
2407
2408static inline void be_vf_eth_addr_rem(struct be_adapter *adapter)
2409{
2410 u32 vf;
2411
2412 for (vf = 0; vf < num_vfs; vf++) {
2413 if (adapter->vf_cfg[vf].vf_pmac_id != BE_INVALID_PMAC_ID)
2414 be_cmd_pmac_del(adapter,
2415 adapter->vf_cfg[vf].vf_if_handle,
Ajit Khapardef8617e02011-02-11 13:36:37 +00002416 adapter->vf_cfg[vf].vf_pmac_id, vf + 1);
Ajit Khaparde6d87f5c2010-08-25 00:32:33 +00002417 }
2418}
2419
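/* Bring-up: create the PF interface (and one interface per VF when SR-IOV
 * is enabled), then the TX, RX and MCC queue sets. Failures unwind in
 * reverse order through the labels at the bottom of the function.
 */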
static int be_setup(struct be_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	u32 cap_flags, en_flags, vf = 0;
	int status;
	u8 mac[ETH_ALEN];

	be_cmd_req_native_mode(adapter);

	cap_flags = en_flags = BE_IF_FLAGS_UNTAGGED |
				BE_IF_FLAGS_BROADCAST |
				BE_IF_FLAGS_MULTICAST;

	if (be_physfn(adapter)) {
		cap_flags |= BE_IF_FLAGS_MCAST_PROMISCUOUS |
				BE_IF_FLAGS_PROMISCUOUS |
				BE_IF_FLAGS_PASS_L3L4_ERRORS;
		en_flags |= BE_IF_FLAGS_PASS_L3L4_ERRORS;

		if (adapter->function_caps & BE_FUNCTION_CAPS_RSS) {
			cap_flags |= BE_IF_FLAGS_RSS;
			en_flags |= BE_IF_FLAGS_RSS;
		}
	}

	status = be_cmd_if_create(adapter, cap_flags, en_flags,
			netdev->dev_addr, false /* pmac_invalid */,
			&adapter->if_handle, &adapter->pmac_id, 0);
	if (status != 0)
		goto do_none;

	if (be_physfn(adapter)) {
		if (adapter->sriov_enabled) {
			while (vf < num_vfs) {
				cap_flags = en_flags = BE_IF_FLAGS_UNTAGGED |
							BE_IF_FLAGS_BROADCAST;
				status = be_cmd_if_create(adapter, cap_flags,
					en_flags, mac, true,
					&adapter->vf_cfg[vf].vf_if_handle,
					NULL, vf+1);
				if (status) {
					dev_err(&adapter->pdev->dev,
					"Interface Create failed for VF %d\n",
					vf);
					goto if_destroy;
				}
				adapter->vf_cfg[vf].vf_pmac_id =
						BE_INVALID_PMAC_ID;
				vf++;
			}
		}
	} else {
		status = be_cmd_mac_addr_query(adapter, mac,
			MAC_ADDRESS_TYPE_NETWORK, false, adapter->if_handle);
		if (!status) {
			memcpy(adapter->netdev->dev_addr, mac, ETH_ALEN);
			memcpy(adapter->netdev->perm_addr, mac, ETH_ALEN);
		}
	}

	status = be_tx_queues_create(adapter);
	if (status != 0)
		goto if_destroy;

	status = be_rx_queues_create(adapter);
	if (status != 0)
		goto tx_qs_destroy;

	/* Allow all priorities by default. A GRP5 evt may modify this */
	adapter->vlan_prio_bmap = 0xff;

	status = be_mcc_queues_create(adapter);
	if (status != 0)
		goto rx_qs_destroy;

	adapter->link_speed = -1;

	return 0;

rx_qs_destroy:
	be_rx_queues_destroy(adapter);
tx_qs_destroy:
	be_tx_queues_destroy(adapter);
if_destroy:
	if (be_physfn(adapter) && adapter->sriov_enabled)
		for (vf = 0; vf < num_vfs; vf++)
			if (adapter->vf_cfg[vf].vf_if_handle)
				be_cmd_if_destroy(adapter,
					adapter->vf_cfg[vf].vf_if_handle,
					vf + 1);
	be_cmd_if_destroy(adapter, adapter->if_handle, 0);
do_none:
	return status;
}

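/* Teardown mirror of be_setup(): VF MAC entries first, then the queue
 * sets, then the VF and PF interfaces, and finally be_cmd_fw_clean() to
 * tell the firmware no further commands are coming.
 */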
static int be_clear(struct be_adapter *adapter)
{
	int vf;

	if (be_physfn(adapter) && adapter->sriov_enabled)
		be_vf_eth_addr_rem(adapter);

	be_mcc_queues_destroy(adapter);
	be_rx_queues_destroy(adapter);
	be_tx_queues_destroy(adapter);
	adapter->eq_next_idx = 0;

	if (be_physfn(adapter) && adapter->sriov_enabled)
		for (vf = 0; vf < num_vfs; vf++)
			if (adapter->vf_cfg[vf].vf_if_handle)
				be_cmd_if_destroy(adapter,
					adapter->vf_cfg[vf].vf_if_handle,
					vf + 1);

	be_cmd_if_destroy(adapter, adapter->if_handle, 0);

	adapter->be3_native = 0;

	/* tell fw we're done with firing cmds */
	be_cmd_fw_clean(adapter);
	return 0;
}


#define FW_FILE_HDR_SIGN 	"ServerEngines Corp. "
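/* Reflash the redboot (boot code) component only when needed: the CRC in
 * the last 4 bytes of the new image component is compared against the CRC
 * the firmware reads back from flash, and a match skips the region.
 */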
static bool be_flash_redboot(struct be_adapter *adapter,
			const u8 *p, u32 img_start, int image_size,
			int hdr_size)
{
	u32 crc_offset;
	u8 flashed_crc[4];
	int status;

	crc_offset = hdr_size + img_start + image_size - 4;

	p += crc_offset;

	status = be_cmd_get_flash_crc(adapter, flashed_crc,
			(image_size - 4));
	if (status) {
		dev_err(&adapter->pdev->dev,
		"could not get crc from flash, not flashing redboot\n");
		return false;
	}

	/* update redboot only if crc does not match */
	if (!memcmp(flashed_crc, p, 4))
		return false;
	else
		return true;
}

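/* Walk the per-generation component table and flash each selected image in
 * 32KB chunks: intermediate chunks use FLASHROM_OPER_SAVE and the final
 * chunk uses FLASHROM_OPER_FLASH, which commits the component.
 */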
static int be_flash_data(struct be_adapter *adapter,
			const struct firmware *fw,
			struct be_dma_mem *flash_cmd, int num_of_images)
{
	int status = 0, i, filehdr_size = 0;
	u32 total_bytes = 0, flash_op;
	int num_bytes;
	const u8 *p = fw->data;
	struct be_cmd_write_flashrom *req = flash_cmd->va;
	const struct flash_comp *pflashcomp;
	int num_comp;

	static const struct flash_comp gen3_flash_types[9] = {
		{ FLASH_iSCSI_PRIMARY_IMAGE_START_g3, IMG_TYPE_ISCSI_ACTIVE,
			FLASH_IMAGE_MAX_SIZE_g3},
		{ FLASH_REDBOOT_START_g3, IMG_TYPE_REDBOOT,
			FLASH_REDBOOT_IMAGE_MAX_SIZE_g3},
		{ FLASH_iSCSI_BIOS_START_g3, IMG_TYPE_BIOS,
			FLASH_BIOS_IMAGE_MAX_SIZE_g3},
		{ FLASH_PXE_BIOS_START_g3, IMG_TYPE_PXE_BIOS,
			FLASH_BIOS_IMAGE_MAX_SIZE_g3},
		{ FLASH_FCoE_BIOS_START_g3, IMG_TYPE_FCOE_BIOS,
			FLASH_BIOS_IMAGE_MAX_SIZE_g3},
		{ FLASH_iSCSI_BACKUP_IMAGE_START_g3, IMG_TYPE_ISCSI_BACKUP,
			FLASH_IMAGE_MAX_SIZE_g3},
		{ FLASH_FCoE_PRIMARY_IMAGE_START_g3, IMG_TYPE_FCOE_FW_ACTIVE,
			FLASH_IMAGE_MAX_SIZE_g3},
		{ FLASH_FCoE_BACKUP_IMAGE_START_g3, IMG_TYPE_FCOE_FW_BACKUP,
			FLASH_IMAGE_MAX_SIZE_g3},
		{ FLASH_NCSI_START_g3, IMG_TYPE_NCSI_FW,
			FLASH_NCSI_IMAGE_MAX_SIZE_g3}
	};
	static const struct flash_comp gen2_flash_types[8] = {
		{ FLASH_iSCSI_PRIMARY_IMAGE_START_g2, IMG_TYPE_ISCSI_ACTIVE,
			FLASH_IMAGE_MAX_SIZE_g2},
		{ FLASH_REDBOOT_START_g2, IMG_TYPE_REDBOOT,
			FLASH_REDBOOT_IMAGE_MAX_SIZE_g2},
		{ FLASH_iSCSI_BIOS_START_g2, IMG_TYPE_BIOS,
			FLASH_BIOS_IMAGE_MAX_SIZE_g2},
		{ FLASH_PXE_BIOS_START_g2, IMG_TYPE_PXE_BIOS,
			FLASH_BIOS_IMAGE_MAX_SIZE_g2},
		{ FLASH_FCoE_BIOS_START_g2, IMG_TYPE_FCOE_BIOS,
			FLASH_BIOS_IMAGE_MAX_SIZE_g2},
		{ FLASH_iSCSI_BACKUP_IMAGE_START_g2, IMG_TYPE_ISCSI_BACKUP,
			FLASH_IMAGE_MAX_SIZE_g2},
		{ FLASH_FCoE_PRIMARY_IMAGE_START_g2, IMG_TYPE_FCOE_FW_ACTIVE,
			FLASH_IMAGE_MAX_SIZE_g2},
		{ FLASH_FCoE_BACKUP_IMAGE_START_g2, IMG_TYPE_FCOE_FW_BACKUP,
			FLASH_IMAGE_MAX_SIZE_g2}
	};

	if (adapter->generation == BE_GEN3) {
		pflashcomp = gen3_flash_types;
		filehdr_size = sizeof(struct flash_file_hdr_g3);
		num_comp = ARRAY_SIZE(gen3_flash_types);
	} else {
		pflashcomp = gen2_flash_types;
		filehdr_size = sizeof(struct flash_file_hdr_g2);
		num_comp = ARRAY_SIZE(gen2_flash_types);
	}
	for (i = 0; i < num_comp; i++) {
		if ((pflashcomp[i].optype == IMG_TYPE_NCSI_FW) &&
				memcmp(adapter->fw_ver, "3.102.148.0", 11) < 0)
			continue;
		if ((pflashcomp[i].optype == IMG_TYPE_REDBOOT) &&
			(!be_flash_redboot(adapter, fw->data,
			pflashcomp[i].offset, pflashcomp[i].size, filehdr_size +
			(num_of_images * sizeof(struct image_hdr)))))
			continue;
		p = fw->data;
		p += filehdr_size + pflashcomp[i].offset
			+ (num_of_images * sizeof(struct image_hdr));
		if (p + pflashcomp[i].size > fw->data + fw->size)
			return -1;
		total_bytes = pflashcomp[i].size;
		while (total_bytes) {
			if (total_bytes > 32*1024)
				num_bytes = 32*1024;
			else
				num_bytes = total_bytes;
			total_bytes -= num_bytes;

			if (!total_bytes)
				flash_op = FLASHROM_OPER_FLASH;
			else
				flash_op = FLASHROM_OPER_SAVE;
			memcpy(req->params.data_buf, p, num_bytes);
			p += num_bytes;
			status = be_cmd_write_flashrom(adapter, flash_cmd,
				pflashcomp[i].optype, flash_op, num_bytes);
			if (status) {
				dev_err(&adapter->pdev->dev,
					"cmd to write to flash rom failed.\n");
				return -1;
			}
		}
	}
	return 0;
}

static int get_ufigen_type(struct flash_file_hdr_g2 *fhdr)
{
	if (fhdr == NULL)
		return 0;
	if (fhdr->build[0] == '3')
		return BE_GEN3;
	else if (fhdr->build[0] == '2')
		return BE_GEN2;
	else
		return 0;
}

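/* Lancer download path: the raw image is streamed to the "/prg" object in
 * 32KB WRITE_OBJECT chunks, and a final zero-length write at the end
 * offset commits the image.
 */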
static int lancer_fw_download(struct be_adapter *adapter,
				const struct firmware *fw)
{
#define LANCER_FW_DOWNLOAD_CHUNK	(32 * 1024)
#define LANCER_FW_DOWNLOAD_LOCATION	"/prg"
	struct be_dma_mem flash_cmd;
	const u8 *data_ptr = NULL;
	u8 *dest_image_ptr = NULL;
	size_t image_size = 0;
	u32 chunk_size = 0;
	u32 data_written = 0;
	u32 offset = 0;
	int status = 0;
	u8 add_status = 0;

	if (!IS_ALIGNED(fw->size, sizeof(u32))) {
		dev_err(&adapter->pdev->dev,
			"FW Image not properly aligned. "
			"Length must be 4 byte aligned.\n");
		status = -EINVAL;
		goto lancer_fw_exit;
	}

	flash_cmd.size = sizeof(struct lancer_cmd_req_write_object)
				+ LANCER_FW_DOWNLOAD_CHUNK;
	flash_cmd.va = dma_alloc_coherent(&adapter->pdev->dev, flash_cmd.size,
						&flash_cmd.dma, GFP_KERNEL);
	if (!flash_cmd.va) {
		status = -ENOMEM;
		dev_err(&adapter->pdev->dev,
			"Memory allocation failure while flashing\n");
		goto lancer_fw_exit;
	}

	dest_image_ptr = flash_cmd.va +
				sizeof(struct lancer_cmd_req_write_object);
	image_size = fw->size;
	data_ptr = fw->data;

	while (image_size) {
		chunk_size = min_t(u32, image_size, LANCER_FW_DOWNLOAD_CHUNK);

		/* Copy the image chunk content. */
		memcpy(dest_image_ptr, data_ptr, chunk_size);

		status = lancer_cmd_write_object(adapter, &flash_cmd,
				chunk_size, offset, LANCER_FW_DOWNLOAD_LOCATION,
				&data_written, &add_status);

		if (status)
			break;

		offset += data_written;
		data_ptr += data_written;
		image_size -= data_written;
	}

	if (!status) {
		/* Commit the FW written */
		status = lancer_cmd_write_object(adapter, &flash_cmd,
					0, offset, LANCER_FW_DOWNLOAD_LOCATION,
					&data_written, &add_status);
	}

	dma_free_coherent(&adapter->pdev->dev, flash_cmd.size, flash_cmd.va,
				flash_cmd.dma);
	if (status) {
		dev_err(&adapter->pdev->dev,
			"Firmware load error. "
			"Status code: 0x%x Additional Status: 0x%x\n",
			status, add_status);
		goto lancer_fw_exit;
	}

	dev_info(&adapter->pdev->dev, "Firmware flashed successfully\n");
lancer_fw_exit:
	return status;
}

static int be_fw_download(struct be_adapter *adapter, const struct firmware *fw)
{
	struct flash_file_hdr_g2 *fhdr;
	struct flash_file_hdr_g3 *fhdr3;
	struct image_hdr *img_hdr_ptr = NULL;
	struct be_dma_mem flash_cmd;
	const u8 *p;
	int status = 0, i = 0, num_imgs = 0;

	p = fw->data;
	fhdr = (struct flash_file_hdr_g2 *) p;

	flash_cmd.size = sizeof(struct be_cmd_write_flashrom) + 32*1024;
	flash_cmd.va = dma_alloc_coherent(&adapter->pdev->dev, flash_cmd.size,
					  &flash_cmd.dma, GFP_KERNEL);
	if (!flash_cmd.va) {
		status = -ENOMEM;
		dev_err(&adapter->pdev->dev,
			"Memory allocation failure while flashing\n");
		goto be_fw_exit;
	}

	if ((adapter->generation == BE_GEN3) &&
			(get_ufigen_type(fhdr) == BE_GEN3)) {
		fhdr3 = (struct flash_file_hdr_g3 *) fw->data;
		num_imgs = le32_to_cpu(fhdr3->num_imgs);
		for (i = 0; i < num_imgs; i++) {
			img_hdr_ptr = (struct image_hdr *) (fw->data +
					(sizeof(struct flash_file_hdr_g3) +
					 i * sizeof(struct image_hdr)));
			if (le32_to_cpu(img_hdr_ptr->imageid) == 1)
				status = be_flash_data(adapter, fw, &flash_cmd,
							num_imgs);
		}
	} else if ((adapter->generation == BE_GEN2) &&
			(get_ufigen_type(fhdr) == BE_GEN2)) {
		status = be_flash_data(adapter, fw, &flash_cmd, 0);
	} else {
		dev_err(&adapter->pdev->dev,
			"UFI and Interface are not compatible for flashing\n");
		status = -1;
	}

	dma_free_coherent(&adapter->pdev->dev, flash_cmd.size, flash_cmd.va,
			  flash_cmd.dma);
	if (status) {
		dev_err(&adapter->pdev->dev, "Firmware load error\n");
		goto be_fw_exit;
	}

	dev_info(&adapter->pdev->dev, "Firmware flashed successfully\n");

be_fw_exit:
	return status;
}

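/* Entry point for user-initiated flashing. This is presumably reached via
 * ethtool's flash_device hook (e.g. "ethtool -f eth0 <image>.ufi"); the
 * actual caller lives outside this file, in the ethtool support code.
 */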
int be_load_fw(struct be_adapter *adapter, u8 *fw_file)
{
	const struct firmware *fw;
	int status;

	if (!netif_running(adapter->netdev)) {
		dev_err(&adapter->pdev->dev,
			"Firmware load not allowed (interface is down)\n");
		return -1;
	}

	status = request_firmware(&fw, fw_file, &adapter->pdev->dev);
	if (status)
		goto fw_exit;

	dev_info(&adapter->pdev->dev, "Flashing firmware file %s\n", fw_file);

	if (lancer_chip(adapter))
		status = lancer_fw_download(adapter, fw);
	else
		status = be_fw_download(adapter, fw);

fw_exit:
	release_firmware(fw);
	return status;
}

static struct net_device_ops be_netdev_ops = {
	.ndo_open		= be_open,
	.ndo_stop		= be_close,
	.ndo_start_xmit		= be_xmit,
	.ndo_set_rx_mode	= be_set_multicast_list,
	.ndo_set_mac_address	= be_mac_addr_set,
	.ndo_change_mtu		= be_change_mtu,
	.ndo_get_stats64	= be_get_stats64,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_vlan_rx_add_vid	= be_vlan_add_vid,
	.ndo_vlan_rx_kill_vid	= be_vlan_rem_vid,
	.ndo_set_vf_mac		= be_set_vf_mac,
	.ndo_set_vf_vlan	= be_set_vf_vlan,
	.ndo_set_vf_tx_rate	= be_set_vf_tx_rate,
	.ndo_get_vf_config	= be_get_vf_config
};

static void be_netdev_init(struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_rx_obj *rxo;
	int i;

	netdev->hw_features |= NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6 |
		NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_RXCSUM |
		NETIF_F_HW_VLAN_TX;
	if (be_multi_rxq(adapter))
		netdev->hw_features |= NETIF_F_RXHASH;

	netdev->features |= netdev->hw_features |
		NETIF_F_HW_VLAN_RX | NETIF_F_HW_VLAN_FILTER;

	netdev->vlan_features |= NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6 |
		NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;

	netdev->flags |= IFF_MULTICAST;

	/* Default settings for Rx and Tx flow control */
	adapter->rx_fc = true;
	adapter->tx_fc = true;

	netif_set_gso_max_size(netdev, 65535);

	BE_SET_NETDEV_OPS(netdev, &be_netdev_ops);

	SET_ETHTOOL_OPS(netdev, &be_ethtool_ops);

	for_all_rx_queues(adapter, rxo, i)
		netif_napi_add(netdev, &rxo->rx_eq.napi, be_poll_rx,
				BE_NAPI_WEIGHT);

	netif_napi_add(netdev, &adapter->tx_eq.napi, be_poll_tx_mcc,
		BE_NAPI_WEIGHT);
}

static void be_unmap_pci_bars(struct be_adapter *adapter)
{
	if (adapter->csr)
		iounmap(adapter->csr);
	if (adapter->db)
		iounmap(adapter->db);
	if (adapter->pcicfg && be_physfn(adapter))
		iounmap(adapter->pcicfg);
}

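/* BAR layout, as used below: Lancer needs only the doorbells from BAR 0.
 * On BE2/BE3 the PF maps CSR space from BAR 2 and doorbells from BAR 4
 * (a gen3 VF uses BAR 0); the pcicfg shadow comes from BAR 1 on gen2 or
 * BAR 0 on gen3, while a VF derives it from the doorbell mapping at
 * SRIOV_VF_PCICFG_OFFSET.
 */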
static int be_map_pci_bars(struct be_adapter *adapter)
{
	u8 __iomem *addr;
	int pcicfg_reg, db_reg;

	if (lancer_chip(adapter)) {
		addr = ioremap_nocache(pci_resource_start(adapter->pdev, 0),
			pci_resource_len(adapter->pdev, 0));
		if (addr == NULL)
			return -ENOMEM;
		adapter->db = addr;
		return 0;
	}

	if (be_physfn(adapter)) {
		addr = ioremap_nocache(pci_resource_start(adapter->pdev, 2),
				pci_resource_len(adapter->pdev, 2));
		if (addr == NULL)
			return -ENOMEM;
		adapter->csr = addr;
	}

	if (adapter->generation == BE_GEN2) {
		pcicfg_reg = 1;
		db_reg = 4;
	} else {
		pcicfg_reg = 0;
		if (be_physfn(adapter))
			db_reg = 4;
		else
			db_reg = 0;
	}
	addr = ioremap_nocache(pci_resource_start(adapter->pdev, db_reg),
				pci_resource_len(adapter->pdev, db_reg));
	if (addr == NULL)
		goto pci_map_err;
	adapter->db = addr;

	if (be_physfn(adapter)) {
		addr = ioremap_nocache(
				pci_resource_start(adapter->pdev, pcicfg_reg),
				pci_resource_len(adapter->pdev, pcicfg_reg));
		if (addr == NULL)
			goto pci_map_err;
		adapter->pcicfg = addr;
	} else
		adapter->pcicfg = adapter->db + SRIOV_VF_PCICFG_OFFSET;

	return 0;
pci_map_err:
	be_unmap_pci_bars(adapter);
	return -ENOMEM;
}

static void be_ctrl_cleanup(struct be_adapter *adapter)
{
	struct be_dma_mem *mem = &adapter->mbox_mem_alloced;

	be_unmap_pci_bars(adapter);

	if (mem->va)
		dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
				  mem->dma);

	mem = &adapter->rx_filter;
	if (mem->va)
		dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
				  mem->dma);
}

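/* The mailbox must be 16-byte aligned, so 16 spare bytes are allocated and
 * PTR_ALIGN() carves the aligned window out of the allocation; the raw
 * (mbox_mem_alloced) and aligned (mbox_mem) views are both kept so the raw
 * one can be freed later.
 */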
static int be_ctrl_init(struct be_adapter *adapter)
{
	struct be_dma_mem *mbox_mem_alloc = &adapter->mbox_mem_alloced;
	struct be_dma_mem *mbox_mem_align = &adapter->mbox_mem;
	struct be_dma_mem *rx_filter = &adapter->rx_filter;
	int status;

	status = be_map_pci_bars(adapter);
	if (status)
		goto done;

	mbox_mem_alloc->size = sizeof(struct be_mcc_mailbox) + 16;
	mbox_mem_alloc->va = dma_alloc_coherent(&adapter->pdev->dev,
						mbox_mem_alloc->size,
						&mbox_mem_alloc->dma,
						GFP_KERNEL);
	if (!mbox_mem_alloc->va) {
		status = -ENOMEM;
		goto unmap_pci_bars;
	}
	mbox_mem_align->size = sizeof(struct be_mcc_mailbox);
	mbox_mem_align->va = PTR_ALIGN(mbox_mem_alloc->va, 16);
	mbox_mem_align->dma = PTR_ALIGN(mbox_mem_alloc->dma, 16);
	memset(mbox_mem_align->va, 0, sizeof(struct be_mcc_mailbox));

	rx_filter->size = sizeof(struct be_cmd_req_rx_filter);
	rx_filter->va = dma_alloc_coherent(&adapter->pdev->dev, rx_filter->size,
					&rx_filter->dma, GFP_KERNEL);
	if (rx_filter->va == NULL) {
		status = -ENOMEM;
		goto free_mbox;
	}
	memset(rx_filter->va, 0, rx_filter->size);

	mutex_init(&adapter->mbox_lock);
	spin_lock_init(&adapter->mcc_lock);
	spin_lock_init(&adapter->mcc_cq_lock);

	init_completion(&adapter->flash_compl);
	pci_save_state(adapter->pdev);
	return 0;

free_mbox:
	dma_free_coherent(&adapter->pdev->dev, mbox_mem_alloc->size,
			  mbox_mem_alloc->va, mbox_mem_alloc->dma);

unmap_pci_bars:
	be_unmap_pci_bars(adapter);

done:
	return status;
}

static void be_stats_cleanup(struct be_adapter *adapter)
{
	struct be_dma_mem *cmd = &adapter->stats_cmd;

	if (cmd->va)
		dma_free_coherent(&adapter->pdev->dev, cmd->size,
				  cmd->va, cmd->dma);
}

static int be_stats_init(struct be_adapter *adapter)
{
	struct be_dma_mem *cmd = &adapter->stats_cmd;

	if (adapter->generation == BE_GEN2) {
		cmd->size = sizeof(struct be_cmd_req_get_stats_v0);
	} else {
		if (lancer_chip(adapter))
			cmd->size = sizeof(struct lancer_cmd_req_pport_stats);
		else
			cmd->size = sizeof(struct be_cmd_req_get_stats_v1);
	}
	cmd->va = dma_alloc_coherent(&adapter->pdev->dev, cmd->size, &cmd->dma,
				     GFP_KERNEL);
	if (cmd->va == NULL)
		return -1;
	memset(cmd->va, 0, cmd->size);
	return 0;
}

static void __devexit be_remove(struct pci_dev *pdev)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);

	if (!adapter)
		return;

	cancel_delayed_work_sync(&adapter->work);

	unregister_netdev(adapter->netdev);

	be_clear(adapter);

	be_stats_cleanup(adapter);

	be_ctrl_cleanup(adapter);

	kfree(adapter->vf_cfg);
	be_sriov_disable(adapter);

	be_msix_disable(adapter);

	pci_set_drvdata(pdev, NULL);
	pci_release_regions(pdev);
	pci_disable_device(pdev);

	free_netdev(adapter->netdev);
}

static int be_get_config(struct be_adapter *adapter)
{
	int status;
	u8 mac[ETH_ALEN];

	status = be_cmd_get_fw_ver(adapter, adapter->fw_ver);
	if (status)
		return status;

	status = be_cmd_query_fw_cfg(adapter, &adapter->port_num,
			&adapter->function_mode, &adapter->function_caps);
	if (status)
		return status;

	memset(mac, 0, ETH_ALEN);

	/* A default permanent address is given to each VF for Lancer */
	if (be_physfn(adapter) || lancer_chip(adapter)) {
		status = be_cmd_mac_addr_query(adapter, mac,
			MAC_ADDRESS_TYPE_NETWORK, true /* permanent */, 0);

		if (status)
			return status;

		if (!is_valid_ether_addr(mac))
			return -EADDRNOTAVAIL;

		memcpy(adapter->netdev->dev_addr, mac, ETH_ALEN);
		memcpy(adapter->netdev->perm_addr, mac, ETH_ALEN);
	}

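	/* Bit 0x400 of function_mode appears to flag a multi-channel
	 * (FLEX10-style) configuration in which this function owns only a
	 * quarter of the VLAN filter table.
	 */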
	if (adapter->function_mode & 0x400)
		adapter->max_vlans = BE_NUM_VLANS_SUPPORTED/4;
	else
		adapter->max_vlans = BE_NUM_VLANS_SUPPORTED;

	status = be_cmd_get_cntl_attributes(adapter);
	if (status)
		return status;

	if ((num_vfs && adapter->sriov_enabled) ||
		(adapter->function_mode & 0x400) ||
		lancer_chip(adapter) || !be_physfn(adapter)) {
		adapter->num_tx_qs = 1;
		netif_set_real_num_tx_queues(adapter->netdev,
			adapter->num_tx_qs);
	} else {
		adapter->num_tx_qs = MAX_TX_QS;
	}

	return 0;
}

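/* Map PCI device IDs to chip generations: BE2-era IDs yield BE_GEN2,
 * BE3-era IDs yield BE_GEN3, and the Lancer IDs are validated further via
 * the SLI_INTF register before being treated as gen3.
 */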
static int be_dev_family_check(struct be_adapter *adapter)
{
	struct pci_dev *pdev = adapter->pdev;
	u32 sli_intf = 0, if_type;

	switch (pdev->device) {
	case BE_DEVICE_ID1:
	case OC_DEVICE_ID1:
		adapter->generation = BE_GEN2;
		break;
	case BE_DEVICE_ID2:
	case OC_DEVICE_ID2:
		adapter->generation = BE_GEN3;
		break;
	case OC_DEVICE_ID3:
	case OC_DEVICE_ID4:
		pci_read_config_dword(pdev, SLI_INTF_REG_OFFSET, &sli_intf);
		if_type = (sli_intf & SLI_INTF_IF_TYPE_MASK) >>
						SLI_INTF_IF_TYPE_SHIFT;

		if (((sli_intf & SLI_INTF_VALID_MASK) != SLI_INTF_VALID) ||
			if_type != 0x02) {
			dev_err(&pdev->dev, "SLI_INTF reg val is not valid\n");
			return -EINVAL;
		}
		adapter->sli_family = ((sli_intf & SLI_INTF_FAMILY_MASK) >>
					 SLI_INTF_FAMILY_SHIFT);
		adapter->generation = BE_GEN3;
		break;
	default:
		adapter->generation = 0;
	}
	return 0;
}

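/* Poll the SLIPORT status register until the firmware reports ready:
 * 500 iterations at 20ms apiece gives roughly a 10 second budget.
 */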
static int lancer_wait_ready(struct be_adapter *adapter)
{
#define SLIPORT_READY_TIMEOUT 500
	u32 sliport_status;
	int status = 0, i;

	for (i = 0; i < SLIPORT_READY_TIMEOUT; i++) {
		sliport_status = ioread32(adapter->db + SLIPORT_STATUS_OFFSET);
		if (sliport_status & SLIPORT_STATUS_RDY_MASK)
			break;

		msleep(20);
	}

	if (i == SLIPORT_READY_TIMEOUT)
		status = -1;

	return status;
}

static int lancer_test_and_set_rdy_state(struct be_adapter *adapter)
{
	int status;
	u32 sliport_status, err, reset_needed;

	status = lancer_wait_ready(adapter);
	if (!status) {
		sliport_status = ioread32(adapter->db + SLIPORT_STATUS_OFFSET);
		err = sliport_status & SLIPORT_STATUS_ERR_MASK;
		reset_needed = sliport_status & SLIPORT_STATUS_RN_MASK;
		if (err && reset_needed) {
			iowrite32(SLI_PORT_CONTROL_IP_MASK,
				  adapter->db + SLIPORT_CONTROL_OFFSET);

			/* check adapter has corrected the error */
			status = lancer_wait_ready(adapter);
			sliport_status = ioread32(adapter->db +
						  SLIPORT_STATUS_OFFSET);
			sliport_status &= (SLIPORT_STATUS_ERR_MASK |
						SLIPORT_STATUS_RN_MASK);
			if (status || sliport_status)
				status = -1;
		} else if (err || reset_needed) {
			status = -1;
		}
	}
	return status;
}

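/* Probe order matters: BARs and control structures must be up before any
 * mailbox command, then POST/fw_init/reset_function, stats and config
 * queries, MSI-X and be_setup(), and only then netdev registration and
 * VF MAC/link-speed setup.
 */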
static int __devinit be_probe(struct pci_dev *pdev,
			const struct pci_device_id *pdev_id)
{
	int status = 0;
	struct be_adapter *adapter;
	struct net_device *netdev;

	status = pci_enable_device(pdev);
	if (status)
		goto do_none;

	status = pci_request_regions(pdev, DRV_NAME);
	if (status)
		goto disable_dev;
	pci_set_master(pdev);

	netdev = alloc_etherdev_mq(sizeof(struct be_adapter), MAX_TX_QS);
	if (netdev == NULL) {
		status = -ENOMEM;
		goto rel_reg;
	}
	adapter = netdev_priv(netdev);
	adapter->pdev = pdev;
	pci_set_drvdata(pdev, adapter);

	status = be_dev_family_check(adapter);
	if (status)
		goto free_netdev;

	adapter->netdev = netdev;
	SET_NETDEV_DEV(netdev, &pdev->dev);

	status = dma_set_mask(&pdev->dev, DMA_BIT_MASK(64));
	if (!status) {
		netdev->features |= NETIF_F_HIGHDMA;
	} else {
		status = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
		if (status) {
			dev_err(&pdev->dev, "Could not set PCI DMA Mask\n");
			goto free_netdev;
		}
	}

	be_sriov_enable(adapter);
	if (adapter->sriov_enabled) {
		adapter->vf_cfg = kcalloc(num_vfs,
			sizeof(struct be_vf_cfg), GFP_KERNEL);

		if (!adapter->vf_cfg)
			goto free_netdev;
	}

	status = be_ctrl_init(adapter);
	if (status)
		goto free_vf_cfg;

	if (lancer_chip(adapter)) {
		status = lancer_test_and_set_rdy_state(adapter);
		if (status) {
			dev_err(&pdev->dev, "Adapter in non recoverable error\n");
			goto ctrl_clean;
		}
	}

	/* sync up with fw's ready state */
	if (be_physfn(adapter)) {
		status = be_cmd_POST(adapter);
		if (status)
			goto ctrl_clean;
	}

	/* tell fw we're ready to fire cmds */
	status = be_cmd_fw_init(adapter);
	if (status)
		goto ctrl_clean;

	status = be_cmd_reset_function(adapter);
	if (status)
		goto ctrl_clean;

	status = be_stats_init(adapter);
	if (status)
		goto ctrl_clean;

	status = be_get_config(adapter);
	if (status)
		goto stats_clean;

	/* The INTR bit may be set in the card when probed by a kdump kernel
	 * after a crash.
	 */
	if (!lancer_chip(adapter))
		be_intr_set(adapter, false);

	be_msix_enable(adapter);

	INIT_DELAYED_WORK(&adapter->work, be_worker);

	status = be_setup(adapter);
	if (status)
		goto msix_disable;

	be_netdev_init(netdev);
	status = register_netdev(netdev);
	if (status != 0)
		goto unsetup;

	if (be_physfn(adapter) && adapter->sriov_enabled) {
		u8 mac_speed;
		u16 vf, lnk_speed;

		if (!lancer_chip(adapter)) {
			status = be_vf_eth_addr_config(adapter);
			if (status)
				goto unreg_netdev;
		}

		for (vf = 0; vf < num_vfs; vf++) {
			status = be_cmd_link_status_query(adapter, &mac_speed,
						&lnk_speed, vf + 1);
			if (!status)
				adapter->vf_cfg[vf].vf_tx_rate = lnk_speed * 10;
			else
				goto unreg_netdev;
		}
	}

	dev_info(&pdev->dev, "%s port %d\n", nic_name(pdev), adapter->port_num);

	schedule_delayed_work(&adapter->work, msecs_to_jiffies(100));
	return 0;

unreg_netdev:
	unregister_netdev(netdev);
unsetup:
	be_clear(adapter);
msix_disable:
	be_msix_disable(adapter);
stats_clean:
	be_stats_cleanup(adapter);
ctrl_clean:
	be_ctrl_cleanup(adapter);
free_vf_cfg:
	kfree(adapter->vf_cfg);
free_netdev:
	be_sriov_disable(adapter);
	free_netdev(netdev);
	pci_set_drvdata(pdev, NULL);
rel_reg:
	pci_release_regions(pdev);
disable_dev:
	pci_disable_device(pdev);
do_none:
	dev_err(&pdev->dev, "%s initialization failed\n", nic_name(pdev));
	return status;
}

static int be_suspend(struct pci_dev *pdev, pm_message_t state)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);
	struct net_device *netdev = adapter->netdev;

	cancel_delayed_work_sync(&adapter->work);
	if (adapter->wol)
		be_setup_wol(adapter, true);

	netif_device_detach(netdev);
	if (netif_running(netdev)) {
		rtnl_lock();
		be_close(netdev);
		rtnl_unlock();
	}
	be_cmd_get_flow_control(adapter, &adapter->tx_fc, &adapter->rx_fc);
	be_clear(adapter);

	be_msix_disable(adapter);
	pci_save_state(pdev);
	pci_disable_device(pdev);
	pci_set_power_state(pdev, pci_choose_state(pdev, state));
	return 0;
}

static int be_resume(struct pci_dev *pdev)
{
	int status = 0;
	struct be_adapter *adapter = pci_get_drvdata(pdev);
	struct net_device *netdev = adapter->netdev;

	netif_device_detach(netdev);

	status = pci_enable_device(pdev);
	if (status)
		return status;

	pci_set_power_state(pdev, 0);
	pci_restore_state(pdev);

	be_msix_enable(adapter);
	/* tell fw we're ready to fire cmds */
	status = be_cmd_fw_init(adapter);
	if (status)
		return status;

	be_setup(adapter);
	if (netif_running(netdev)) {
		rtnl_lock();
		be_open(netdev);
		rtnl_unlock();
	}
	netif_device_attach(netdev);

	if (adapter->wol)
		be_setup_wol(adapter, false);

	schedule_delayed_work(&adapter->work, msecs_to_jiffies(100));
	return 0;
}

/*
 * An FLR will stop BE from DMAing any data.
 */
static void be_shutdown(struct pci_dev *pdev)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);

	if (!adapter)
		return;

	cancel_delayed_work_sync(&adapter->work);

	netif_device_detach(adapter->netdev);

	if (adapter->wol)
		be_setup_wol(adapter, true);

	be_cmd_reset_function(adapter);

	pci_disable_device(pdev);
}

static pci_ers_result_t be_eeh_err_detected(struct pci_dev *pdev,
				pci_channel_state_t state)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);
	struct net_device *netdev = adapter->netdev;

	dev_err(&adapter->pdev->dev, "EEH error detected\n");

	adapter->eeh_err = true;

	netif_device_detach(netdev);

	if (netif_running(netdev)) {
		rtnl_lock();
		be_close(netdev);
		rtnl_unlock();
	}
	be_clear(adapter);

	if (state == pci_channel_io_perm_failure)
		return PCI_ERS_RESULT_DISCONNECT;

	pci_disable_device(pdev);

	return PCI_ERS_RESULT_NEED_RESET;
}

static pci_ers_result_t be_eeh_reset(struct pci_dev *pdev)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);
	int status;

	dev_info(&adapter->pdev->dev, "EEH reset\n");
	adapter->eeh_err = false;

	status = pci_enable_device(pdev);
	if (status)
		return PCI_ERS_RESULT_DISCONNECT;

	pci_set_master(pdev);
	pci_set_power_state(pdev, 0);
	pci_restore_state(pdev);

	/* Check if card is ok and fw is ready */
	status = be_cmd_POST(adapter);
	if (status)
		return PCI_ERS_RESULT_DISCONNECT;

	return PCI_ERS_RESULT_RECOVERED;
}

static void be_eeh_resume(struct pci_dev *pdev)
{
	int status = 0;
	struct be_adapter *adapter = pci_get_drvdata(pdev);
	struct net_device *netdev = adapter->netdev;

	dev_info(&adapter->pdev->dev, "EEH resume\n");

	pci_save_state(pdev);

	/* tell fw we're ready to fire cmds */
	status = be_cmd_fw_init(adapter);
	if (status)
		goto err;

	status = be_setup(adapter);
	if (status)
		goto err;

	if (netif_running(netdev)) {
		status = be_open(netdev);
		if (status)
			goto err;
	}
	netif_device_attach(netdev);
	return;
err:
	dev_err(&adapter->pdev->dev, "EEH resume failed\n");
}

static struct pci_error_handlers be_eeh_handlers = {
	.error_detected = be_eeh_err_detected,
	.slot_reset = be_eeh_reset,
	.resume = be_eeh_resume,
};

static struct pci_driver be_driver = {
	.name = DRV_NAME,
	.id_table = be_dev_ids,
	.probe = be_probe,
	.remove = be_remove,
	.suspend = be_suspend,
	.resume = be_resume,
	.shutdown = be_shutdown,
	.err_handler = &be_eeh_handlers
};

static int __init be_init_module(void)
{
	if (rx_frag_size != 8192 && rx_frag_size != 4096 &&
	    rx_frag_size != 2048) {
		printk(KERN_WARNING DRV_NAME
			" : Module param rx_frag_size must be 2048/4096/8192."
			" Using 2048\n");
		rx_frag_size = 2048;
	}

	return pci_register_driver(&be_driver);
}
module_init(be_init_module);

static void __exit be_exit_module(void)
{
	pci_unregister_driver(&be_driver);
}
module_exit(be_exit_module);