blob: 4b5e0ed49ed85bafa458edc577ab64844e1df884 [file] [log] [blame]
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001/*
Ajit Khaparded2145cd2011-03-16 08:20:46 +00002 * Copyright (C) 2005 - 2011 Emulex
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003 * All rights reserved.
4 *
5 * This program is free software; you can redistribute it and/or
6 * modify it under the terms of the GNU General Public License version 2
7 * as published by the Free Software Foundation. The full GNU General
8 * Public License is included in this distribution in the file called COPYING.
9 *
10 * Contact Information:
Ajit Khaparded2145cd2011-03-16 08:20:46 +000011 * linux-drivers@emulex.com
Sathya Perla6b7c5b92009-03-11 23:32:03 -070012 *
Ajit Khaparded2145cd2011-03-16 08:20:46 +000013 * Emulex
14 * 3333 Susan Street
15 * Costa Mesa, CA 92626
Sathya Perla6b7c5b92009-03-11 23:32:03 -070016 */
17
18#include "be.h"
Sathya Perla8788fdc2009-07-27 22:52:03 +000019#include "be_cmds.h"
Stephen Hemminger65f71b82009-03-27 00:25:24 -070020#include <asm/div64.h>
Sathya Perla6b7c5b92009-03-11 23:32:03 -070021
22MODULE_VERSION(DRV_VER);
23MODULE_DEVICE_TABLE(pci, be_dev_ids);
24MODULE_DESCRIPTION(DRV_DESC " " DRV_VER);
25MODULE_AUTHOR("ServerEngines Corporation");
26MODULE_LICENSE("GPL");
27
Sathya Perla2e588f82011-03-11 02:49:26 +000028static ushort rx_frag_size = 2048;
Sarveshwar Bandiba343c72010-03-31 02:56:12 +000029static unsigned int num_vfs;
Sathya Perla2e588f82011-03-11 02:49:26 +000030module_param(rx_frag_size, ushort, S_IRUGO);
Sarveshwar Bandiba343c72010-03-31 02:56:12 +000031module_param(num_vfs, uint, S_IRUGO);
Sathya Perla6b7c5b92009-03-11 23:32:03 -070032MODULE_PARM_DESC(rx_frag_size, "Size of a fragment that holds rcvd data.");
Sarveshwar Bandiba343c72010-03-31 02:56:12 +000033MODULE_PARM_DESC(num_vfs, "Number of PCI VFs to initialize");
Sathya Perla6b7c5b92009-03-11 23:32:03 -070034
Sathya Perla3abcded2010-10-03 22:12:27 -070035static bool multi_rxq = true;
36module_param(multi_rxq, bool, S_IRUGO | S_IWUSR);
37MODULE_PARM_DESC(multi_rxq, "Multi Rx Queue support. Enabled by default");
38
Sathya Perla6b7c5b92009-03-11 23:32:03 -070039static DEFINE_PCI_DEVICE_TABLE(be_dev_ids) = {
Ajit Khapardec4ca2372009-05-18 15:38:55 -070040 { PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID1) },
Ajit Khaparde59fd5d82009-10-29 01:11:06 -070041 { PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID2) },
Ajit Khapardec4ca2372009-05-18 15:38:55 -070042 { PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID1) },
43 { PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID2) },
Sathya Perlafe6d2a32010-11-21 23:25:50 +000044 { PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID3)},
Mammatha Edhala12f4d0a2011-05-18 03:26:22 +000045 { PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID4)},
Sathya Perla6b7c5b92009-03-11 23:32:03 -070046 { 0 }
47};
48MODULE_DEVICE_TABLE(pci, be_dev_ids);
/* UE Status Low CSR */
/* Bit-position -> block-name strings for decoding the Unrecoverable Error
 * status low register: bit 0 corresponds to "CEV", bit 31 to "MPU_INTPEND".
 * NOTE(review): several entries carry trailing spaces and are presumably
 * printed verbatim -- confirm before trimming them.
 */
static char *ue_status_low_desc[] = {
	"CEV",
	"CTX",
	"DBUF",
	"ERX",
	"Host",
	"MPU",
	"NDMA",
	"PTC ",
	"RDMA ",
	"RXF ",
	"RXIPS ",
	"RXULP0 ",
	"RXULP1 ",
	"RXULP2 ",
	"TIM ",
	"TPOST ",
	"TPRE ",
	"TXIPS ",
	"TXULP0 ",
	"TXULP1 ",
	"UC ",
	"WDMA ",
	"TXULP2 ",
	"HOST1 ",
	"P0_OB_LINK ",
	"P1_OB_LINK ",
	"HOST_GPIO ",
	"MBOX ",
	"AXGMAC0",
	"AXGMAC1",
	"JTAG",
	"MPU_INTPEND"
};
/* UE Status High CSR */
/* Bit-position -> block-name strings for decoding the Unrecoverable Error
 * status high register.
 * Fix: a missing comma after "NETC" made the compiler concatenate the
 * adjacent literals into one entry ("NETCUnknown"), leaving the table one
 * element short and shifting every subsequent index.
 */
static char *ue_status_hi_desc[] = {
	"LPCMEMHOST",
	"MGMT_MAC",
	"PCS0ONLINE",
	"MPU_IRAM",
	"PCS1ONLINE",
	"PCTL0",
	"PCTL1",
	"PMEM",
	"RR",
	"TXPB",
	"RXPP",
	"XAUI",
	"TXP",
	"ARM",
	"IPC",
	"HOST2",
	"HOST3",
	"HOST4",
	"HOST5",
	"HOST6",
	"HOST7",
	"HOST8",
	"HOST9",
	"NETC",
	"Unknown",
	"Unknown",
	"Unknown",
	"Unknown",
	"Unknown",
	"Unknown",
	"Unknown",
	"Unknown"
};
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700119
120static void be_queue_free(struct be_adapter *adapter, struct be_queue_info *q)
121{
122 struct be_dma_mem *mem = &q->dma_mem;
123 if (mem->va)
Ivan Vecera2b7bceb2011-02-02 08:05:12 +0000124 dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
125 mem->dma);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700126}
127
128static int be_queue_alloc(struct be_adapter *adapter, struct be_queue_info *q,
129 u16 len, u16 entry_size)
130{
131 struct be_dma_mem *mem = &q->dma_mem;
132
133 memset(q, 0, sizeof(*q));
134 q->len = len;
135 q->entry_size = entry_size;
136 mem->size = len * entry_size;
Ivan Vecera2b7bceb2011-02-02 08:05:12 +0000137 mem->va = dma_alloc_coherent(&adapter->pdev->dev, mem->size, &mem->dma,
138 GFP_KERNEL);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700139 if (!mem->va)
140 return -1;
141 memset(mem->va, 0, mem->size);
142 return 0;
143}
144
/* Enable or disable host interrupts through the membar interrupt-control
 * register. The register is only written when the requested state differs
 * from the current one, avoiding a needless PCI write.
 */
static void be_intr_set(struct be_adapter *adapter, bool enable)
{
	u8 __iomem *addr = adapter->pcicfg + PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET;
	u32 reg = ioread32(addr);
	u32 enabled = reg & MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;

	/* NOTE(review): the eeh_err bail-out happens after the ioread32()
	 * above; if EEH recovery is in progress that read may already have
	 * failed -- confirm whether the check should precede the read. */
	if (adapter->eeh_err)
		return;

	if (!enabled && enable)
		reg |= MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
	else if (enabled && !enable)
		reg &= ~MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
	else
		return;		/* already in the requested state */

	iowrite32(reg, addr);
}
163
Sathya Perla8788fdc2009-07-27 22:52:03 +0000164static void be_rxq_notify(struct be_adapter *adapter, u16 qid, u16 posted)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700165{
166 u32 val = 0;
167 val |= qid & DB_RQ_RING_ID_MASK;
168 val |= posted << DB_RQ_NUM_POSTED_SHIFT;
Sathya Perlaf3eb62d2010-06-29 00:11:17 +0000169
170 wmb();
Sathya Perla8788fdc2009-07-27 22:52:03 +0000171 iowrite32(val, adapter->db + DB_RQ_OFFSET);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700172}
173
Sathya Perla8788fdc2009-07-27 22:52:03 +0000174static void be_txq_notify(struct be_adapter *adapter, u16 qid, u16 posted)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700175{
176 u32 val = 0;
177 val |= qid & DB_TXULP_RING_ID_MASK;
178 val |= (posted & DB_TXULP_NUM_POSTED_MASK) << DB_TXULP_NUM_POSTED_SHIFT;
Sathya Perlaf3eb62d2010-06-29 00:11:17 +0000179
180 wmb();
Sathya Perla8788fdc2009-07-27 22:52:03 +0000181 iowrite32(val, adapter->db + DB_TXULP1_OFFSET);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700182}
183
/* Notify the event queue doorbell: acknowledge @num_popped consumed
 * events on EQ @qid, optionally re-arming the EQ (@arm) and clearing
 * the interrupt (@clear_int). Skipped entirely while an EEH error is
 * pending, since MMIO is unsafe then.
 */
static void be_eq_notify(struct be_adapter *adapter, u16 qid,
		bool arm, bool clear_int, u16 num_popped)
{
	u32 val = 0;
	val |= qid & DB_EQ_RING_ID_MASK;
	/* high-order ring-id bits live in a separate "ext" field */
	val |= ((qid & DB_EQ_RING_ID_EXT_MASK) <<
			DB_EQ_RING_ID_EXT_MASK_SHIFT);

	if (adapter->eeh_err)
		return;

	if (arm)
		val |= 1 << DB_EQ_REARM_SHIFT;
	if (clear_int)
		val |= 1 << DB_EQ_CLR_SHIFT;
	val |= 1 << DB_EQ_EVNT_SHIFT;	/* this doorbell targets an EQ */
	val |= num_popped << DB_EQ_NUM_POPPED_SHIFT;
	iowrite32(val, adapter->db + DB_EQ_OFFSET);
}
203
/* Notify the completion queue doorbell: acknowledge @num_popped consumed
 * completions on CQ @qid, optionally re-arming the CQ (@arm). Skipped
 * while an EEH error is pending, since MMIO is unsafe then.
 * Non-static: also used by other compilation units of this driver.
 */
void be_cq_notify(struct be_adapter *adapter, u16 qid, bool arm, u16 num_popped)
{
	u32 val = 0;
	val |= qid & DB_CQ_RING_ID_MASK;
	/* high-order ring-id bits live in a separate "ext" field */
	val |= ((qid & DB_CQ_RING_ID_EXT_MASK) <<
			DB_CQ_RING_ID_EXT_MASK_SHIFT);

	if (adapter->eeh_err)
		return;

	if (arm)
		val |= 1 << DB_CQ_REARM_SHIFT;
	val |= num_popped << DB_CQ_NUM_POPPED_SHIFT;
	iowrite32(val, adapter->db + DB_CQ_OFFSET);
}
219
/* ndo_set_mac_address handler: program a new unicast MAC into the
 * interface. For a PF, the old pmac entry is deleted and the new one
 * added via firmware commands; for a VF, only the netdev copy is
 * updated (the PF programs the hardware on the VF's behalf).
 * Returns 0 on success or a negative errno / firmware status.
 */
static int be_mac_addr_set(struct net_device *netdev, void *p)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct sockaddr *addr = p;
	int status = 0;

	if (!is_valid_ether_addr(addr->sa_data))
		return -EADDRNOTAVAIL;

	/* MAC addr configuration will be done in hardware for VFs
	 * by their corresponding PFs. Just copy to netdev addr here
	 */
	if (!be_physfn(adapter))
		goto netdev_addr;

	/* NOTE(review): the old pmac is deleted before the new one is
	 * added; if pmac_add below fails, the port is left with no
	 * programmed MAC -- confirm this window is acceptable. */
	status = be_cmd_pmac_del(adapter, adapter->if_handle,
				adapter->pmac_id, 0);
	if (status)
		return status;

	status = be_cmd_pmac_add(adapter, (u8 *)addr->sa_data,
			adapter->if_handle, &adapter->pmac_id, 0);
netdev_addr:
	/* only commit to the netdev once hardware (if involved) succeeded */
	if (!status)
		memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);

	return status;
}
248
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000249static void populate_be2_stats(struct be_adapter *adapter)
250{
251
252 struct be_drv_stats *drvs = &adapter->drv_stats;
253 struct be_pmem_stats *pmem_sts = be_pmem_stats_from_cmd(adapter);
254 struct be_port_rxf_stats_v0 *port_stats =
255 be_port_rxf_stats_from_cmd(adapter);
256 struct be_rxf_stats_v0 *rxf_stats =
257 be_rxf_stats_from_cmd(adapter);
258
259 drvs->rx_pause_frames = port_stats->rx_pause_frames;
260 drvs->rx_crc_errors = port_stats->rx_crc_errors;
261 drvs->rx_control_frames = port_stats->rx_control_frames;
262 drvs->rx_in_range_errors = port_stats->rx_in_range_errors;
263 drvs->rx_frame_too_long = port_stats->rx_frame_too_long;
264 drvs->rx_dropped_runt = port_stats->rx_dropped_runt;
265 drvs->rx_ip_checksum_errs = port_stats->rx_ip_checksum_errs;
266 drvs->rx_tcp_checksum_errs = port_stats->rx_tcp_checksum_errs;
267 drvs->rx_udp_checksum_errs = port_stats->rx_udp_checksum_errs;
268 drvs->rxpp_fifo_overflow_drop = port_stats->rx_fifo_overflow;
269 drvs->rx_dropped_tcp_length = port_stats->rx_dropped_tcp_length;
270 drvs->rx_dropped_too_small = port_stats->rx_dropped_too_small;
271 drvs->rx_dropped_too_short = port_stats->rx_dropped_too_short;
272 drvs->rx_out_range_errors = port_stats->rx_out_range_errors;
273 drvs->rx_input_fifo_overflow_drop =
274 port_stats->rx_input_fifo_overflow;
275 drvs->rx_dropped_header_too_small =
276 port_stats->rx_dropped_header_too_small;
277 drvs->rx_address_match_errors =
278 port_stats->rx_address_match_errors;
279 drvs->rx_alignment_symbol_errors =
280 port_stats->rx_alignment_symbol_errors;
281
282 drvs->tx_pauseframes = port_stats->tx_pauseframes;
283 drvs->tx_controlframes = port_stats->tx_controlframes;
284
285 if (adapter->port_num)
286 drvs->jabber_events =
287 rxf_stats->port1_jabber_events;
288 else
289 drvs->jabber_events =
290 rxf_stats->port0_jabber_events;
291 drvs->rx_drops_no_pbuf = rxf_stats->rx_drops_no_pbuf;
292 drvs->rx_drops_no_txpb = rxf_stats->rx_drops_no_txpb;
293 drvs->rx_drops_no_erx_descr = rxf_stats->rx_drops_no_erx_descr;
294 drvs->rx_drops_invalid_ring = rxf_stats->rx_drops_invalid_ring;
295 drvs->forwarded_packets = rxf_stats->forwarded_packets;
296 drvs->rx_drops_mtu = rxf_stats->rx_drops_mtu;
297 drvs->rx_drops_no_tpre_descr =
298 rxf_stats->rx_drops_no_tpre_descr;
299 drvs->rx_drops_too_many_frags =
300 rxf_stats->rx_drops_too_many_frags;
301 adapter->drv_stats.eth_red_drops = pmem_sts->eth_red_drops;
302}
303
304static void populate_be3_stats(struct be_adapter *adapter)
305{
306 struct be_drv_stats *drvs = &adapter->drv_stats;
307 struct be_pmem_stats *pmem_sts = be_pmem_stats_from_cmd(adapter);
308
309 struct be_rxf_stats_v1 *rxf_stats =
310 be_rxf_stats_from_cmd(adapter);
311 struct be_port_rxf_stats_v1 *port_stats =
312 be_port_rxf_stats_from_cmd(adapter);
313
314 drvs->rx_priority_pause_frames = 0;
315 drvs->pmem_fifo_overflow_drop = 0;
316 drvs->rx_pause_frames = port_stats->rx_pause_frames;
317 drvs->rx_crc_errors = port_stats->rx_crc_errors;
318 drvs->rx_control_frames = port_stats->rx_control_frames;
319 drvs->rx_in_range_errors = port_stats->rx_in_range_errors;
320 drvs->rx_frame_too_long = port_stats->rx_frame_too_long;
321 drvs->rx_dropped_runt = port_stats->rx_dropped_runt;
322 drvs->rx_ip_checksum_errs = port_stats->rx_ip_checksum_errs;
323 drvs->rx_tcp_checksum_errs = port_stats->rx_tcp_checksum_errs;
324 drvs->rx_udp_checksum_errs = port_stats->rx_udp_checksum_errs;
325 drvs->rx_dropped_tcp_length = port_stats->rx_dropped_tcp_length;
326 drvs->rx_dropped_too_small = port_stats->rx_dropped_too_small;
327 drvs->rx_dropped_too_short = port_stats->rx_dropped_too_short;
328 drvs->rx_out_range_errors = port_stats->rx_out_range_errors;
329 drvs->rx_dropped_header_too_small =
330 port_stats->rx_dropped_header_too_small;
331 drvs->rx_input_fifo_overflow_drop =
332 port_stats->rx_input_fifo_overflow_drop;
333 drvs->rx_address_match_errors =
334 port_stats->rx_address_match_errors;
335 drvs->rx_alignment_symbol_errors =
336 port_stats->rx_alignment_symbol_errors;
337 drvs->rxpp_fifo_overflow_drop =
338 port_stats->rxpp_fifo_overflow_drop;
339 drvs->tx_pauseframes = port_stats->tx_pauseframes;
340 drvs->tx_controlframes = port_stats->tx_controlframes;
341 drvs->jabber_events = port_stats->jabber_events;
342 drvs->rx_drops_no_pbuf = rxf_stats->rx_drops_no_pbuf;
343 drvs->rx_drops_no_txpb = rxf_stats->rx_drops_no_txpb;
344 drvs->rx_drops_no_erx_descr = rxf_stats->rx_drops_no_erx_descr;
345 drvs->rx_drops_invalid_ring = rxf_stats->rx_drops_invalid_ring;
346 drvs->forwarded_packets = rxf_stats->forwarded_packets;
347 drvs->rx_drops_mtu = rxf_stats->rx_drops_mtu;
348 drvs->rx_drops_no_tpre_descr =
349 rxf_stats->rx_drops_no_tpre_descr;
350 drvs->rx_drops_too_many_frags =
351 rxf_stats->rx_drops_too_many_frags;
352 adapter->drv_stats.eth_red_drops = pmem_sts->eth_red_drops;
353}
354
Selvin Xavier005d5692011-05-16 07:36:35 +0000355static void populate_lancer_stats(struct be_adapter *adapter)
356{
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000357
Selvin Xavier005d5692011-05-16 07:36:35 +0000358 struct be_drv_stats *drvs = &adapter->drv_stats;
359 struct lancer_cmd_pport_stats *pport_stats = pport_stats_from_cmd
360 (adapter);
361 drvs->rx_priority_pause_frames = 0;
362 drvs->pmem_fifo_overflow_drop = 0;
363 drvs->rx_pause_frames =
364 make_64bit_val(pport_stats->rx_pause_frames_lo,
365 pport_stats->rx_pause_frames_hi);
366 drvs->rx_crc_errors = make_64bit_val(pport_stats->rx_crc_errors_hi,
367 pport_stats->rx_crc_errors_lo);
368 drvs->rx_control_frames =
369 make_64bit_val(pport_stats->rx_control_frames_hi,
370 pport_stats->rx_control_frames_lo);
371 drvs->rx_in_range_errors = pport_stats->rx_in_range_errors;
372 drvs->rx_frame_too_long =
373 make_64bit_val(pport_stats->rx_internal_mac_errors_hi,
374 pport_stats->rx_frames_too_long_lo);
375 drvs->rx_dropped_runt = pport_stats->rx_dropped_runt;
376 drvs->rx_ip_checksum_errs = pport_stats->rx_ip_checksum_errors;
377 drvs->rx_tcp_checksum_errs = pport_stats->rx_tcp_checksum_errors;
378 drvs->rx_udp_checksum_errs = pport_stats->rx_udp_checksum_errors;
379 drvs->rx_dropped_tcp_length =
380 pport_stats->rx_dropped_invalid_tcp_length;
381 drvs->rx_dropped_too_small = pport_stats->rx_dropped_too_small;
382 drvs->rx_dropped_too_short = pport_stats->rx_dropped_too_short;
383 drvs->rx_out_range_errors = pport_stats->rx_out_of_range_errors;
384 drvs->rx_dropped_header_too_small =
385 pport_stats->rx_dropped_header_too_small;
386 drvs->rx_input_fifo_overflow_drop = pport_stats->rx_fifo_overflow;
387 drvs->rx_address_match_errors = pport_stats->rx_address_match_errors;
388 drvs->rx_alignment_symbol_errors =
389 make_64bit_val(pport_stats->rx_symbol_errors_hi,
390 pport_stats->rx_symbol_errors_lo);
391 drvs->rxpp_fifo_overflow_drop = pport_stats->rx_fifo_overflow;
392 drvs->tx_pauseframes = make_64bit_val(pport_stats->tx_pause_frames_hi,
393 pport_stats->tx_pause_frames_lo);
394 drvs->tx_controlframes =
395 make_64bit_val(pport_stats->tx_control_frames_hi,
396 pport_stats->tx_control_frames_lo);
397 drvs->jabber_events = pport_stats->rx_jabbers;
398 drvs->rx_drops_no_pbuf = 0;
399 drvs->rx_drops_no_txpb = 0;
400 drvs->rx_drops_no_erx_descr = 0;
401 drvs->rx_drops_invalid_ring = pport_stats->rx_drops_invalid_queue;
402 drvs->forwarded_packets = make_64bit_val(pport_stats->num_forwards_hi,
403 pport_stats->num_forwards_lo);
404 drvs->rx_drops_mtu = make_64bit_val(pport_stats->rx_drops_mtu_hi,
405 pport_stats->rx_drops_mtu_lo);
406 drvs->rx_drops_no_tpre_descr = 0;
407 drvs->rx_drops_too_many_frags =
408 make_64bit_val(pport_stats->rx_drops_too_many_frags_hi,
409 pport_stats->rx_drops_too_many_frags_lo);
410}
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000411
412void be_parse_stats(struct be_adapter *adapter)
413{
Selvin Xavier005d5692011-05-16 07:36:35 +0000414 if (adapter->generation == BE_GEN3) {
415 if (lancer_chip(adapter))
416 populate_lancer_stats(adapter);
417 else
418 populate_be3_stats(adapter);
419 } else {
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000420 populate_be2_stats(adapter);
Selvin Xavier005d5692011-05-16 07:36:35 +0000421 }
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000422}
423
/* Rebuild adapter->netdev->stats from the per-ring software counters and
 * the firmware-derived be_drv_stats mirror (which be_parse_stats() keeps
 * up to date). Called from the driver's periodic worker.
 */
void netdev_stats_update(struct be_adapter *adapter)
{
	struct be_drv_stats *drvs = &adapter->drv_stats;
	struct net_device_stats *dev_stats = &adapter->netdev->stats;
	struct be_rx_obj *rxo;
	int i;

	/* totals are recomputed from scratch on every update */
	memset(dev_stats, 0, sizeof(*dev_stats));
	for_all_rx_queues(adapter, rxo, i) {
		dev_stats->rx_packets += rx_stats(rxo)->rx_pkts;
		dev_stats->rx_bytes += rx_stats(rxo)->rx_bytes;
		dev_stats->multicast += rx_stats(rxo)->rx_mcast_pkts;
		/* no space in linux buffers: best possible approximation */
		/* the per-ring erx drop counter lives in a generation-
		 * specific layout; Lancer (GEN3 variant) has no equivalent */
		if (adapter->generation == BE_GEN3) {
			if (!(lancer_chip(adapter))) {
				struct be_erx_stats_v1 *erx_stats =
					be_erx_stats_from_cmd(adapter);
				dev_stats->rx_dropped +=
				erx_stats->rx_drops_no_fragments[rxo->q.id];
			}
		} else {
			struct be_erx_stats_v0 *erx_stats =
					be_erx_stats_from_cmd(adapter);
			dev_stats->rx_dropped +=
				erx_stats->rx_drops_no_fragments[rxo->q.id];
		}
	}

	dev_stats->tx_packets = tx_stats(adapter)->be_tx_pkts;
	dev_stats->tx_bytes = tx_stats(adapter)->be_tx_bytes;

	/* bad pkts received */
	dev_stats->rx_errors = drvs->rx_crc_errors +
		drvs->rx_alignment_symbol_errors +
		drvs->rx_in_range_errors +
		drvs->rx_out_range_errors +
		drvs->rx_frame_too_long +
		drvs->rx_dropped_too_small +
		drvs->rx_dropped_too_short +
		drvs->rx_dropped_header_too_small +
		drvs->rx_dropped_tcp_length +
		drvs->rx_dropped_runt +
		drvs->rx_tcp_checksum_errs +
		drvs->rx_ip_checksum_errs +
		drvs->rx_udp_checksum_errs;

	/* detailed rx errors */
	dev_stats->rx_length_errors = drvs->rx_in_range_errors +
		drvs->rx_out_range_errors +
		drvs->rx_frame_too_long;

	dev_stats->rx_crc_errors = drvs->rx_crc_errors;

	/* frame alignment errors */
	dev_stats->rx_frame_errors = drvs->rx_alignment_symbol_errors;

	/* receiver fifo overrun */
	/* drops_no_pbuf is no per i/f, it's per BE card */
	dev_stats->rx_fifo_errors = drvs->rxpp_fifo_overflow_drop +
		drvs->rx_input_fifo_overflow_drop +
		drvs->rx_drops_no_pbuf;
}
486
Sathya Perla8788fdc2009-07-27 22:52:03 +0000487void be_link_status_update(struct be_adapter *adapter, bool link_up)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700488{
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700489 struct net_device *netdev = adapter->netdev;
490
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700491 /* If link came up or went down */
Sathya Perlaa8f447bd2009-06-18 00:10:27 +0000492 if (adapter->link_up != link_up) {
Ajit Khaparde0dffc832009-11-29 17:57:46 +0000493 adapter->link_speed = -1;
Sathya Perlaa8f447bd2009-06-18 00:10:27 +0000494 if (link_up) {
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700495 netif_carrier_on(netdev);
496 printk(KERN_INFO "%s: Link up\n", netdev->name);
Sathya Perlaa8f447bd2009-06-18 00:10:27 +0000497 } else {
Sathya Perlaa8f447bd2009-06-18 00:10:27 +0000498 netif_carrier_off(netdev);
499 printk(KERN_INFO "%s: Link down\n", netdev->name);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700500 }
Sathya Perlaa8f447bd2009-06-18 00:10:27 +0000501 adapter->link_up = link_up;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700502 }
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700503}
504
/* Update the EQ delay on BE based on the RX frags consumed / sec.
 * Adaptive interrupt coalescing: once per second, derive a frags-per-second
 * rate for this RX ring and map it to an event-queue delay, clamped to the
 * ring's [min_eqd, max_eqd] window. The new delay is pushed to firmware
 * only when it actually changes.
 */
static void be_rx_eqd_update(struct be_adapter *adapter, struct be_rx_obj *rxo)
{
	struct be_eq_obj *rx_eq = &rxo->rx_eq;
	struct be_rx_stats *stats = &rxo->stats;
	ulong now = jiffies;
	u32 eqd;

	if (!rx_eq->enable_aic)
		return;

	/* Wrapped around */
	if (time_before(now, stats->rx_fps_jiffies)) {
		stats->rx_fps_jiffies = now;
		return;
	}

	/* Update once a second */
	if ((now - stats->rx_fps_jiffies) < HZ)
		return;

	stats->rx_fps = (stats->rx_frags - stats->prev_rx_frags) /
			((now - stats->rx_fps_jiffies) / HZ);

	stats->rx_fps_jiffies = now;
	stats->prev_rx_frags = stats->rx_frags;
	/* map frag rate to a delay: /110000, then <<3 -- empirical scaling,
	 * presumably tuned for this hardware; origin not documented here */
	eqd = stats->rx_fps / 110000;
	eqd = eqd << 3;
	if (eqd > rx_eq->max_eqd)
		eqd = rx_eq->max_eqd;
	if (eqd < rx_eq->min_eqd)
		eqd = rx_eq->min_eqd;
	if (eqd < 10)
		eqd = 0;	/* too small to be worth coalescing */
	if (eqd != rx_eq->cur_eqd)
		be_cmd_modify_eqd(adapter, rx_eq->q.id, eqd);

	rx_eq->cur_eqd = eqd;
}
544
/* Convert a byte count accumulated over @ticks jiffies into a rate in
 * megabits per second.
 * NOTE(review): ticks/HZ is used as a divisor, so @ticks < HZ would
 * divide by zero; both callers only invoke this with intervals of at
 * least one second -- confirm before adding new callers.
 */
static u32 be_calc_rate(u64 bytes, unsigned long ticks)
{
	u64 rate = bytes;

	do_div(rate, ticks / HZ);	/* -> bytes per second */
	rate <<= 3;			/* bytes/sec -> bits/sec */
	do_div(rate, 1000000ul);	/* MB/Sec */

	return rate;
}
555
Sathya Perla4097f662009-03-24 16:40:13 -0700556static void be_tx_rate_update(struct be_adapter *adapter)
557{
Sathya Perla3abcded2010-10-03 22:12:27 -0700558 struct be_tx_stats *stats = tx_stats(adapter);
Sathya Perla4097f662009-03-24 16:40:13 -0700559 ulong now = jiffies;
560
561 /* Wrapped around? */
562 if (time_before(now, stats->be_tx_jiffies)) {
563 stats->be_tx_jiffies = now;
564 return;
565 }
566
567 /* Update tx rate once in two seconds */
568 if ((now - stats->be_tx_jiffies) > 2 * HZ) {
Stephen Hemminger65f71b82009-03-27 00:25:24 -0700569 stats->be_tx_rate = be_calc_rate(stats->be_tx_bytes
570 - stats->be_tx_bytes_prev,
571 now - stats->be_tx_jiffies);
Sathya Perla4097f662009-03-24 16:40:13 -0700572 stats->be_tx_jiffies = now;
573 stats->be_tx_bytes_prev = stats->be_tx_bytes;
574 }
575}
576
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700577static void be_tx_stats_update(struct be_adapter *adapter,
Ajit Khaparde91992e42010-02-19 13:57:12 +0000578 u32 wrb_cnt, u32 copied, u32 gso_segs, bool stopped)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700579{
Sathya Perla3abcded2010-10-03 22:12:27 -0700580 struct be_tx_stats *stats = tx_stats(adapter);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700581 stats->be_tx_reqs++;
582 stats->be_tx_wrbs += wrb_cnt;
583 stats->be_tx_bytes += copied;
Ajit Khaparde91992e42010-02-19 13:57:12 +0000584 stats->be_tx_pkts += (gso_segs ? gso_segs : 1);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700585 if (stopped)
586 stats->be_tx_stops++;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700587}
588
589/* Determine number of WRB entries needed to xmit data in an skb */
Sathya Perlafe6d2a32010-11-21 23:25:50 +0000590static u32 wrb_cnt_for_skb(struct be_adapter *adapter, struct sk_buff *skb,
591 bool *dummy)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700592{
David S. Millerebc8d2a2009-06-09 01:01:31 -0700593 int cnt = (skb->len > skb->data_len);
594
595 cnt += skb_shinfo(skb)->nr_frags;
596
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700597 /* to account for hdr wrb */
598 cnt++;
Sathya Perlafe6d2a32010-11-21 23:25:50 +0000599 if (lancer_chip(adapter) || !(cnt & 1)) {
600 *dummy = false;
601 } else {
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700602 /* add a dummy to make it an even num */
603 cnt++;
604 *dummy = true;
Sathya Perlafe6d2a32010-11-21 23:25:50 +0000605 }
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700606 BUG_ON(cnt > BE_MAX_TX_FRAG_COUNT);
607 return cnt;
608}
609
/* Populate one wrb with the (split hi/lo) DMA address and length of a
 * single TX buffer. The length is masked to the field width.
 */
static inline void wrb_fill(struct be_eth_wrb *wrb, u64 addr, int len)
{
	wrb->frag_pa_hi = upper_32_bits(addr);
	wrb->frag_pa_lo = addr & 0xFFFFFFFF;
	wrb->frag_len = len & ETH_WRB_FRAG_LEN_MASK;
}
616
/* Build the TX header wrb for @skb: offload flags (LSO, TCP/UDP checksum),
 * vlan insertion, the total wrb count and payload length. The AMAP_SET_BITS
 * macros write into the hardware-defined bit layout of amap_eth_hdr_wrb.
 */
static void wrb_fill_hdr(struct be_adapter *adapter, struct be_eth_hdr_wrb *hdr,
		struct sk_buff *skb, u32 wrb_cnt, u32 len)
{
	u8 vlan_prio = 0;
	u16 vlan_tag = 0;

	memset(hdr, 0, sizeof(*hdr));

	AMAP_SET_BITS(struct amap_eth_hdr_wrb, crc, hdr, 1);

	if (skb_is_gso(skb)) {
		AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso, hdr, 1);
		AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso_mss,
			hdr, skb_shinfo(skb)->gso_size);
		/* Lancer does not use the separate lso6 flag */
		if (skb_is_gso_v6(skb) && !lancer_chip(adapter))
			AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso6, hdr, 1);
		/* Lancer A0 silicon wants explicit csum flags even for LSO */
		if (lancer_chip(adapter) && adapter->sli_family ==
			LANCER_A0_SLI_FAMILY) {
			AMAP_SET_BITS(struct amap_eth_hdr_wrb, ipcs, hdr, 1);
			if (is_tcp_pkt(skb))
				AMAP_SET_BITS(struct amap_eth_hdr_wrb,
						tcpcs, hdr, 1);
			else if (is_udp_pkt(skb))
				AMAP_SET_BITS(struct amap_eth_hdr_wrb,
						udpcs, hdr, 1);
		}
	} else if (skb->ip_summed == CHECKSUM_PARTIAL) {
		if (is_tcp_pkt(skb))
			AMAP_SET_BITS(struct amap_eth_hdr_wrb, tcpcs, hdr, 1);
		else if (is_udp_pkt(skb))
			AMAP_SET_BITS(struct amap_eth_hdr_wrb, udpcs, hdr, 1);
	}

	if (adapter->vlan_grp && vlan_tx_tag_present(skb)) {
		AMAP_SET_BITS(struct amap_eth_hdr_wrb, vlan, hdr, 1);
		vlan_tag = vlan_tx_tag_get(skb);
		vlan_prio = (vlan_tag & VLAN_PRIO_MASK) >> VLAN_PRIO_SHIFT;
		/* If vlan priority provided by OS is NOT in available bmap */
		if (!(adapter->vlan_prio_bmap & (1 << vlan_prio)))
			vlan_tag = (vlan_tag & ~VLAN_PRIO_MASK) |
					adapter->recommended_prio;
		AMAP_SET_BITS(struct amap_eth_hdr_wrb, vlan_tag, hdr, vlan_tag);
	}

	AMAP_SET_BITS(struct amap_eth_hdr_wrb, event, hdr, 1);
	AMAP_SET_BITS(struct amap_eth_hdr_wrb, complete, hdr, 1);
	AMAP_SET_BITS(struct amap_eth_hdr_wrb, num_wrb, hdr, wrb_cnt);
	AMAP_SET_BITS(struct amap_eth_hdr_wrb, len, hdr, len);
}
666
Ivan Vecera2b7bceb2011-02-02 08:05:12 +0000667static void unmap_tx_frag(struct device *dev, struct be_eth_wrb *wrb,
Sathya Perla7101e112010-03-22 20:41:12 +0000668 bool unmap_single)
669{
670 dma_addr_t dma;
671
672 be_dws_le_to_cpu(wrb, sizeof(*wrb));
673
674 dma = (u64)wrb->frag_pa_hi << 32 | (u64)wrb->frag_pa_lo;
FUJITA Tomonorib681ee72010-04-04 21:40:18 +0000675 if (wrb->frag_len) {
Sathya Perla7101e112010-03-22 20:41:12 +0000676 if (unmap_single)
Ivan Vecera2b7bceb2011-02-02 08:05:12 +0000677 dma_unmap_single(dev, dma, wrb->frag_len,
678 DMA_TO_DEVICE);
Sathya Perla7101e112010-03-22 20:41:12 +0000679 else
Ivan Vecera2b7bceb2011-02-02 08:05:12 +0000680 dma_unmap_page(dev, dma, wrb->frag_len, DMA_TO_DEVICE);
Sathya Perla7101e112010-03-22 20:41:12 +0000681 }
682}
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700683
/* DMA-map @skb and post its buffers as wrbs on the TX ring: a header wrb,
 * one wrb for the linear head (if any), one per page fragment, and an
 * optional pad wrb. Returns the number of payload bytes posted, or 0 if
 * a DMA mapping failed (in which case the ring head is rewound and all
 * mappings made so far are undone).
 */
static int make_tx_wrbs(struct be_adapter *adapter,
		struct sk_buff *skb, u32 wrb_cnt, bool dummy_wrb)
{
	dma_addr_t busaddr;
	int i, copied = 0;
	struct device *dev = &adapter->pdev->dev;
	struct sk_buff *first_skb = skb;
	struct be_queue_info *txq = &adapter->tx_obj.q;
	struct be_eth_wrb *wrb;
	struct be_eth_hdr_wrb *hdr;
	bool map_single = false;
	u16 map_head;

	/* reserve the header wrb slot now; it is filled in last, once the
	 * total copied length is known */
	hdr = queue_head_node(txq);
	queue_head_inc(txq);
	map_head = txq->head;	/* rollback point for the error path */

	if (skb->len > skb->data_len) {
		int len = skb_headlen(skb);
		busaddr = dma_map_single(dev, skb->data, len, DMA_TO_DEVICE);
		if (dma_mapping_error(dev, busaddr))
			goto dma_err;
		map_single = true;
		wrb = queue_head_node(txq);
		wrb_fill(wrb, busaddr, len);
		be_dws_cpu_to_le(wrb, sizeof(*wrb));
		queue_head_inc(txq);
		copied += len;
	}

	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		struct skb_frag_struct *frag =
			&skb_shinfo(skb)->frags[i];
		busaddr = dma_map_page(dev, frag->page, frag->page_offset,
				       frag->size, DMA_TO_DEVICE);
		if (dma_mapping_error(dev, busaddr))
			goto dma_err;
		wrb = queue_head_node(txq);
		wrb_fill(wrb, busaddr, frag->size);
		be_dws_cpu_to_le(wrb, sizeof(*wrb));
		queue_head_inc(txq);
		copied += frag->size;
	}

	/* pad wrb to keep the entry count even on chips that need it */
	if (dummy_wrb) {
		wrb = queue_head_node(txq);
		wrb_fill(wrb, 0, 0);
		be_dws_cpu_to_le(wrb, sizeof(*wrb));
		queue_head_inc(txq);
	}

	wrb_fill_hdr(adapter, hdr, first_skb, wrb_cnt, copied);
	be_dws_cpu_to_le(hdr, sizeof(*hdr));

	return copied;
dma_err:
	/* rewind and unmap everything posted so far; only the first wrb can
	 * be a single mapping, all later ones are page mappings */
	txq->head = map_head;
	while (copied) {
		wrb = queue_head_node(txq);
		unmap_tx_frag(dev, wrb, map_single);
		map_single = false;
		copied -= wrb->frag_len;
		queue_head_inc(txq);
	}
	return 0;
}
750
/* ndo_start_xmit handler: build tx wrbs for the skb and ring the tx
 * doorbell.  On DMA mapping failure (make_tx_wrbs() returns 0) the
 * queue head is restored and the skb is dropped.
 */
static netdev_tx_t be_xmit(struct sk_buff *skb,
			struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_tx_obj *tx_obj = &adapter->tx_obj;
	struct be_queue_info *txq = &tx_obj->q;
	u32 wrb_cnt = 0, copied = 0;
	u32 start = txq->head;
	bool dummy_wrb, stopped = false;

	wrb_cnt = wrb_cnt_for_skb(adapter, skb, &dummy_wrb);

	copied = make_tx_wrbs(adapter, skb, wrb_cnt, dummy_wrb);
	if (copied) {
		/* record the sent skb in the sent_skb table */
		BUG_ON(tx_obj->sent_skb_list[start]);
		tx_obj->sent_skb_list[start] = skb;

		/* Ensure txq has space for the next skb; Else stop the queue
		 * *BEFORE* ringing the tx doorbell, so that we serialize the
		 * tx compls of the current transmit which'll wake up the queue
		 */
		atomic_add(wrb_cnt, &txq->used);
		if ((BE_MAX_TX_FRAG_COUNT + atomic_read(&txq->used)) >=
								txq->len) {
			netif_stop_queue(netdev);
			stopped = true;
		}

		be_txq_notify(adapter, txq->id, wrb_cnt);

		be_tx_stats_update(adapter, wrb_cnt, copied,
				skb_shinfo(skb)->gso_segs, stopped);
	} else {
		/* DMA mapping failed: undo the queue-head advance and drop */
		txq->head = start;
		dev_kfree_skb_any(skb);
	}
	return NETDEV_TX_OK;
}
790
791static int be_change_mtu(struct net_device *netdev, int new_mtu)
792{
793 struct be_adapter *adapter = netdev_priv(netdev);
794 if (new_mtu < BE_MIN_MTU ||
Ajit Khaparde34a89b82010-02-09 01:32:43 +0000795 new_mtu > (BE_MAX_JUMBO_FRAME_SIZE -
796 (ETH_HLEN + ETH_FCS_LEN))) {
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700797 dev_info(&adapter->pdev->dev,
798 "MTU must be between %d and %d bytes\n",
Ajit Khaparde34a89b82010-02-09 01:32:43 +0000799 BE_MIN_MTU,
800 (BE_MAX_JUMBO_FRAME_SIZE - (ETH_HLEN + ETH_FCS_LEN)));
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700801 return -EINVAL;
802 }
803 dev_info(&adapter->pdev->dev, "MTU changed from %d to %d bytes\n",
804 netdev->mtu, new_mtu);
805 netdev->mtu = new_mtu;
806 return 0;
807}
808
/*
 * A max of 64 (BE_NUM_VLANS_SUPPORTED) vlans can be configured in BE.
 * If the user configures more, place BE in vlan promiscuous mode.
 */
static int be_vid_config(struct be_adapter *adapter, bool vf, u32 vf_num)
{
	u16 vtag[BE_NUM_VLANS_SUPPORTED];
	u16 ntags = 0, i;
	int status = 0;
	u32 if_handle;

	if (vf) {
		/* A VF interface carries a single admin-assigned vlan tag */
		if_handle = adapter->vf_cfg[vf_num].vf_if_handle;
		vtag[0] = cpu_to_le16(adapter->vf_cfg[vf_num].vf_vlan_tag);
		status = be_cmd_vlan_config(adapter, if_handle, vtag, 1, 1, 0);
	}

	if (adapter->vlans_added <= adapter->max_vlans) {
		/* Construct VLAN Table to give to HW */
		for (i = 0; i < VLAN_N_VID; i++) {
			if (adapter->vlan_tag[i]) {
				vtag[ntags] = cpu_to_le16(i);
				ntags++;
			}
		}
		status = be_cmd_vlan_config(adapter, adapter->if_handle,
					vtag, ntags, 1, 0);
	} else {
		/* More vlans configured than HW supports: turn on vlan
		 * promiscuous mode instead of a vlan table */
		status = be_cmd_vlan_config(adapter, adapter->if_handle,
					NULL, 0, 1, 1);
	}

	return status;
}
843
/* ndo_vlan_rx_register: cache the vlan group pointer; the rx path uses
 * it for vlan_hwaccel_receive_skb()/vlan_gro_frags(). */
static void be_vlan_register(struct net_device *netdev, struct vlan_group *grp)
{
	struct be_adapter *adapter = netdev_priv(netdev);

	adapter->vlan_grp = grp;
}
850
/* ndo_vlan_rx_add_vid: count the new vid; on the physical function also
 * mark it in vlan_tag[] and reprogram the HW vlan table. */
static void be_vlan_add_vid(struct net_device *netdev, u16 vid)
{
	struct be_adapter *adapter = netdev_priv(netdev);

	/* Counted on VFs too, even though only the PF programs the table */
	adapter->vlans_added++;
	if (!be_physfn(adapter))
		return;

	adapter->vlan_tag[vid] = 1;
	/* +1 because vlans_added already includes the vid added above */
	if (adapter->vlans_added <= (adapter->max_vlans + 1))
		be_vid_config(adapter, false, 0);
}
863
/* ndo_vlan_rx_kill_vid: drop the vid from the vlan group; on the
 * physical function also clear it and reprogram the HW vlan table. */
static void be_vlan_rem_vid(struct net_device *netdev, u16 vid)
{
	struct be_adapter *adapter = netdev_priv(netdev);

	adapter->vlans_added--;
	vlan_group_set_device(adapter->vlan_grp, vid, NULL);

	if (!be_physfn(adapter))
		return;

	adapter->vlan_tag[vid] = 0;
	/* Re-program only when back within the HW-supported vlan count */
	if (adapter->vlans_added <= adapter->max_vlans)
		be_vid_config(adapter, false, 0);
}
878
/* ndo_set_multicast_list: sync the device's promiscuous and multicast
 * filtering state with the hardware via mailbox commands. */
static void be_set_multicast_list(struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);

	if (netdev->flags & IFF_PROMISC) {
		be_cmd_promiscuous_config(adapter, true);
		adapter->promiscuous = true;
		goto done;
	}

	/* BE was previously in promiscuous mode; disable it */
	if (adapter->promiscuous) {
		adapter->promiscuous = false;
		be_cmd_promiscuous_config(adapter, false);
	}

	/* Enable multicast promisc if num configured exceeds what we support */
	if (netdev->flags & IFF_ALLMULTI ||
	    netdev_mc_count(netdev) > BE_MAX_MC) {
		/* NULL netdev => program "all multicast" in the fw cmd */
		be_cmd_multicast_set(adapter, adapter->if_handle, NULL,
				&adapter->mc_cmd_mem);
		goto done;
	}

	be_cmd_multicast_set(adapter, adapter->if_handle, netdev,
		&adapter->mc_cmd_mem);
done:
	return;
}
908
/* ndo_set_vf_mac: replace the MAC programmed for a VF.  Any existing
 * pmac entry is deleted before the new MAC is added; the cached copy in
 * vf_cfg is updated only on success. */
static int be_set_vf_mac(struct net_device *netdev, int vf, u8 *mac)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	int status;

	if (!adapter->sriov_enabled)
		return -EPERM;

	if (!is_valid_ether_addr(mac) || (vf >= num_vfs))
		return -EINVAL;

	if (adapter->vf_cfg[vf].vf_pmac_id != BE_INVALID_PMAC_ID)
		status = be_cmd_pmac_del(adapter,
				adapter->vf_cfg[vf].vf_if_handle,
				adapter->vf_cfg[vf].vf_pmac_id, vf + 1);

	/* vf + 1 is the id passed for VF-targeted fw cmds throughout
	 * this driver (see be_set_vf_tx_rate as well) */
	status = be_cmd_pmac_add(adapter, mac,
			adapter->vf_cfg[vf].vf_if_handle,
			&adapter->vf_cfg[vf].vf_pmac_id, vf + 1);

	if (status)
		dev_err(&adapter->pdev->dev, "MAC %pM set on VF %d Failed\n",
				mac, vf);
	else
		memcpy(adapter->vf_cfg[vf].vf_mac_addr, mac, ETH_ALEN);

	return status;
}
937
Ajit Khaparde64600ea2010-07-23 01:50:34 +0000938static int be_get_vf_config(struct net_device *netdev, int vf,
939 struct ifla_vf_info *vi)
940{
941 struct be_adapter *adapter = netdev_priv(netdev);
942
943 if (!adapter->sriov_enabled)
944 return -EPERM;
945
946 if (vf >= num_vfs)
947 return -EINVAL;
948
949 vi->vf = vf;
Ajit Khapardee1d18732010-07-23 01:52:13 +0000950 vi->tx_rate = adapter->vf_cfg[vf].vf_tx_rate;
Ajit Khaparde1da87b72010-07-23 01:51:22 +0000951 vi->vlan = adapter->vf_cfg[vf].vf_vlan_tag;
Ajit Khaparde64600ea2010-07-23 01:50:34 +0000952 vi->qos = 0;
953 memcpy(&vi->mac, adapter->vf_cfg[vf].vf_mac_addr, ETH_ALEN);
954
955 return 0;
956}
957
/* ndo_set_vf_vlan: set (vlan != 0) or clear (vlan == 0) the admin vlan
 * tag for a VF and push the change to HW via be_vid_config().
 * The qos argument is accepted but not programmed. */
static int be_set_vf_vlan(struct net_device *netdev,
			int vf, u16 vlan, u8 qos)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	int status = 0;

	if (!adapter->sriov_enabled)
		return -EPERM;

	if ((vf >= num_vfs) || (vlan > 4095))
		return -EINVAL;

	if (vlan) {
		adapter->vf_cfg[vf].vf_vlan_tag = vlan;
		adapter->vlans_added++;
	} else {
		adapter->vf_cfg[vf].vf_vlan_tag = 0;
		adapter->vlans_added--;
	}

	status = be_vid_config(adapter, true, vf);

	if (status)
		dev_info(&adapter->pdev->dev,
				"VLAN %d config on VF %d failed\n", vlan, vf);
	return status;
}
985
Ajit Khapardee1d18732010-07-23 01:52:13 +0000986static int be_set_vf_tx_rate(struct net_device *netdev,
987 int vf, int rate)
988{
989 struct be_adapter *adapter = netdev_priv(netdev);
990 int status = 0;
991
992 if (!adapter->sriov_enabled)
993 return -EPERM;
994
995 if ((vf >= num_vfs) || (rate < 0))
996 return -EINVAL;
997
998 if (rate > 10000)
999 rate = 10000;
1000
1001 adapter->vf_cfg[vf].vf_tx_rate = rate;
Ajit Khaparde856c4012011-02-11 13:32:32 +00001002 status = be_cmd_set_qos(adapter, rate / 10, vf + 1);
Ajit Khapardee1d18732010-07-23 01:52:13 +00001003
1004 if (status)
1005 dev_info(&adapter->pdev->dev,
1006 "tx rate %d on VF %d failed\n", rate, vf);
1007 return status;
1008}
1009
/* Recompute the rx bit-rate of this rx object from the byte counters,
 * at most once every two seconds. */
static void be_rx_rate_update(struct be_rx_obj *rxo)
{
	struct be_rx_stats *stats = &rxo->stats;
	ulong now = jiffies;

	/* Wrapped around */
	if (time_before(now, stats->rx_jiffies)) {
		stats->rx_jiffies = now;
		return;
	}

	/* Update the rate once in two seconds */
	if ((now - stats->rx_jiffies) < 2 * HZ)
		return;

	stats->rx_rate = be_calc_rate(stats->rx_bytes - stats->rx_bytes_prev,
				now - stats->rx_jiffies);
	stats->rx_jiffies = now;
	stats->rx_bytes_prev = stats->rx_bytes;
}
1030
Sathya Perla3abcded2010-10-03 22:12:27 -07001031static void be_rx_stats_update(struct be_rx_obj *rxo,
Sathya Perla2e588f82011-03-11 02:49:26 +00001032 struct be_rx_compl_info *rxcp)
Sathya Perla4097f662009-03-24 16:40:13 -07001033{
Sathya Perla3abcded2010-10-03 22:12:27 -07001034 struct be_rx_stats *stats = &rxo->stats;
Sathya Perla4097f662009-03-24 16:40:13 -07001035
Sathya Perla3abcded2010-10-03 22:12:27 -07001036 stats->rx_compl++;
Sathya Perla2e588f82011-03-11 02:49:26 +00001037 stats->rx_frags += rxcp->num_rcvd;
1038 stats->rx_bytes += rxcp->pkt_size;
Sathya Perla3abcded2010-10-03 22:12:27 -07001039 stats->rx_pkts++;
Sathya Perla2e588f82011-03-11 02:49:26 +00001040 if (rxcp->pkt_type == BE_MULTICAST_PACKET)
Sathya Perla3abcded2010-10-03 22:12:27 -07001041 stats->rx_mcast_pkts++;
Sathya Perla2e588f82011-03-11 02:49:26 +00001042 if (rxcp->err)
1043 stats->rxcp_err++;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001044}
1045
Sathya Perla2e588f82011-03-11 02:49:26 +00001046static inline bool csum_passed(struct be_rx_compl_info *rxcp)
Ajit Khaparde728a9972009-04-13 15:41:22 -07001047{
Padmanabh Ratnakar19fad862011-03-07 03:08:16 +00001048 /* L4 checksum is not reliable for non TCP/UDP packets.
1049 * Also ignore ipcksm for ipv6 pkts */
Sathya Perla2e588f82011-03-11 02:49:26 +00001050 return (rxcp->tcpf || rxcp->udpf) && rxcp->l4_csum &&
1051 (rxcp->ip_csum || rxcp->ipv6);
Ajit Khaparde728a9972009-04-13 15:41:22 -07001052}
1053
/* Fetch (and consume) the rx page_info posted at frag_idx.  When this
 * entry was the last user of its backing page, the page is DMA-unmapped
 * from the device first.  Decrements the rx queue's used count. */
static struct be_rx_page_info *
get_rx_page_info(struct be_adapter *adapter,
		struct be_rx_obj *rxo,
		u16 frag_idx)
{
	struct be_rx_page_info *rx_page_info;
	struct be_queue_info *rxq = &rxo->q;

	rx_page_info = &rxo->page_info_tbl[frag_idx];
	BUG_ON(!rx_page_info->page);

	if (rx_page_info->last_page_user) {
		dma_unmap_page(&adapter->pdev->dev,
			       dma_unmap_addr(rx_page_info, bus),
			       adapter->big_page_size, DMA_FROM_DEVICE);
		rx_page_info->last_page_user = false;
	}

	atomic_dec(&rxq->used);
	return rx_page_info;
}
1075
/* Throwaway the data in the Rx completion: release every page frag
 * that made up the frame without building an skb. */
static void be_rx_compl_discard(struct be_adapter *adapter,
		struct be_rx_obj *rxo,
		struct be_rx_compl_info *rxcp)
{
	struct be_queue_info *rxq = &rxo->q;
	struct be_rx_page_info *page_info;
	u16 i, num_rcvd = rxcp->num_rcvd;

	for (i = 0; i < num_rcvd; i++) {
		page_info = get_rx_page_info(adapter, rxo, rxcp->rxq_idx);
		put_page(page_info->page);
		memset(page_info, 0, sizeof(*page_info));
		index_inc(&rxcp->rxq_idx, rxq->len);
	}
}
1092
/*
 * skb_fill_rx_data forms a complete skb for an ether frame
 * indicated by rxcp.  The first BE_HDR_LEN bytes are copied into the
 * skb's linear area; the rest of the data stays in the rx pages, which
 * are attached as page frags (frags from the same physical page are
 * coalesced into a single frag slot).
 */
static void skb_fill_rx_data(struct be_adapter *adapter, struct be_rx_obj *rxo,
			struct sk_buff *skb, struct be_rx_compl_info *rxcp)
{
	struct be_queue_info *rxq = &rxo->q;
	struct be_rx_page_info *page_info;
	u16 i, j;
	u16 hdr_len, curr_frag_len, remaining;
	u8 *start;

	page_info = get_rx_page_info(adapter, rxo, rxcp->rxq_idx);
	start = page_address(page_info->page) + page_info->page_offset;
	prefetch(start);

	/* Copy data in the first descriptor of this completion */
	curr_frag_len = min(rxcp->pkt_size, rx_frag_size);

	/* Copy the header portion into skb_data */
	hdr_len = min(BE_HDR_LEN, curr_frag_len);
	memcpy(skb->data, start, hdr_len);
	skb->len = curr_frag_len;
	if (curr_frag_len <= BE_HDR_LEN) { /* tiny packet */
		/* Complete packet has now been moved to data */
		put_page(page_info->page);
		skb->data_len = 0;
		skb->tail += curr_frag_len;
	} else {
		/* Remainder of the first frag stays in the page as frag[0] */
		skb_shinfo(skb)->nr_frags = 1;
		skb_shinfo(skb)->frags[0].page = page_info->page;
		skb_shinfo(skb)->frags[0].page_offset =
					page_info->page_offset + hdr_len;
		skb_shinfo(skb)->frags[0].size = curr_frag_len - hdr_len;
		skb->data_len = curr_frag_len - hdr_len;
		skb->tail += hdr_len;
	}
	page_info->page = NULL;

	/* Single-frag frame: nothing more to attach */
	if (rxcp->pkt_size <= rx_frag_size) {
		BUG_ON(rxcp->num_rcvd != 1);
		return;
	}

	/* More frags present for this completion */
	index_inc(&rxcp->rxq_idx, rxq->len);
	remaining = rxcp->pkt_size - curr_frag_len;
	for (i = 1, j = 0; i < rxcp->num_rcvd; i++) {
		page_info = get_rx_page_info(adapter, rxo, rxcp->rxq_idx);
		curr_frag_len = min(remaining, rx_frag_size);

		/* Coalesce all frags from the same physical page in one slot */
		if (page_info->page_offset == 0) {
			/* Fresh page */
			j++;
			skb_shinfo(skb)->frags[j].page = page_info->page;
			skb_shinfo(skb)->frags[j].page_offset =
							page_info->page_offset;
			skb_shinfo(skb)->frags[j].size = 0;
			skb_shinfo(skb)->nr_frags++;
		} else {
			/* Same page as the previous frag: drop the extra ref */
			put_page(page_info->page);
		}

		skb_shinfo(skb)->frags[j].size += curr_frag_len;
		skb->len += curr_frag_len;
		skb->data_len += curr_frag_len;

		remaining -= curr_frag_len;
		index_inc(&rxcp->rxq_idx, rxq->len);
		page_info->page = NULL;
	}
	BUG_ON(j > MAX_SKB_FRAGS);
}
1168
/* Process the RX completion indicated by rxcp when GRO is disabled */
static void be_rx_compl_process(struct be_adapter *adapter,
			struct be_rx_obj *rxo,
			struct be_rx_compl_info *rxcp)
{
	struct net_device *netdev = adapter->netdev;
	struct sk_buff *skb;

	skb = netdev_alloc_skb_ip_align(netdev, BE_HDR_LEN);
	if (unlikely(!skb)) {
		/* No skb available: must still consume the rx frags */
		if (net_ratelimit())
			dev_warn(&adapter->pdev->dev, "skb alloc failed\n");
		be_rx_compl_discard(adapter, rxo, rxcp);
		return;
	}

	skb_fill_rx_data(adapter, rxo, skb, rxcp);

	/* Honour the HW checksum only when it is trustworthy for this pkt */
	if (likely((netdev->features & NETIF_F_RXCSUM) && csum_passed(rxcp)))
		skb->ip_summed = CHECKSUM_UNNECESSARY;
	else
		skb_checksum_none_assert(skb);

	skb->truesize = skb->len + sizeof(struct sk_buff);
	skb->protocol = eth_type_trans(skb, netdev);
	if (adapter->netdev->features & NETIF_F_RXHASH)
		skb->rxhash = rxcp->rss_hash;

	if (unlikely(rxcp->vlanf)) {
		/* vlan-tagged frame but no vlan group / no vids: drop it */
		if (!adapter->vlan_grp || adapter->vlans_added == 0) {
			kfree_skb(skb);
			return;
		}
		vlan_hwaccel_receive_skb(skb, adapter->vlan_grp,
					rxcp->vlan_tag);
	} else {
		netif_receive_skb(skb);
	}
}
1209
/* Process the RX completion indicated by rxcp when GRO is enabled:
 * attach the rx page frags directly to the napi GRO skb (no header
 * copy), coalescing frags from the same physical page into one slot. */
static void be_rx_compl_process_gro(struct be_adapter *adapter,
		struct be_rx_obj *rxo,
		struct be_rx_compl_info *rxcp)
{
	struct be_rx_page_info *page_info;
	struct sk_buff *skb = NULL;
	struct be_queue_info *rxq = &rxo->q;
	struct be_eq_obj *eq_obj =  &rxo->rx_eq;
	u16 remaining, curr_frag_len;
	u16 i, j;

	skb = napi_get_frags(&eq_obj->napi);
	if (!skb) {
		be_rx_compl_discard(adapter, rxo, rxcp);
		return;
	}

	remaining = rxcp->pkt_size;
	for (i = 0, j = -1; i < rxcp->num_rcvd; i++) {
		page_info = get_rx_page_info(adapter, rxo, rxcp->rxq_idx);

		curr_frag_len = min(remaining, rx_frag_size);

		/* Coalesce all frags from the same physical page in one slot */
		if (i == 0 || page_info->page_offset == 0) {
			/* First frag or Fresh page */
			j++;
			skb_shinfo(skb)->frags[j].page = page_info->page;
			skb_shinfo(skb)->frags[j].page_offset =
							page_info->page_offset;
			skb_shinfo(skb)->frags[j].size = 0;
		} else {
			/* Same page as previous frag: drop the extra ref */
			put_page(page_info->page);
		}
		skb_shinfo(skb)->frags[j].size += curr_frag_len;

		remaining -= curr_frag_len;
		index_inc(&rxcp->rxq_idx, rxq->len);
		memset(page_info, 0, sizeof(*page_info));
	}
	BUG_ON(j > MAX_SKB_FRAGS);

	skb_shinfo(skb)->nr_frags = j + 1;
	skb->len = rxcp->pkt_size;
	skb->data_len = rxcp->pkt_size;
	skb->truesize += rxcp->pkt_size;
	/* GRO path is only taken for frames whose csum the HW verified */
	skb->ip_summed = CHECKSUM_UNNECESSARY;
	if (adapter->netdev->features & NETIF_F_RXHASH)
		skb->rxhash = rxcp->rss_hash;

	if (likely(!rxcp->vlanf))
		napi_gro_frags(&eq_obj->napi);
	else
		vlan_gro_frags(&eq_obj->napi, adapter->vlan_grp,
				rxcp->vlan_tag);
}
1267
Sathya Perla2e588f82011-03-11 02:49:26 +00001268static void be_parse_rx_compl_v1(struct be_adapter *adapter,
1269 struct be_eth_rx_compl *compl,
1270 struct be_rx_compl_info *rxcp)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001271{
Sathya Perla2e588f82011-03-11 02:49:26 +00001272 rxcp->pkt_size =
1273 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, pktsize, compl);
1274 rxcp->vlanf = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, vtp, compl);
1275 rxcp->err = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, err, compl);
1276 rxcp->tcpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, tcpf, compl);
Padmanabh Ratnakar9ecb42f2011-03-15 14:57:09 -07001277 rxcp->udpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, udpf, compl);
Sathya Perla2e588f82011-03-11 02:49:26 +00001278 rxcp->ip_csum =
1279 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, ipcksm, compl);
1280 rxcp->l4_csum =
1281 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, l4_cksm, compl);
1282 rxcp->ipv6 =
1283 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, ip_version, compl);
1284 rxcp->rxq_idx =
1285 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, fragndx, compl);
1286 rxcp->num_rcvd =
1287 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, numfrags, compl);
1288 rxcp->pkt_type =
1289 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, cast_enc, compl);
Ajit Khaparde4b972912011-04-06 18:07:43 +00001290 rxcp->rss_hash =
1291 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, rsshash, rxcp);
Sathya Perla15d72182011-03-21 20:49:26 +00001292 if (rxcp->vlanf) {
1293 rxcp->vtm = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, vtm,
David S. Miller3c709f82011-05-11 14:26:15 -04001294 compl);
1295 rxcp->vlan_tag = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, vlan_tag,
1296 compl);
Sathya Perla15d72182011-03-21 20:49:26 +00001297 }
Sathya Perla2e588f82011-03-11 02:49:26 +00001298}
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001299
Sathya Perla2e588f82011-03-11 02:49:26 +00001300static void be_parse_rx_compl_v0(struct be_adapter *adapter,
1301 struct be_eth_rx_compl *compl,
1302 struct be_rx_compl_info *rxcp)
1303{
1304 rxcp->pkt_size =
1305 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, pktsize, compl);
1306 rxcp->vlanf = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, vtp, compl);
1307 rxcp->err = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, err, compl);
1308 rxcp->tcpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, tcpf, compl);
Padmanabh Ratnakar9ecb42f2011-03-15 14:57:09 -07001309 rxcp->udpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, udpf, compl);
Sathya Perla2e588f82011-03-11 02:49:26 +00001310 rxcp->ip_csum =
1311 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, ipcksm, compl);
1312 rxcp->l4_csum =
1313 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, l4_cksm, compl);
1314 rxcp->ipv6 =
1315 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, ip_version, compl);
1316 rxcp->rxq_idx =
1317 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, fragndx, compl);
1318 rxcp->num_rcvd =
1319 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, numfrags, compl);
1320 rxcp->pkt_type =
1321 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, cast_enc, compl);
Ajit Khaparde4b972912011-04-06 18:07:43 +00001322 rxcp->rss_hash =
1323 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, rsshash, rxcp);
Sathya Perla15d72182011-03-21 20:49:26 +00001324 if (rxcp->vlanf) {
1325 rxcp->vtm = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, vtm,
David S. Miller3c709f82011-05-11 14:26:15 -04001326 compl);
1327 rxcp->vlan_tag = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, vlan_tag,
1328 compl);
Sathya Perla15d72182011-03-21 20:49:26 +00001329 }
Sathya Perla2e588f82011-03-11 02:49:26 +00001330}
1331
/* Pop the next valid rx completion from rxo's completion queue, parse
 * it into rxo->rxcp, and invalidate it in the queue.  Returns NULL when
 * no new completion is available. */
static struct be_rx_compl_info *be_rx_compl_get(struct be_rx_obj *rxo)
{
	struct be_eth_rx_compl *compl = queue_tail_node(&rxo->cq);
	struct be_rx_compl_info *rxcp = &rxo->rxcp;
	struct be_adapter *adapter = rxo->adapter;

	/* For checking the valid bit it is Ok to use either definition as the
	 * valid bit is at the same position in both v0 and v1 Rx compl */
	if (compl->dw[offsetof(struct amap_eth_rx_compl_v1, valid) / 32] == 0)
		return NULL;

	/* Read the rest of the compl only after the valid bit is seen */
	rmb();
	be_dws_le_to_cpu(compl, sizeof(*compl));

	if (adapter->be3_native)
		be_parse_rx_compl_v1(adapter, compl, rxcp);
	else
		be_parse_rx_compl_v0(adapter, compl, rxcp);

	if (rxcp->vlanf) {
		/* vlanf could be wrongly set in some cards.
		 * ignore if vtm is not set */
		if ((adapter->function_mode & 0x400) && !rxcp->vtm)
			rxcp->vlanf = 0;

		if (!lancer_chip(adapter))
			rxcp->vlan_tag = swab16(rxcp->vlan_tag);

		/* Don't indicate the vlan when it matches the port pvid
		 * and that vid was never configured by the stack */
		if (((adapter->pvid & VLAN_VID_MASK) ==
		     (rxcp->vlan_tag & VLAN_VID_MASK)) &&
		    !adapter->vlan_tag[rxcp->vlan_tag])
			rxcp->vlanf = 0;
	}

	/* As the compl has been parsed, reset it; we wont touch it again */
	compl->dw[offsetof(struct amap_eth_rx_compl_v1, valid) / 32] = 0;

	queue_tail_inc(&rxo->cq);
	return rxcp;
}
1372
Eric Dumazet1829b082011-03-01 05:48:12 +00001373static inline struct page *be_alloc_pages(u32 size, gfp_t gfp)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001374{
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001375 u32 order = get_order(size);
Eric Dumazet1829b082011-03-01 05:48:12 +00001376
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001377 if (order > 0)
Eric Dumazet1829b082011-03-01 05:48:12 +00001378 gfp |= __GFP_COMP;
1379 return alloc_pages(gfp, order);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001380}
1381
1382/*
1383 * Allocate a page, split it to fragments of size rx_frag_size and post as
1384 * receive buffers to BE
1385 */
Eric Dumazet1829b082011-03-01 05:48:12 +00001386static void be_post_rx_frags(struct be_rx_obj *rxo, gfp_t gfp)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001387{
Sathya Perla3abcded2010-10-03 22:12:27 -07001388 struct be_adapter *adapter = rxo->adapter;
1389 struct be_rx_page_info *page_info_tbl = rxo->page_info_tbl;
Sathya Perla26d92f92010-01-21 22:52:08 -08001390 struct be_rx_page_info *page_info = NULL, *prev_page_info = NULL;
Sathya Perla3abcded2010-10-03 22:12:27 -07001391 struct be_queue_info *rxq = &rxo->q;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001392 struct page *pagep = NULL;
1393 struct be_eth_rx_d *rxd;
1394 u64 page_dmaaddr = 0, frag_dmaaddr;
1395 u32 posted, page_offset = 0;
1396
Sathya Perla3abcded2010-10-03 22:12:27 -07001397 page_info = &rxo->page_info_tbl[rxq->head];
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001398 for (posted = 0; posted < MAX_RX_POST && !page_info->page; posted++) {
1399 if (!pagep) {
Eric Dumazet1829b082011-03-01 05:48:12 +00001400 pagep = be_alloc_pages(adapter->big_page_size, gfp);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001401 if (unlikely(!pagep)) {
Sathya Perla3abcded2010-10-03 22:12:27 -07001402 rxo->stats.rx_post_fail++;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001403 break;
1404 }
Ivan Vecera2b7bceb2011-02-02 08:05:12 +00001405 page_dmaaddr = dma_map_page(&adapter->pdev->dev, pagep,
1406 0, adapter->big_page_size,
1407 DMA_FROM_DEVICE);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001408 page_info->page_offset = 0;
1409 } else {
1410 get_page(pagep);
1411 page_info->page_offset = page_offset + rx_frag_size;
1412 }
1413 page_offset = page_info->page_offset;
1414 page_info->page = pagep;
FUJITA Tomonorifac6da52010-04-01 16:53:22 +00001415 dma_unmap_addr_set(page_info, bus, page_dmaaddr);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001416 frag_dmaaddr = page_dmaaddr + page_info->page_offset;
1417
1418 rxd = queue_head_node(rxq);
1419 rxd->fragpa_lo = cpu_to_le32(frag_dmaaddr & 0xFFFFFFFF);
1420 rxd->fragpa_hi = cpu_to_le32(upper_32_bits(frag_dmaaddr));
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001421
1422 /* Any space left in the current big page for another frag? */
1423 if ((page_offset + rx_frag_size + rx_frag_size) >
1424 adapter->big_page_size) {
1425 pagep = NULL;
1426 page_info->last_page_user = true;
1427 }
Sathya Perla26d92f92010-01-21 22:52:08 -08001428
1429 prev_page_info = page_info;
1430 queue_head_inc(rxq);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001431 page_info = &page_info_tbl[rxq->head];
1432 }
1433 if (pagep)
Sathya Perla26d92f92010-01-21 22:52:08 -08001434 prev_page_info->last_page_user = true;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001435
1436 if (posted) {
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001437 atomic_add(posted, &rxq->used);
Sathya Perla8788fdc2009-07-27 22:52:03 +00001438 be_rxq_notify(adapter, rxq->id, posted);
Sathya Perlaea1dae12009-03-19 23:56:20 -07001439 } else if (atomic_read(&rxq->used) == 0) {
1440 /* Let be_worker replenish when memory is available */
Sathya Perla3abcded2010-10-03 22:12:27 -07001441 rxo->rx_post_starved = true;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001442 }
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001443}
1444
/* Return the next valid TX completion from tx_cq (consuming the slot),
 * or NULL if none is pending. The entry is byte-swapped to CPU order and
 * its valid bit cleared so the slot reads as empty on the next pass.
 */
static struct be_eth_tx_compl *be_tx_compl_get(struct be_queue_info *tx_cq)
{
	struct be_eth_tx_compl *txcp = queue_tail_node(tx_cq);

	/* Nothing posted if the valid bit is still clear */
	if (txcp->dw[offsetof(struct amap_eth_tx_compl, valid) / 32] == 0)
		return NULL;

	/* Don't read the entry body before the valid-bit check */
	rmb();
	be_dws_le_to_cpu(txcp, sizeof(*txcp));

	/* Reset the valid bit for queue reuse */
	txcp->dw[offsetof(struct amap_eth_tx_compl, valid) / 32] = 0;

	queue_tail_inc(tx_cq);
	return txcp;
}
1460
/* Reclaim the skb whose WRBs end at @last_index on the TX queue:
 * DMA-unmap every fragment WRB, free the skb and advance txq->tail past
 * the whole WRB chain (header WRB included).
 * Returns the number of WRBs consumed so the caller can decrement
 * txq->used (this function does not touch the counter itself).
 */
static u16 be_tx_compl_process(struct be_adapter *adapter, u16 last_index)
{
	struct be_queue_info *txq = &adapter->tx_obj.q;
	struct be_eth_wrb *wrb;
	struct sk_buff **sent_skbs = adapter->tx_obj.sent_skb_list;
	struct sk_buff *sent_skb;
	u16 cur_index, num_wrbs = 1; /* account for hdr wrb */
	bool unmap_skb_hdr = true;

	sent_skb = sent_skbs[txq->tail];
	BUG_ON(!sent_skb);
	sent_skbs[txq->tail] = NULL;

	/* skip header wrb */
	queue_tail_inc(txq);

	do {
		cur_index = txq->tail;
		wrb = queue_tail_node(txq);
		/* The first fragment WRB also carries the linear (header)
		 * part of the skb, if any — unmap it exactly once */
		unmap_tx_frag(&adapter->pdev->dev, wrb,
			(unmap_skb_hdr && skb_headlen(sent_skb)));
		unmap_skb_hdr = false;

		num_wrbs++;
		queue_tail_inc(txq);
	} while (cur_index != last_index);

	kfree_skb(sent_skb);
	return num_wrbs;
}
1491
/* Return the next posted entry on the event queue (consuming the slot),
 * or NULL if none. The evt word is converted to CPU order; the caller is
 * expected to zero eqe->evt after processing so the slot reads empty.
 */
static inline struct be_eq_entry *event_get(struct be_eq_obj *eq_obj)
{
	struct be_eq_entry *eqe = queue_tail_node(&eq_obj->q);

	if (!eqe->evt)
		return NULL;

	/* Don't read the entry before the evt (valid) word check */
	rmb();
	eqe->evt = le32_to_cpu(eqe->evt);
	queue_tail_inc(&eq_obj->q);
	return eqe;
}
1504
1505static int event_handle(struct be_adapter *adapter,
1506 struct be_eq_obj *eq_obj)
1507{
1508 struct be_eq_entry *eqe;
1509 u16 num = 0;
1510
1511 while ((eqe = event_get(eq_obj)) != NULL) {
1512 eqe->evt = 0;
1513 num++;
1514 }
1515
1516 /* Deal with any spurious interrupts that come
1517 * without events
1518 */
1519 be_eq_notify(adapter, eq_obj->q.id, true, true, num);
1520 if (num)
1521 napi_schedule(&eq_obj->napi);
1522
1523 return num;
1524}
1525
1526/* Just read and notify events without processing them.
1527 * Used at the time of destroying event queues */
1528static void be_eq_clean(struct be_adapter *adapter,
1529 struct be_eq_obj *eq_obj)
1530{
1531 struct be_eq_entry *eqe;
1532 u16 num = 0;
1533
1534 while ((eqe = event_get(eq_obj)) != NULL) {
1535 eqe->evt = 0;
1536 num++;
1537 }
1538
1539 if (num)
1540 be_eq_notify(adapter, eq_obj->q.id, false, true, num);
1541}
1542
/* Drain and free everything on an RX queue at teardown time:
 * first discard any pending completions, then release the posted but
 * unused receive buffers. Must only be called after the RXQ has been
 * destroyed/invalidated so no new completions can arrive.
 */
static void be_rx_q_clean(struct be_adapter *adapter, struct be_rx_obj *rxo)
{
	struct be_rx_page_info *page_info;
	struct be_queue_info *rxq = &rxo->q;
	struct be_queue_info *rx_cq = &rxo->cq;
	struct be_rx_compl_info *rxcp;
	u16 tail;

	/* First cleanup pending rx completions */
	while ((rxcp = be_rx_compl_get(rxo)) != NULL) {
		be_rx_compl_discard(adapter, rxo, rxcp);
		be_cq_notify(adapter, rx_cq->id, false, 1);
	}

	/* Then free posted rx buffer that were not used */
	/* tail = oldest still-posted slot: head minus outstanding count,
	 * modulo the ring size */
	tail = (rxq->head + rxq->len - atomic_read(&rxq->used)) % rxq->len;
	/* NOTE(review): loop termination relies on get_rx_page_info()
	 * decrementing rxq->used — confirm in its definition */
	for (; atomic_read(&rxq->used) > 0; index_inc(&tail, rxq->len)) {
		page_info = get_rx_page_info(adapter, rxo, tail);
		put_page(page_info->page);
		memset(page_info, 0, sizeof(*page_info));
	}
	BUG_ON(atomic_read(&rxq->used));
}
1566
/* Reap all outstanding TX completions at teardown: poll the TX CQ for up
 * to ~200ms, then forcibly free any posted skbs whose completions will
 * never arrive (walking the WRB chains directly off the TX ring).
 */
static void be_tx_compl_clean(struct be_adapter *adapter)
{
	struct be_queue_info *tx_cq = &adapter->tx_obj.cq;
	struct be_queue_info *txq = &adapter->tx_obj.q;
	struct be_eth_tx_compl *txcp;
	u16 end_idx, cmpl = 0, timeo = 0, num_wrbs = 0;
	struct sk_buff **sent_skbs = adapter->tx_obj.sent_skb_list;
	struct sk_buff *sent_skb;
	bool dummy_wrb;

	/* Wait for a max of 200ms for all the tx-completions to arrive. */
	do {
		while ((txcp = be_tx_compl_get(tx_cq))) {
			end_idx = AMAP_GET_BITS(struct amap_eth_tx_compl,
					wrb_index, txcp);
			num_wrbs += be_tx_compl_process(adapter, end_idx);
			cmpl++;
		}
		if (cmpl) {
			/* Ack the reaped completions without re-arming and
			 * release the freed WRB slots */
			be_cq_notify(adapter, tx_cq->id, false, cmpl);
			atomic_sub(num_wrbs, &txq->used);
			cmpl = 0;
			num_wrbs = 0;
		}

		if (atomic_read(&txq->used) == 0 || ++timeo > 200)
			break;

		mdelay(1);
	} while (true);

	if (atomic_read(&txq->used))
		dev_err(&adapter->pdev->dev, "%d pending tx-completions\n",
			atomic_read(&txq->used));

	/* free posted tx for which compls will never arrive */
	while (atomic_read(&txq->used)) {
		sent_skb = sent_skbs[txq->tail];
		end_idx = txq->tail;
		/* Compute the last WRB index of this skb's chain from its
		 * WRB count, since no completion will tell us */
		index_adv(&end_idx,
			wrb_cnt_for_skb(adapter, sent_skb, &dummy_wrb) - 1,
			txq->len);
		num_wrbs = be_tx_compl_process(adapter, end_idx);
		atomic_sub(num_wrbs, &txq->used);
	}
}
1613
Sathya Perla5fb379e2009-06-18 00:02:59 +00001614static void be_mcc_queues_destroy(struct be_adapter *adapter)
1615{
1616 struct be_queue_info *q;
Sathya Perla5fb379e2009-06-18 00:02:59 +00001617
Sathya Perla8788fdc2009-07-27 22:52:03 +00001618 q = &adapter->mcc_obj.q;
Sathya Perla5fb379e2009-06-18 00:02:59 +00001619 if (q->created)
Sathya Perla8788fdc2009-07-27 22:52:03 +00001620 be_cmd_q_destroy(adapter, q, QTYPE_MCCQ);
Sathya Perla5fb379e2009-06-18 00:02:59 +00001621 be_queue_free(adapter, q);
1622
Sathya Perla8788fdc2009-07-27 22:52:03 +00001623 q = &adapter->mcc_obj.cq;
Sathya Perla5fb379e2009-06-18 00:02:59 +00001624 if (q->created)
Sathya Perla8788fdc2009-07-27 22:52:03 +00001625 be_cmd_q_destroy(adapter, q, QTYPE_CQ);
Sathya Perla5fb379e2009-06-18 00:02:59 +00001626 be_queue_free(adapter, q);
1627}
1628
1629/* Must be called only after TX qs are created as MCC shares TX EQ */
1630static int be_mcc_queues_create(struct be_adapter *adapter)
1631{
1632 struct be_queue_info *q, *cq;
Sathya Perla5fb379e2009-06-18 00:02:59 +00001633
1634 /* Alloc MCC compl queue */
Sathya Perla8788fdc2009-07-27 22:52:03 +00001635 cq = &adapter->mcc_obj.cq;
Sathya Perla5fb379e2009-06-18 00:02:59 +00001636 if (be_queue_alloc(adapter, cq, MCC_CQ_LEN,
Sathya Perlaefd2e402009-07-27 22:53:10 +00001637 sizeof(struct be_mcc_compl)))
Sathya Perla5fb379e2009-06-18 00:02:59 +00001638 goto err;
1639
1640 /* Ask BE to create MCC compl queue; share TX's eq */
Sathya Perla8788fdc2009-07-27 22:52:03 +00001641 if (be_cmd_cq_create(adapter, cq, &adapter->tx_eq.q, false, true, 0))
Sathya Perla5fb379e2009-06-18 00:02:59 +00001642 goto mcc_cq_free;
1643
1644 /* Alloc MCC queue */
Sathya Perla8788fdc2009-07-27 22:52:03 +00001645 q = &adapter->mcc_obj.q;
Sathya Perla5fb379e2009-06-18 00:02:59 +00001646 if (be_queue_alloc(adapter, q, MCC_Q_LEN, sizeof(struct be_mcc_wrb)))
1647 goto mcc_cq_destroy;
1648
1649 /* Ask BE to create MCC queue */
Sathya Perla8788fdc2009-07-27 22:52:03 +00001650 if (be_cmd_mccq_create(adapter, q, cq))
Sathya Perla5fb379e2009-06-18 00:02:59 +00001651 goto mcc_q_free;
1652
1653 return 0;
1654
1655mcc_q_free:
1656 be_queue_free(adapter, q);
1657mcc_cq_destroy:
Sathya Perla8788fdc2009-07-27 22:52:03 +00001658 be_cmd_q_destroy(adapter, cq, QTYPE_CQ);
Sathya Perla5fb379e2009-06-18 00:02:59 +00001659mcc_cq_free:
1660 be_queue_free(adapter, cq);
1661err:
1662 return -1;
1663}
1664
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001665static void be_tx_queues_destroy(struct be_adapter *adapter)
1666{
1667 struct be_queue_info *q;
1668
1669 q = &adapter->tx_obj.q;
Sathya Perlaa8e91792009-08-10 03:42:43 +00001670 if (q->created)
Sathya Perla8788fdc2009-07-27 22:52:03 +00001671 be_cmd_q_destroy(adapter, q, QTYPE_TXQ);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001672 be_queue_free(adapter, q);
1673
1674 q = &adapter->tx_obj.cq;
1675 if (q->created)
Sathya Perla8788fdc2009-07-27 22:52:03 +00001676 be_cmd_q_destroy(adapter, q, QTYPE_CQ);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001677 be_queue_free(adapter, q);
1678
Sathya Perla859b1e42009-08-10 03:43:51 +00001679 /* Clear any residual events */
1680 be_eq_clean(adapter, &adapter->tx_eq);
1681
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001682 q = &adapter->tx_eq.q;
1683 if (q->created)
Sathya Perla8788fdc2009-07-27 22:52:03 +00001684 be_cmd_q_destroy(adapter, q, QTYPE_EQ);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001685 be_queue_free(adapter, q);
1686}
1687
/* Allocate and create the TX event queue, TX completion queue and TX
 * WRB queue, in dependency order (EQ -> CQ -> Q). On any failure,
 * everything created so far is unwound via the goto ladder.
 * Returns 0 on success, -1 on failure.
 */
static int be_tx_queues_create(struct be_adapter *adapter)
{
	struct be_queue_info *eq, *q, *cq;

	/* Fixed interrupt moderation (96us); adaptive coalescing off
	 * for the TX EQ */
	adapter->tx_eq.max_eqd = 0;
	adapter->tx_eq.min_eqd = 0;
	adapter->tx_eq.cur_eqd = 96;
	adapter->tx_eq.enable_aic = false;
	/* Alloc Tx Event queue */
	eq = &adapter->tx_eq.q;
	if (be_queue_alloc(adapter, eq, EVNT_Q_LEN, sizeof(struct be_eq_entry)))
		return -1;

	/* Ask BE to create Tx Event queue */
	if (be_cmd_eq_create(adapter, eq, adapter->tx_eq.cur_eqd))
		goto tx_eq_free;

	/* Record this EQ's position for ISR bit-mask decoding */
	adapter->tx_eq.eq_idx = adapter->eq_next_idx++;

	/* Alloc TX eth compl queue */
	cq = &adapter->tx_obj.cq;
	if (be_queue_alloc(adapter, cq, TX_CQ_LEN,
			sizeof(struct be_eth_tx_compl)))
		goto tx_eq_destroy;

	/* Ask BE to create Tx eth compl queue */
	if (be_cmd_cq_create(adapter, cq, eq, false, false, 3))
		goto tx_cq_free;

	/* Alloc TX eth queue */
	q = &adapter->tx_obj.q;
	if (be_queue_alloc(adapter, q, TX_Q_LEN, sizeof(struct be_eth_wrb)))
		goto tx_cq_destroy;

	/* Ask BE to create Tx eth queue */
	if (be_cmd_txq_create(adapter, q, cq))
		goto tx_q_free;
	return 0;

tx_q_free:
	be_queue_free(adapter, q);
tx_cq_destroy:
	be_cmd_q_destroy(adapter, cq, QTYPE_CQ);
tx_cq_free:
	be_queue_free(adapter, cq);
tx_eq_destroy:
	be_cmd_q_destroy(adapter, eq, QTYPE_EQ);
tx_eq_free:
	be_queue_free(adapter, eq);
	return -1;
}
1740
/* Tear down every RX object: destroy the RXQ first (so DMA stops), then
 * drain and free its buffers, then the CQ, and finally the EQ after
 * clearing residual events.
 */
static void be_rx_queues_destroy(struct be_adapter *adapter)
{
	struct be_queue_info *q;
	struct be_rx_obj *rxo;
	int i;

	for_all_rx_queues(adapter, rxo, i) {
		q = &rxo->q;
		if (q->created) {
			be_cmd_q_destroy(adapter, q, QTYPE_RXQ);
			/* After the rxq is invalidated, wait for a grace time
			 * of 1ms for all dma to end and the flush compl to
			 * arrive
			 */
			mdelay(1);
			be_rx_q_clean(adapter, rxo);
		}
		be_queue_free(adapter, q);

		q = &rxo->cq;
		if (q->created)
			be_cmd_q_destroy(adapter, q, QTYPE_CQ);
		be_queue_free(adapter, q);

		/* Clear any residual events */
		q = &rxo->rx_eq.q;
		if (q->created) {
			be_eq_clean(adapter, &rxo->rx_eq);
			be_cmd_q_destroy(adapter, q, QTYPE_EQ);
		}
		be_queue_free(adapter, q);
	}
}
1774
Sathya Perlaac6a0c42011-03-21 20:49:25 +00001775static u32 be_num_rxqs_want(struct be_adapter *adapter)
1776{
1777 if (multi_rxq && (adapter->function_caps & BE_FUNCTION_CAPS_RSS) &&
1778 !adapter->sriov_enabled && !(adapter->function_mode & 0x400)) {
1779 return 1 + MAX_RSS_QS; /* one default non-RSS queue */
1780 } else {
1781 dev_warn(&adapter->pdev->dev,
1782 "No support for multiple RX queues\n");
1783 return 1;
1784 }
1785}
1786
/* Create all RX objects. The queue count is capped by the available
 * MSI-X vectors (one is reserved for TX/MCC). For each RX object the
 * EQ, CQ and RXQ are created in dependency order; queue 0 is the
 * default non-RSS queue, the rest are RSS queues whose ids are then
 * programmed into the RSS indirection table.
 * Returns 0 on success; on failure everything is destroyed and -1
 * returned.
 */
static int be_rx_queues_create(struct be_adapter *adapter)
{
	struct be_queue_info *eq, *q, *cq;
	struct be_rx_obj *rxo;
	int rc, i;

	adapter->num_rx_qs = min(be_num_rxqs_want(adapter),
				msix_enabled(adapter) ?
					adapter->num_msix_vec - 1 : 1);
	if (adapter->num_rx_qs != MAX_RX_QS)
		dev_warn(&adapter->pdev->dev,
			"Can create only %d RX queues", adapter->num_rx_qs);

	adapter->big_page_size = (1 << get_order(rx_frag_size)) * PAGE_SIZE;
	for_all_rx_queues(adapter, rxo, i) {
		rxo->adapter = adapter;
		/* Adaptive interrupt coalescing on for RX EQs */
		rxo->rx_eq.max_eqd = BE_MAX_EQD;
		rxo->rx_eq.enable_aic = true;

		/* EQ */
		eq = &rxo->rx_eq.q;
		rc = be_queue_alloc(adapter, eq, EVNT_Q_LEN,
					sizeof(struct be_eq_entry));
		if (rc)
			goto err;

		rc = be_cmd_eq_create(adapter, eq, rxo->rx_eq.cur_eqd);
		if (rc)
			goto err;

		/* Record the EQ's position for ISR bit-mask decoding */
		rxo->rx_eq.eq_idx = adapter->eq_next_idx++;

		/* CQ */
		cq = &rxo->cq;
		rc = be_queue_alloc(adapter, cq, RX_CQ_LEN,
				sizeof(struct be_eth_rx_compl));
		if (rc)
			goto err;

		rc = be_cmd_cq_create(adapter, cq, eq, false, false, 3);
		if (rc)
			goto err;
		/* Rx Q */
		q = &rxo->q;
		rc = be_queue_alloc(adapter, q, RX_Q_LEN,
				sizeof(struct be_eth_rx_d));
		if (rc)
			goto err;

		/* Queue 0 is the default queue; others are RSS-enabled */
		rc = be_cmd_rxq_create(adapter, q, cq->id, rx_frag_size,
			BE_MAX_JUMBO_FRAME_SIZE, adapter->if_handle,
			(i > 0) ? 1 : 0/* rss enable */, &rxo->rss_id);
		if (rc)
			goto err;
	}

	if (be_multi_rxq(adapter)) {
		u8 rsstable[MAX_RSS_QS];

		for_all_rss_queues(adapter, rxo, i)
			rsstable[i] = rxo->rss_id;

		rc = be_cmd_rss_config(adapter, rsstable,
			adapter->num_rx_qs - 1);
		if (rc)
			goto err;
	}

	return 0;
err:
	be_rx_queues_destroy(adapter);
	return -1;
}
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001860
Sathya Perlafe6d2a32010-11-21 23:25:50 +00001861static bool event_peek(struct be_eq_obj *eq_obj)
Sathya Perlab628bde2009-08-17 00:58:26 +00001862{
Sathya Perlafe6d2a32010-11-21 23:25:50 +00001863 struct be_eq_entry *eqe = queue_tail_node(&eq_obj->q);
1864 if (!eqe->evt)
1865 return false;
1866 else
1867 return true;
Sathya Perlab628bde2009-08-17 00:58:26 +00001868}
1869
/* INTx (legacy, possibly shared) interrupt handler.
 * Lancer has no CEV_ISR register, so each EQ is peeked for pending
 * events; if none fired the interrupt belongs to another device on the
 * shared line (IRQ_NONE). On BE chips, ISR0 is read to learn exactly
 * which EQs fired — the read also clears it, so every set bit must be
 * serviced here.
 */
static irqreturn_t be_intx(int irq, void *dev)
{
	struct be_adapter *adapter = dev;
	struct be_rx_obj *rxo;
	int isr, i, tx = 0 , rx = 0;

	if (lancer_chip(adapter)) {
		if (event_peek(&adapter->tx_eq))
			tx = event_handle(adapter, &adapter->tx_eq);
		for_all_rx_queues(adapter, rxo, i) {
			if (event_peek(&rxo->rx_eq))
				rx |= event_handle(adapter, &rxo->rx_eq);
		}

		/* No events anywhere: not our interrupt */
		if (!(tx || rx))
			return IRQ_NONE;

	} else {
		isr = ioread32(adapter->csr + CEV_ISR0_OFFSET +
			(adapter->tx_eq.q.id / 8) * CEV_ISR_SIZE);
		if (!isr)
			return IRQ_NONE;

		/* Service each EQ whose bit is set in the ISR word */
		if ((1 << adapter->tx_eq.eq_idx & isr))
			event_handle(adapter, &adapter->tx_eq);

		for_all_rx_queues(adapter, rxo, i) {
			if ((1 << rxo->rx_eq.eq_idx & isr))
				event_handle(adapter, &rxo->rx_eq);
		}
	}

	return IRQ_HANDLED;
}
1904
1905static irqreturn_t be_msix_rx(int irq, void *dev)
1906{
Sathya Perla3abcded2010-10-03 22:12:27 -07001907 struct be_rx_obj *rxo = dev;
1908 struct be_adapter *adapter = rxo->adapter;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001909
Sathya Perla3abcded2010-10-03 22:12:27 -07001910 event_handle(adapter, &rxo->rx_eq);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001911
1912 return IRQ_HANDLED;
1913}
1914
Sathya Perla5fb379e2009-06-18 00:02:59 +00001915static irqreturn_t be_msix_tx_mcc(int irq, void *dev)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001916{
1917 struct be_adapter *adapter = dev;
1918
Sathya Perla8788fdc2009-07-27 22:52:03 +00001919 event_handle(adapter, &adapter->tx_eq);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001920
1921 return IRQ_HANDLED;
1922}
1923
Sathya Perla2e588f82011-03-11 02:49:26 +00001924static inline bool do_gro(struct be_rx_compl_info *rxcp)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001925{
Sathya Perla2e588f82011-03-11 02:49:26 +00001926 return (rxcp->tcpf && !rxcp->err) ? true : false;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001927}
1928
/* NAPI poll handler for one RX queue: process up to @budget completions
 * (GRO for clean TCP frames, the slow path otherwise), replenish the RX
 * ring when it runs low, and re-arm the CQ only when fully drained.
 * Returns the number of completions processed.
 */
static int be_poll_rx(struct napi_struct *napi, int budget)
{
	struct be_eq_obj *rx_eq = container_of(napi, struct be_eq_obj, napi);
	struct be_rx_obj *rxo = container_of(rx_eq, struct be_rx_obj, rx_eq);
	struct be_adapter *adapter = rxo->adapter;
	struct be_queue_info *rx_cq = &rxo->cq;
	struct be_rx_compl_info *rxcp;
	u32 work_done;

	rxo->stats.rx_polls++;
	for (work_done = 0; work_done < budget; work_done++) {
		rxcp = be_rx_compl_get(rxo);
		if (!rxcp)
			break;

		/* Ignore flush completions */
		if (rxcp->num_rcvd && rxcp->pkt_size) {
			if (do_gro(rxcp))
				be_rx_compl_process_gro(adapter, rxo, rxcp);
			else
				be_rx_compl_process(adapter, rxo, rxcp);
		} else if (rxcp->pkt_size == 0) {
			/* Zero-length completion: just reclaim its frags */
			be_rx_compl_discard(adapter, rxo, rxcp);
		}

		be_rx_stats_update(rxo, rxcp);
	}

	/* Refill the queue */
	if (atomic_read(&rxo->q.used) < RX_FRAGS_REFILL_WM)
		be_post_rx_frags(rxo, GFP_ATOMIC);

	/* All consumed */
	if (work_done < budget) {
		napi_complete(napi);
		/* Ack processed entries and re-arm the CQ interrupt */
		be_cq_notify(adapter, rx_cq->id, true, work_done);
	} else {
		/* More to be consumed; continue with interrupts disabled */
		be_cq_notify(adapter, rx_cq->id, false, work_done);
	}
	return work_done;
}
1971
/* As TX and MCC share the same EQ check for both TX and MCC completions.
 * For TX/MCC we don't honour budget; consume everything
 */
static int be_poll_tx_mcc(struct napi_struct *napi, int budget)
{
	struct be_eq_obj *tx_eq = container_of(napi, struct be_eq_obj, napi);
	struct be_adapter *adapter =
		container_of(tx_eq, struct be_adapter, tx_eq);
	struct be_queue_info *txq = &adapter->tx_obj.q;
	struct be_queue_info *tx_cq = &adapter->tx_obj.cq;
	struct be_eth_tx_compl *txcp;
	int tx_compl = 0, mcc_compl, status = 0;
	u16 end_idx, num_wrbs = 0;

	/* Reap every pending TX completion, reclaiming its WRB chain */
	while ((txcp = be_tx_compl_get(tx_cq))) {
		end_idx = AMAP_GET_BITS(struct amap_eth_tx_compl,
				wrb_index, txcp);
		num_wrbs += be_tx_compl_process(adapter, end_idx);
		tx_compl++;
	}

	mcc_compl = be_process_mcc(adapter, &status);

	/* Everything was consumed, so completing NAPI unconditionally
	 * is safe here */
	napi_complete(napi);

	if (mcc_compl) {
		struct be_mcc_obj *mcc_obj = &adapter->mcc_obj;
		be_cq_notify(adapter, mcc_obj->cq.id, true, mcc_compl);
	}

	if (tx_compl) {
		be_cq_notify(adapter, adapter->tx_obj.cq.id, true, tx_compl);

		atomic_sub(num_wrbs, &txq->used);

		/* As Tx wrbs have been freed up, wake up netdev queue if
		 * it was stopped due to lack of tx wrbs.
		 */
		if (netif_queue_stopped(adapter->netdev) &&
			atomic_read(&txq->used) < txq->len / 2) {
			netif_wake_queue(adapter->netdev);
		}

		tx_stats(adapter)->be_tx_events++;
		tx_stats(adapter)->be_tx_compl += tx_compl;
	}

	/* Budget is not honoured; always report one unit of work done */
	return 1;
}
2021
Ajit Khaparded053de92010-09-03 06:23:30 +00002022void be_detect_dump_ue(struct be_adapter *adapter)
Ajit Khaparde7c185272010-07-29 06:16:33 +00002023{
2024 u32 ue_status_lo, ue_status_hi, ue_status_lo_mask, ue_status_hi_mask;
2025 u32 i;
2026
2027 pci_read_config_dword(adapter->pdev,
2028 PCICFG_UE_STATUS_LOW, &ue_status_lo);
2029 pci_read_config_dword(adapter->pdev,
2030 PCICFG_UE_STATUS_HIGH, &ue_status_hi);
2031 pci_read_config_dword(adapter->pdev,
2032 PCICFG_UE_STATUS_LOW_MASK, &ue_status_lo_mask);
2033 pci_read_config_dword(adapter->pdev,
2034 PCICFG_UE_STATUS_HI_MASK, &ue_status_hi_mask);
2035
2036 ue_status_lo = (ue_status_lo & (~ue_status_lo_mask));
2037 ue_status_hi = (ue_status_hi & (~ue_status_hi_mask));
2038
Ajit Khaparded053de92010-09-03 06:23:30 +00002039 if (ue_status_lo || ue_status_hi) {
2040 adapter->ue_detected = true;
Ajit Khaparde7acc2082011-02-11 13:38:17 +00002041 adapter->eeh_err = true;
Ajit Khaparded053de92010-09-03 06:23:30 +00002042 dev_err(&adapter->pdev->dev, "UE Detected!!\n");
2043 }
2044
Ajit Khaparde7c185272010-07-29 06:16:33 +00002045 if (ue_status_lo) {
2046 for (i = 0; ue_status_lo; ue_status_lo >>= 1, i++) {
2047 if (ue_status_lo & 1)
2048 dev_err(&adapter->pdev->dev,
2049 "UE: %s bit set\n", ue_status_low_desc[i]);
2050 }
2051 }
2052 if (ue_status_hi) {
2053 for (i = 0; ue_status_hi; ue_status_hi >>= 1, i++) {
2054 if (ue_status_hi & 1)
2055 dev_err(&adapter->pdev->dev,
2056 "UE: %s bit set\n", ue_status_hi_desc[i]);
2057 }
2058 }
2059
2060}
2061
/* Periodic (1s) housekeeping worker: UE detection, MCC reaping while the
 * interface is down, stats refresh, per-RXQ rate/EQD updates, and
 * replenishing RX rings that starved under GFP_ATOMIC.
 * Always reschedules itself.
 */
static void be_worker(struct work_struct *work)
{
	struct be_adapter *adapter =
		container_of(work, struct be_adapter, work.work);
	struct be_rx_obj *rxo;
	int i;

	/* Lancer has no UE registers; stop checking once a UE is latched */
	if (!adapter->ue_detected && !lancer_chip(adapter))
		be_detect_dump_ue(adapter);

	/* when interrupts are not yet enabled, just reap any pending
	 * mcc completions */
	if (!netif_running(adapter->netdev)) {
		int mcc_compl, status = 0;

		mcc_compl = be_process_mcc(adapter, &status);

		if (mcc_compl) {
			struct be_mcc_obj *mcc_obj = &adapter->mcc_obj;
			be_cq_notify(adapter, mcc_obj->cq.id, false, mcc_compl);
		}

		goto reschedule;
	}

	/* Kick off a new stats request only if the previous one finished */
	if (!adapter->stats_cmd_sent) {
		if (lancer_chip(adapter))
			lancer_cmd_get_pport_stats(adapter,
						&adapter->stats_cmd);
		else
			be_cmd_get_stats(adapter, &adapter->stats_cmd);
	}
	be_tx_rate_update(adapter);

	for_all_rx_queues(adapter, rxo, i) {
		be_rx_rate_update(rxo);
		be_rx_eqd_update(adapter, rxo);

		/* Retry with GFP_KERNEL what GFP_ATOMIC couldn't satisfy */
		if (rxo->rx_post_starved) {
			rxo->rx_post_starved = false;
			be_post_rx_frags(rxo, GFP_KERNEL);
		}
	}

reschedule:
	adapter->work_counter++;
	schedule_delayed_work(&adapter->work, msecs_to_jiffies(1000));
}
2110
Sathya Perla8d56ff12009-11-22 22:02:26 +00002111static void be_msix_disable(struct be_adapter *adapter)
2112{
Sathya Perlaac6a0c42011-03-21 20:49:25 +00002113 if (msix_enabled(adapter)) {
Sathya Perla8d56ff12009-11-22 22:02:26 +00002114 pci_disable_msix(adapter->pdev);
Sathya Perlaac6a0c42011-03-21 20:49:25 +00002115 adapter->num_msix_vec = 0;
Sathya Perla3abcded2010-10-03 22:12:27 -07002116 }
2117}
2118
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002119static void be_msix_enable(struct be_adapter *adapter)
2120{
Sathya Perla3abcded2010-10-03 22:12:27 -07002121#define BE_MIN_MSIX_VECTORS (1 + 1) /* Rx + Tx */
Sathya Perlaac6a0c42011-03-21 20:49:25 +00002122 int i, status, num_vec;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002123
Sathya Perlaac6a0c42011-03-21 20:49:25 +00002124 num_vec = be_num_rxqs_want(adapter) + 1;
Sathya Perla3abcded2010-10-03 22:12:27 -07002125
Sathya Perlaac6a0c42011-03-21 20:49:25 +00002126 for (i = 0; i < num_vec; i++)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002127 adapter->msix_entries[i].entry = i;
2128
Sathya Perlaac6a0c42011-03-21 20:49:25 +00002129 status = pci_enable_msix(adapter->pdev, adapter->msix_entries, num_vec);
Sathya Perla3abcded2010-10-03 22:12:27 -07002130 if (status == 0) {
2131 goto done;
2132 } else if (status >= BE_MIN_MSIX_VECTORS) {
Sathya Perlaac6a0c42011-03-21 20:49:25 +00002133 num_vec = status;
Sathya Perla3abcded2010-10-03 22:12:27 -07002134 if (pci_enable_msix(adapter->pdev, adapter->msix_entries,
Sathya Perlaac6a0c42011-03-21 20:49:25 +00002135 num_vec) == 0)
Sathya Perla3abcded2010-10-03 22:12:27 -07002136 goto done;
Sathya Perla3abcded2010-10-03 22:12:27 -07002137 }
2138 return;
2139done:
Sathya Perlaac6a0c42011-03-21 20:49:25 +00002140 adapter->num_msix_vec = num_vec;
2141 return;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002142}
2143
/* Enable SR-IOV on the PF when the user requested VFs via the num_vfs
 * module parameter. The requested count is clamped to the TotalVFs value
 * advertised in the device's SR-IOV extended capability.
 * Sets adapter->sriov_enabled on success; compiled out of !CONFIG_PCI_IOV
 * kernels except for the function-type check.
 */
static void be_sriov_enable(struct be_adapter *adapter)
{
	be_check_sriov_fn_type(adapter);
#ifdef CONFIG_PCI_IOV
	if (be_physfn(adapter) && num_vfs) {
		int status, pos;
		u16 nvfs;

		pos = pci_find_ext_capability(adapter->pdev,
						PCI_EXT_CAP_ID_SRIOV);
		/* Fix: pci_find_ext_capability() returns 0 when the SR-IOV
		 * capability is absent; reading TotalVFs at offset 0 would
		 * fetch garbage from config space. Bail out instead. */
		if (!pos)
			return;

		pci_read_config_word(adapter->pdev,
					pos + PCI_SRIOV_TOTAL_VF, &nvfs);

		/* Clamp the request to what the device supports */
		if (num_vfs > nvfs) {
			dev_info(&adapter->pdev->dev,
					"Device supports %d VFs and not %d\n",
					nvfs, num_vfs);
			num_vfs = nvfs;
		}

		status = pci_enable_sriov(adapter->pdev, num_vfs);
		adapter->sriov_enabled = status ? false : true;
	}
#endif
}
2169
/* Undo be_sriov_enable(): tear down any VFs and clear the enabled flag.
 * No-op when SR-IOV was never enabled or the kernel lacks CONFIG_PCI_IOV.
 */
static void be_sriov_disable(struct be_adapter *adapter)
{
#ifdef CONFIG_PCI_IOV
	if (!adapter->sriov_enabled)
		return;

	pci_disable_sriov(adapter->pdev);
	adapter->sriov_enabled = false;
#endif
}
2179
Sathya Perlafe6d2a32010-11-21 23:25:50 +00002180static inline int be_msix_vec_get(struct be_adapter *adapter,
2181 struct be_eq_obj *eq_obj)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002182{
Padmanabh Ratnakarecd62102011-04-03 01:54:11 +00002183 return adapter->msix_entries[eq_obj->eq_idx].vector;
Sathya Perlab628bde2009-08-17 00:58:26 +00002184}
2185
/* Build the IRQ description string ("<ifname>-<desc>") in eq_obj->desc
 * and request the MSI-X vector associated with this event queue.
 * 'context' is the cookie passed back to the handler (and must match the
 * one later given to be_free_irq()).
 * Returns 0 or the error from request_irq().
 */
static int be_request_irq(struct be_adapter *adapter,
		struct be_eq_obj *eq_obj,
		void *handler, char *desc, void *context)
{
	struct net_device *netdev = adapter->netdev;
	int vec;

	/* NOTE(review): sprintf is unbounded — assumes eq_obj->desc can hold
	 * IFNAMSIZ plus the short desc suffix; confirm against the struct. */
	sprintf(eq_obj->desc, "%s-%s", netdev->name, desc);
	vec = be_msix_vec_get(adapter, eq_obj);
	return request_irq(vec, handler, 0, eq_obj->desc, context);
}
2197
/* Release the MSI-X vector bound to this event queue by be_request_irq();
 * 'context' must be the same cookie that was registered there.
 */
static void be_free_irq(struct be_adapter *adapter, struct be_eq_obj *eq_obj,
			void *context)
{
	free_irq(be_msix_vec_get(adapter, eq_obj), context);
}
2204
/* Register MSI-X handlers: one vector for TX/MCC, then one per RX queue.
 * On partial failure the already-registered vectors are freed in reverse
 * and MSI-X is disabled entirely so the caller can fall back to INTx.
 * Returns 0 on success or the request_irq() error.
 */
static int be_msix_register(struct be_adapter *adapter)
{
	struct be_rx_obj *rxo;
	int status, i;
	char qname[10];

	status = be_request_irq(adapter, &adapter->tx_eq, be_msix_tx_mcc, "tx",
				adapter);
	if (status)
		goto err;

	for_all_rx_queues(adapter, rxo, i) {
		sprintf(qname, "rxq%d", i);
		status = be_request_irq(adapter, &rxo->rx_eq, be_msix_rx,
				qname, rxo);
		if (status)
			goto err_msix;
	}

	return 0;

err_msix:
	/* Unwind in reverse: tx first, then the RX queues registered so far
	 * (i is the index of the queue that failed; [0, i) succeeded). */
	be_free_irq(adapter, &adapter->tx_eq, adapter);

	for (i--, rxo = &adapter->rx_obj[i]; i >= 0; i--, rxo--)
		be_free_irq(adapter, &rxo->rx_eq, rxo);

err:
	dev_warn(&adapter->pdev->dev,
		"MSIX Request IRQ failed - err %d\n", status);
	be_msix_disable(adapter);
	return status;
}
2238
/* Register interrupt handlers, preferring MSI-X and falling back to a
 * shared INTx line on the PF. Sets isr_registered on success so that
 * be_irq_unregister() knows there is something to undo.
 * Returns 0 on success or a request_irq()/be_msix_register() error.
 */
static int be_irq_register(struct be_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	int status;

	if (msix_enabled(adapter)) {
		status = be_msix_register(adapter);
		if (status == 0)
			goto done;
		/* INTx is not supported for VF */
		if (!be_physfn(adapter))
			return status;
	}

	/* INTx */
	netdev->irq = adapter->pdev->irq;
	status = request_irq(netdev->irq, be_intx, IRQF_SHARED, netdev->name,
			adapter);
	if (status) {
		dev_err(&adapter->pdev->dev,
			"INTx request IRQ failed - err %d\n", status);
		return status;
	}
done:
	adapter->isr_registered = true;
	return 0;
}
2266
/* Undo be_irq_register(): free either the shared INTx line or every
 * MSI-X vector (TX/MCC first, then all RX queues). Idempotent — does
 * nothing if no ISR is currently registered.
 */
static void be_irq_unregister(struct be_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	struct be_rx_obj *rxo;
	int i;

	if (!adapter->isr_registered)
		return;

	/* INTx */
	if (!msix_enabled(adapter)) {
		free_irq(netdev->irq, adapter);
		goto done;
	}

	/* MSIx */
	be_free_irq(adapter, &adapter->tx_eq, adapter);

	for_all_rx_queues(adapter, rxo, i)
		be_free_irq(adapter, &rxo->rx_eq, rxo);

done:
	adapter->isr_registered = false;
}
2291
/* netdev ndo_stop: quiesce the interface in a strict order — stop async
 * MCC events, drop carrier, mask chip interrupts, stop NAPI, de-arm CQs
 * (Lancer), wait for in-flight IRQs, unregister handlers, and finally
 * drain pending TX completions. Always returns 0.
 */
static int be_close(struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_rx_obj *rxo;
	struct be_eq_obj *tx_eq = &adapter->tx_eq;
	int vec, i;

	be_async_mcc_disable(adapter);

	netif_carrier_off(netdev);
	adapter->link_up = false;

	/* Lancer manages interrupt masking differently (see the CQ de-arm
	 * block below); only BEx uses the global intr enable bit. */
	if (!lancer_chip(adapter))
		be_intr_set(adapter, false);

	for_all_rx_queues(adapter, rxo, i)
		napi_disable(&rxo->rx_eq.napi);

	napi_disable(&tx_eq->napi);

	if (lancer_chip(adapter)) {
		be_cq_notify(adapter, adapter->tx_obj.cq.id, false, 0);
		be_cq_notify(adapter, adapter->mcc_obj.cq.id, false, 0);
		for_all_rx_queues(adapter, rxo, i)
			be_cq_notify(adapter, rxo->cq.id, false, 0);
	}

	/* Make sure no handler is still running before freeing the IRQs */
	if (msix_enabled(adapter)) {
		vec = be_msix_vec_get(adapter, tx_eq);
		synchronize_irq(vec);

		for_all_rx_queues(adapter, rxo, i) {
			vec = be_msix_vec_get(adapter, &rxo->rx_eq);
			synchronize_irq(vec);
		}
	} else {
		synchronize_irq(netdev->irq);
	}
	be_irq_unregister(adapter);

	/* Wait for all pending tx completions to arrive so that
	 * all tx skbs are freed.
	 */
	be_tx_compl_clean(adapter);

	return 0;
}
2339
/* netdev ndo_open: bring the interface up — post RX buffers, enable NAPI,
 * register IRQs, unmask chip interrupts, arm event/completion queues,
 * enable async MCC, then query link state and (on the PF) program VLAN
 * and flow-control settings.
 * Any failure after IRQs are live unwinds through be_close().
 * Returns 0 on success, -EIO on failure.
 */
static int be_open(struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_eq_obj *tx_eq = &adapter->tx_eq;
	struct be_rx_obj *rxo;
	bool link_up;
	int status, i;
	u8 mac_speed;
	u16 link_speed;

	for_all_rx_queues(adapter, rxo, i) {
		be_post_rx_frags(rxo, GFP_KERNEL);
		napi_enable(&rxo->rx_eq.napi);
	}
	napi_enable(&tx_eq->napi);

	be_irq_register(adapter);

	if (!lancer_chip(adapter))
		be_intr_set(adapter, true);

	/* The evt queues are created in unarmed state; arm them */
	for_all_rx_queues(adapter, rxo, i) {
		be_eq_notify(adapter, rxo->rx_eq.q.id, true, false, 0);
		be_cq_notify(adapter, rxo->cq.id, true, 0);
	}
	be_eq_notify(adapter, tx_eq->q.id, true, false, 0);

	/* Now that interrupts are on we can process async mcc */
	be_async_mcc_enable(adapter);

	status = be_cmd_link_status_query(adapter, &link_up, &mac_speed,
			&link_speed, 0);
	if (status)
		goto err;
	be_link_status_update(adapter, link_up);

	/* VLAN and flow-control configuration are PF-only operations */
	if (be_physfn(adapter)) {
		status = be_vid_config(adapter, false, 0);
		if (status)
			goto err;

		status = be_cmd_set_flow_control(adapter,
				adapter->tx_fc, adapter->rx_fc);
		if (status)
			goto err;
	}

	return 0;
err:
	be_close(adapter->netdev);
	return -EIO;
}
2393
/* Enable or disable Wake-on-LAN (magic packet). When enabling, sets the
 * PCI PM control bits and programs the netdev MAC as the wake filter;
 * when disabling, programs an all-zero MAC (cleared above) to remove it.
 * Returns 0 on success, a firmware/PCI error code, or -1 when the DMA
 * buffer allocation fails (note: -1, not -ENOMEM).
 */
static int be_setup_wol(struct be_adapter *adapter, bool enable)
{
	struct be_dma_mem cmd;
	int status = 0;
	u8 mac[ETH_ALEN];

	/* zero MAC == "no wake filter", used on the disable path below */
	memset(mac, 0, ETH_ALEN);

	cmd.size = sizeof(struct be_cmd_req_acpi_wol_magic_config);
	cmd.va = dma_alloc_coherent(&adapter->pdev->dev, cmd.size, &cmd.dma,
				    GFP_KERNEL);
	if (cmd.va == NULL)
		return -1;
	memset(cmd.va, 0, cmd.size);

	if (enable) {
		status = pci_write_config_dword(adapter->pdev,
			PCICFG_PM_CONTROL_OFFSET, PCICFG_PM_CONTROL_MASK);
		if (status) {
			dev_err(&adapter->pdev->dev,
				"Could not enable Wake-on-lan\n");
			dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va,
					  cmd.dma);
			return status;
		}
		status = be_cmd_enable_magic_wol(adapter,
				adapter->netdev->dev_addr, &cmd);
		pci_enable_wake(adapter->pdev, PCI_D3hot, 1);
		pci_enable_wake(adapter->pdev, PCI_D3cold, 1);
	} else {
		status = be_cmd_enable_magic_wol(adapter, mac, &cmd);
		pci_enable_wake(adapter->pdev, PCI_D3hot, 0);
		pci_enable_wake(adapter->pdev, PCI_D3cold, 0);
	}

	dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va, cmd.dma);
	return status;
}
2432
/*
 * Generate a seed MAC address from the PF MAC Address using jhash.
 * MAC Address for VFs are assigned incrementally starting from the seed.
 * These addresses are programmed in the ASIC by the PF and the VF driver
 * queries for the MAC address during its probe.
 *
 * Returns the status of the LAST pmac_add attempt; earlier per-VF
 * failures are only logged. Each successful add is cached in vf_cfg.
 * NOTE(review): only mac[5] is incremented, so the sequence wraps after
 * 256 VFs — acceptable given current VF limits, but worth confirming.
 */
static inline int be_vf_eth_addr_config(struct be_adapter *adapter)
{
	u32 vf = 0;
	int status = 0;
	u8 mac[ETH_ALEN];

	be_vf_eth_addr_generate(adapter, mac);

	for (vf = 0; vf < num_vfs; vf++) {
		status = be_cmd_pmac_add(adapter, mac,
					adapter->vf_cfg[vf].vf_if_handle,
					&adapter->vf_cfg[vf].vf_pmac_id,
					vf + 1);
		if (status)
			dev_err(&adapter->pdev->dev,
				"Mac address add failed for VF %d\n", vf);
		else
			memcpy(adapter->vf_cfg[vf].vf_mac_addr, mac, ETH_ALEN);

		mac[5] += 1;
	}
	return status;
}
2462
2463static inline void be_vf_eth_addr_rem(struct be_adapter *adapter)
2464{
2465 u32 vf;
2466
2467 for (vf = 0; vf < num_vfs; vf++) {
2468 if (adapter->vf_cfg[vf].vf_pmac_id != BE_INVALID_PMAC_ID)
2469 be_cmd_pmac_del(adapter,
2470 adapter->vf_cfg[vf].vf_if_handle,
Ajit Khapardef8617e02011-02-11 13:36:37 +00002471 adapter->vf_cfg[vf].vf_pmac_id, vf + 1);
Ajit Khaparde6d87f5c2010-08-25 00:32:33 +00002472 }
2473}
2474
/* One-time per-probe setup: create the function's interface (with
 * PF-only capability flags and RSS when the function supports it), the
 * per-VF interfaces on an SR-IOV PF, and then the TX, RX and MCC queue
 * sets. On a VF, queries and adopts the firmware-assigned MAC.
 * Any failure unwinds everything created so far via the goto chain.
 * Returns 0 on success or a firmware command status.
 */
static int be_setup(struct be_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	u32 cap_flags, en_flags, vf = 0;
	int status;
	u8 mac[ETH_ALEN];

	cap_flags = en_flags = BE_IF_FLAGS_UNTAGGED |
				BE_IF_FLAGS_BROADCAST |
				BE_IF_FLAGS_MULTICAST;

	if (be_physfn(adapter)) {
		cap_flags |= BE_IF_FLAGS_MCAST_PROMISCUOUS |
				BE_IF_FLAGS_PROMISCUOUS |
				BE_IF_FLAGS_PASS_L3L4_ERRORS;
		en_flags |= BE_IF_FLAGS_PASS_L3L4_ERRORS;

		if (adapter->function_caps & BE_FUNCTION_CAPS_RSS) {
			cap_flags |= BE_IF_FLAGS_RSS;
			en_flags |= BE_IF_FLAGS_RSS;
		}
	}

	status = be_cmd_if_create(adapter, cap_flags, en_flags,
			netdev->dev_addr, false/* pmac_invalid */,
			&adapter->if_handle, &adapter->pmac_id, 0);
	if (status != 0)
		goto do_none;

	if (be_physfn(adapter)) {
		if (adapter->sriov_enabled) {
			/* Create one restricted interface per VF. 'mac' is
			 * uninitialized here but unused since pmac_invalid
			 * is passed as true. */
			while (vf < num_vfs) {
				cap_flags = en_flags = BE_IF_FLAGS_UNTAGGED |
							BE_IF_FLAGS_BROADCAST;
				status = be_cmd_if_create(adapter, cap_flags,
					en_flags, mac, true,
					&adapter->vf_cfg[vf].vf_if_handle,
					NULL, vf+1);
				if (status) {
					dev_err(&adapter->pdev->dev,
					"Interface Create failed for VF %d\n",
					vf);
					goto if_destroy;
				}
				adapter->vf_cfg[vf].vf_pmac_id =
						BE_INVALID_PMAC_ID;
				vf++;
			}
		}
	} else {
		/* VF: adopt the MAC the PF/firmware assigned to us */
		status = be_cmd_mac_addr_query(adapter, mac,
			MAC_ADDRESS_TYPE_NETWORK, false, adapter->if_handle);
		if (!status) {
			memcpy(adapter->netdev->dev_addr, mac, ETH_ALEN);
			memcpy(adapter->netdev->perm_addr, mac, ETH_ALEN);
		}
	}

	status = be_tx_queues_create(adapter);
	if (status != 0)
		goto if_destroy;

	status = be_rx_queues_create(adapter);
	if (status != 0)
		goto tx_qs_destroy;

	status = be_mcc_queues_create(adapter);
	if (status != 0)
		goto rx_qs_destroy;

	/* force the first link-status event to be reported */
	adapter->link_speed = -1;

	return 0;

rx_qs_destroy:
	be_rx_queues_destroy(adapter);
tx_qs_destroy:
	be_tx_queues_destroy(adapter);
if_destroy:
	/* destroy any VF interfaces created above, then our own */
	if (be_physfn(adapter) && adapter->sriov_enabled)
		for (vf = 0; vf < num_vfs; vf++)
			if (adapter->vf_cfg[vf].vf_if_handle)
				be_cmd_if_destroy(adapter,
					adapter->vf_cfg[vf].vf_if_handle,
					vf + 1);
	be_cmd_if_destroy(adapter, adapter->if_handle, 0);
do_none:
	return status;
}
2564
/* Tear down everything be_setup() created, in reverse order: VF MACs,
 * the MCC/RX/TX queue sets, the per-VF interfaces, our own interface,
 * and finally tell firmware we are done issuing commands.
 * Always returns 0.
 */
static int be_clear(struct be_adapter *adapter)
{
	int vf;

	if (be_physfn(adapter) && adapter->sriov_enabled)
		be_vf_eth_addr_rem(adapter);

	be_mcc_queues_destroy(adapter);
	be_rx_queues_destroy(adapter);
	be_tx_queues_destroy(adapter);
	adapter->eq_next_idx = 0;

	if (be_physfn(adapter) && adapter->sriov_enabled)
		for (vf = 0; vf < num_vfs; vf++)
			if (adapter->vf_cfg[vf].vf_if_handle)
				be_cmd_if_destroy(adapter,
					adapter->vf_cfg[vf].vf_if_handle,
					vf + 1);

	be_cmd_if_destroy(adapter, adapter->if_handle, 0);

	/* tell fw we're done with firing cmds */
	be_cmd_fw_clean(adapter);
	return 0;
}
2590
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002591
Ajit Khaparde84517482009-09-04 03:12:16 +00002592#define FW_FILE_HDR_SIGN "ServerEngines Corp. "
Sarveshwar Bandifa9a6fe2009-11-20 14:23:47 -08002593static bool be_flash_redboot(struct be_adapter *adapter,
Ajit Khaparde3f0d4562010-02-09 01:30:35 +00002594 const u8 *p, u32 img_start, int image_size,
2595 int hdr_size)
Sarveshwar Bandifa9a6fe2009-11-20 14:23:47 -08002596{
2597 u32 crc_offset;
2598 u8 flashed_crc[4];
2599 int status;
Ajit Khaparde3f0d4562010-02-09 01:30:35 +00002600
2601 crc_offset = hdr_size + img_start + image_size - 4;
2602
Sarveshwar Bandifa9a6fe2009-11-20 14:23:47 -08002603 p += crc_offset;
Ajit Khaparde3f0d4562010-02-09 01:30:35 +00002604
2605 status = be_cmd_get_flash_crc(adapter, flashed_crc,
Ajit Khapardef510fc62010-03-31 01:47:45 +00002606 (image_size - 4));
Sarveshwar Bandifa9a6fe2009-11-20 14:23:47 -08002607 if (status) {
2608 dev_err(&adapter->pdev->dev,
2609 "could not get crc from flash, not flashing redboot\n");
2610 return false;
2611 }
2612
2613 /*update redboot only if crc does not match*/
2614 if (!memcmp(flashed_crc, p, 4))
2615 return false;
2616 else
2617 return true;
Sarveshwar Bandifa9a6fe2009-11-20 14:23:47 -08002618}
2619
/* Flash every component of a UFI firmware image, driven by a per-chip
 * generation table of {flash offset, operation type, max size}. Each
 * component is streamed to the controller in 32 KB chunks via the shared
 * flash_cmd DMA buffer; the final chunk uses OPER_FLASH to commit, the
 * preceding ones OPER_SAVE to stage.
 * Returns 0 on success, -1 on a malformed image or a firmware error.
 */
static int be_flash_data(struct be_adapter *adapter,
			const struct firmware *fw,
			struct be_dma_mem *flash_cmd, int num_of_images)

{
	int status = 0, i, filehdr_size = 0;
	u32 total_bytes = 0, flash_op;
	int num_bytes;
	const u8 *p = fw->data;
	struct be_cmd_write_flashrom *req = flash_cmd->va;
	const struct flash_comp *pflashcomp;
	int num_comp;

	static const struct flash_comp gen3_flash_types[9] = {
		{ FLASH_iSCSI_PRIMARY_IMAGE_START_g3, IMG_TYPE_ISCSI_ACTIVE,
			FLASH_IMAGE_MAX_SIZE_g3},
		{ FLASH_REDBOOT_START_g3, IMG_TYPE_REDBOOT,
			FLASH_REDBOOT_IMAGE_MAX_SIZE_g3},
		{ FLASH_iSCSI_BIOS_START_g3, IMG_TYPE_BIOS,
			FLASH_BIOS_IMAGE_MAX_SIZE_g3},
		{ FLASH_PXE_BIOS_START_g3, IMG_TYPE_PXE_BIOS,
			FLASH_BIOS_IMAGE_MAX_SIZE_g3},
		{ FLASH_FCoE_BIOS_START_g3, IMG_TYPE_FCOE_BIOS,
			FLASH_BIOS_IMAGE_MAX_SIZE_g3},
		{ FLASH_iSCSI_BACKUP_IMAGE_START_g3, IMG_TYPE_ISCSI_BACKUP,
			FLASH_IMAGE_MAX_SIZE_g3},
		{ FLASH_FCoE_PRIMARY_IMAGE_START_g3, IMG_TYPE_FCOE_FW_ACTIVE,
			FLASH_IMAGE_MAX_SIZE_g3},
		{ FLASH_FCoE_BACKUP_IMAGE_START_g3, IMG_TYPE_FCOE_FW_BACKUP,
			FLASH_IMAGE_MAX_SIZE_g3},
		{ FLASH_NCSI_START_g3, IMG_TYPE_NCSI_FW,
			FLASH_NCSI_IMAGE_MAX_SIZE_g3}
	};
	static const struct flash_comp gen2_flash_types[8] = {
		{ FLASH_iSCSI_PRIMARY_IMAGE_START_g2, IMG_TYPE_ISCSI_ACTIVE,
			FLASH_IMAGE_MAX_SIZE_g2},
		{ FLASH_REDBOOT_START_g2, IMG_TYPE_REDBOOT,
			FLASH_REDBOOT_IMAGE_MAX_SIZE_g2},
		{ FLASH_iSCSI_BIOS_START_g2, IMG_TYPE_BIOS,
			FLASH_BIOS_IMAGE_MAX_SIZE_g2},
		{ FLASH_PXE_BIOS_START_g2, IMG_TYPE_PXE_BIOS,
			FLASH_BIOS_IMAGE_MAX_SIZE_g2},
		{ FLASH_FCoE_BIOS_START_g2, IMG_TYPE_FCOE_BIOS,
			FLASH_BIOS_IMAGE_MAX_SIZE_g2},
		{ FLASH_iSCSI_BACKUP_IMAGE_START_g2, IMG_TYPE_ISCSI_BACKUP,
			FLASH_IMAGE_MAX_SIZE_g2},
		{ FLASH_FCoE_PRIMARY_IMAGE_START_g2, IMG_TYPE_FCOE_FW_ACTIVE,
			FLASH_IMAGE_MAX_SIZE_g2},
		{ FLASH_FCoE_BACKUP_IMAGE_START_g2, IMG_TYPE_FCOE_FW_BACKUP,
			FLASH_IMAGE_MAX_SIZE_g2}
	};

	if (adapter->generation == BE_GEN3) {
		pflashcomp = gen3_flash_types;
		filehdr_size = sizeof(struct flash_file_hdr_g3);
		num_comp = ARRAY_SIZE(gen3_flash_types);
	} else {
		pflashcomp = gen2_flash_types;
		filehdr_size = sizeof(struct flash_file_hdr_g2);
		num_comp = ARRAY_SIZE(gen2_flash_types);
	}
	for (i = 0; i < num_comp; i++) {
		/* NCSI firmware is only flashable from fw version
		 * 3.102.148.0 onwards */
		if ((pflashcomp[i].optype == IMG_TYPE_NCSI_FW) &&
			memcmp(adapter->fw_ver, "3.102.148.0", 11) < 0)
			continue;
		/* redboot is only rewritten when its CRC actually changed */
		if ((pflashcomp[i].optype == IMG_TYPE_REDBOOT) &&
			(!be_flash_redboot(adapter, fw->data,
			pflashcomp[i].offset, pflashcomp[i].size, filehdr_size +
			(num_of_images * sizeof(struct image_hdr)))))
			continue;
		/* locate this component's payload inside the UFI file */
		p = fw->data;
		p += filehdr_size + pflashcomp[i].offset
			+ (num_of_images * sizeof(struct image_hdr));
		if (p + pflashcomp[i].size > fw->data + fw->size)
			return -1;
		total_bytes = pflashcomp[i].size;
		while (total_bytes) {
			if (total_bytes > 32*1024)
				num_bytes = 32*1024;
			else
				num_bytes = total_bytes;
			total_bytes -= num_bytes;

			/* last chunk commits; earlier chunks only stage */
			if (!total_bytes)
				flash_op = FLASHROM_OPER_FLASH;
			else
				flash_op = FLASHROM_OPER_SAVE;
			memcpy(req->params.data_buf, p, num_bytes);
			p += num_bytes;
			status = be_cmd_write_flashrom(adapter, flash_cmd,
				pflashcomp[i].optype, flash_op, num_bytes);
			if (status) {
				dev_err(&adapter->pdev->dev,
					"cmd to write to flash rom failed.\n");
				return -1;
			}
		}
	}
	return 0;
}
2720
Ajit Khaparde3f0d4562010-02-09 01:30:35 +00002721static int get_ufigen_type(struct flash_file_hdr_g2 *fhdr)
2722{
2723 if (fhdr == NULL)
2724 return 0;
2725 if (fhdr->build[0] == '3')
2726 return BE_GEN3;
2727 else if (fhdr->build[0] == '2')
2728 return BE_GEN2;
2729 else
2730 return 0;
2731}
2732
/* Download a firmware image to a Lancer chip: validate 4-byte alignment,
 * stream the image in 32 KB chunks through a DMA command buffer to the
 * "/prg" flash object, then issue a zero-length write to commit.
 * Returns 0 on success, -EINVAL/-ENOMEM for local failures, or the
 * firmware command status (with the additional status logged).
 */
static int lancer_fw_download(struct be_adapter *adapter,
				const struct firmware *fw)
{
#define LANCER_FW_DOWNLOAD_CHUNK      (32 * 1024)
#define LANCER_FW_DOWNLOAD_LOCATION   "/prg"
	struct be_dma_mem flash_cmd;
	const u8 *data_ptr = NULL;
	u8 *dest_image_ptr = NULL;
	size_t image_size = 0;
	u32 chunk_size = 0;
	u32 data_written = 0;
	u32 offset = 0;
	int status = 0;
	u8 add_status = 0;

	if (!IS_ALIGNED(fw->size, sizeof(u32))) {
		dev_err(&adapter->pdev->dev,
			"FW Image not properly aligned. "
			"Length must be 4 byte aligned.\n");
		status = -EINVAL;
		goto lancer_fw_exit;
	}

	/* one buffer holds the request header plus one image chunk */
	flash_cmd.size = sizeof(struct lancer_cmd_req_write_object)
				+ LANCER_FW_DOWNLOAD_CHUNK;
	flash_cmd.va = dma_alloc_coherent(&adapter->pdev->dev, flash_cmd.size,
						&flash_cmd.dma, GFP_KERNEL);
	if (!flash_cmd.va) {
		status = -ENOMEM;
		dev_err(&adapter->pdev->dev,
			"Memory allocation failure while flashing\n");
		goto lancer_fw_exit;
	}

	dest_image_ptr = flash_cmd.va +
				sizeof(struct lancer_cmd_req_write_object);
	image_size = fw->size;
	data_ptr = fw->data;

	/* advance by data_written (reported by firmware), not chunk_size,
	 * in case the device accepted a partial chunk */
	while (image_size) {
		chunk_size = min_t(u32, image_size, LANCER_FW_DOWNLOAD_CHUNK);

		/* Copy the image chunk content. */
		memcpy(dest_image_ptr, data_ptr, chunk_size);

		status = lancer_cmd_write_object(adapter, &flash_cmd,
				chunk_size, offset, LANCER_FW_DOWNLOAD_LOCATION,
				&data_written, &add_status);

		if (status)
			break;

		offset += data_written;
		data_ptr += data_written;
		image_size -= data_written;
	}

	if (!status) {
		/* Commit the FW written */
		status = lancer_cmd_write_object(adapter, &flash_cmd,
				0, offset, LANCER_FW_DOWNLOAD_LOCATION,
				&data_written, &add_status);
	}

	dma_free_coherent(&adapter->pdev->dev, flash_cmd.size, flash_cmd.va,
				flash_cmd.dma);
	if (status) {
		dev_err(&adapter->pdev->dev,
			"Firmware load error. "
			"Status code: 0x%x Additional Status: 0x%x\n",
			status, add_status);
		goto lancer_fw_exit;
	}

	dev_info(&adapter->pdev->dev, "Firmware flashed successfully\n");
lancer_fw_exit:
	return status;
}
2811
2812static int be_fw_download(struct be_adapter *adapter, const struct firmware* fw)
2813{
Ajit Khaparde3f0d4562010-02-09 01:30:35 +00002814 struct flash_file_hdr_g2 *fhdr;
2815 struct flash_file_hdr_g3 *fhdr3;
2816 struct image_hdr *img_hdr_ptr = NULL;
Ajit Khaparde84517482009-09-04 03:12:16 +00002817 struct be_dma_mem flash_cmd;
Ajit Khaparde84517482009-09-04 03:12:16 +00002818 const u8 *p;
Shripad Nunjundarao485bf562011-05-16 07:36:59 +00002819 int status = 0, i = 0, num_imgs = 0;
Ajit Khaparde84517482009-09-04 03:12:16 +00002820
2821 p = fw->data;
Ajit Khaparde3f0d4562010-02-09 01:30:35 +00002822 fhdr = (struct flash_file_hdr_g2 *) p;
Ajit Khaparde84517482009-09-04 03:12:16 +00002823
Ajit Khaparde84517482009-09-04 03:12:16 +00002824 flash_cmd.size = sizeof(struct be_cmd_write_flashrom) + 32*1024;
Ivan Vecera2b7bceb2011-02-02 08:05:12 +00002825 flash_cmd.va = dma_alloc_coherent(&adapter->pdev->dev, flash_cmd.size,
2826 &flash_cmd.dma, GFP_KERNEL);
Ajit Khaparde84517482009-09-04 03:12:16 +00002827 if (!flash_cmd.va) {
2828 status = -ENOMEM;
2829 dev_err(&adapter->pdev->dev,
2830 "Memory allocation failure while flashing\n");
Shripad Nunjundarao485bf562011-05-16 07:36:59 +00002831 goto be_fw_exit;
Ajit Khaparde84517482009-09-04 03:12:16 +00002832 }
2833
Ajit Khaparde3f0d4562010-02-09 01:30:35 +00002834 if ((adapter->generation == BE_GEN3) &&
2835 (get_ufigen_type(fhdr) == BE_GEN3)) {
2836 fhdr3 = (struct flash_file_hdr_g3 *) fw->data;
Ajit Khaparde8b93b712010-03-31 01:57:10 +00002837 num_imgs = le32_to_cpu(fhdr3->num_imgs);
2838 for (i = 0; i < num_imgs; i++) {
Ajit Khaparde3f0d4562010-02-09 01:30:35 +00002839 img_hdr_ptr = (struct image_hdr *) (fw->data +
2840 (sizeof(struct flash_file_hdr_g3) +
Ajit Khaparde8b93b712010-03-31 01:57:10 +00002841 i * sizeof(struct image_hdr)));
2842 if (le32_to_cpu(img_hdr_ptr->imageid) == 1)
2843 status = be_flash_data(adapter, fw, &flash_cmd,
2844 num_imgs);
Ajit Khaparde3f0d4562010-02-09 01:30:35 +00002845 }
2846 } else if ((adapter->generation == BE_GEN2) &&
2847 (get_ufigen_type(fhdr) == BE_GEN2)) {
2848 status = be_flash_data(adapter, fw, &flash_cmd, 0);
2849 } else {
2850 dev_err(&adapter->pdev->dev,
2851 "UFI and Interface are not compatible for flashing\n");
2852 status = -1;
Ajit Khaparde84517482009-09-04 03:12:16 +00002853 }
2854
Ivan Vecera2b7bceb2011-02-02 08:05:12 +00002855 dma_free_coherent(&adapter->pdev->dev, flash_cmd.size, flash_cmd.va,
2856 flash_cmd.dma);
Ajit Khaparde84517482009-09-04 03:12:16 +00002857 if (status) {
2858 dev_err(&adapter->pdev->dev, "Firmware load error\n");
Shripad Nunjundarao485bf562011-05-16 07:36:59 +00002859 goto be_fw_exit;
Ajit Khaparde84517482009-09-04 03:12:16 +00002860 }
2861
André Goddard Rosaaf901ca2009-11-14 13:09:05 -02002862 dev_info(&adapter->pdev->dev, "Firmware flashed successfully\n");
Ajit Khaparde84517482009-09-04 03:12:16 +00002863
Shripad Nunjundarao485bf562011-05-16 07:36:59 +00002864be_fw_exit:
2865 return status;
2866}
2867
2868int be_load_fw(struct be_adapter *adapter, u8 *fw_file)
2869{
2870 const struct firmware *fw;
2871 int status;
2872
2873 if (!netif_running(adapter->netdev)) {
2874 dev_err(&adapter->pdev->dev,
2875 "Firmware load not allowed (interface is down)\n");
2876 return -1;
2877 }
2878
2879 status = request_firmware(&fw, fw_file, &adapter->pdev->dev);
2880 if (status)
2881 goto fw_exit;
2882
2883 dev_info(&adapter->pdev->dev, "Flashing firmware file %s\n", fw_file);
2884
2885 if (lancer_chip(adapter))
2886 status = lancer_fw_download(adapter, fw);
2887 else
2888 status = be_fw_download(adapter, fw);
2889
Ajit Khaparde84517482009-09-04 03:12:16 +00002890fw_exit:
2891 release_firmware(fw);
2892 return status;
2893}
2894
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002895static struct net_device_ops be_netdev_ops = {
2896 .ndo_open = be_open,
2897 .ndo_stop = be_close,
2898 .ndo_start_xmit = be_xmit,
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002899 .ndo_set_rx_mode = be_set_multicast_list,
2900 .ndo_set_mac_address = be_mac_addr_set,
2901 .ndo_change_mtu = be_change_mtu,
2902 .ndo_validate_addr = eth_validate_addr,
2903 .ndo_vlan_rx_register = be_vlan_register,
2904 .ndo_vlan_rx_add_vid = be_vlan_add_vid,
2905 .ndo_vlan_rx_kill_vid = be_vlan_rem_vid,
Ajit Khaparde64600ea2010-07-23 01:50:34 +00002906 .ndo_set_vf_mac = be_set_vf_mac,
Ajit Khaparde1da87b72010-07-23 01:51:22 +00002907 .ndo_set_vf_vlan = be_set_vf_vlan,
Ajit Khapardee1d18732010-07-23 01:52:13 +00002908 .ndo_set_vf_tx_rate = be_set_vf_tx_rate,
Ajit Khaparde64600ea2010-07-23 01:50:34 +00002909 .ndo_get_vf_config = be_get_vf_config
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002910};
2911
2912static void be_netdev_init(struct net_device *netdev)
2913{
2914 struct be_adapter *adapter = netdev_priv(netdev);
Sathya Perla3abcded2010-10-03 22:12:27 -07002915 struct be_rx_obj *rxo;
2916 int i;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002917
Michał Mirosław6332c8d2011-04-07 02:43:48 +00002918 netdev->hw_features |= NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6 |
Michał Mirosław8b8ddc62011-04-08 02:38:47 +00002919 NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_RXCSUM |
2920 NETIF_F_HW_VLAN_TX;
2921 if (be_multi_rxq(adapter))
2922 netdev->hw_features |= NETIF_F_RXHASH;
Michał Mirosław6332c8d2011-04-07 02:43:48 +00002923
2924 netdev->features |= netdev->hw_features |
Michał Mirosław8b8ddc62011-04-08 02:38:47 +00002925 NETIF_F_HW_VLAN_RX | NETIF_F_HW_VLAN_FILTER;
Ajit Khaparde4b972912011-04-06 18:07:43 +00002926
Michał Mirosław79032642010-11-30 06:38:00 +00002927 netdev->vlan_features |= NETIF_F_SG | NETIF_F_TSO |
2928 NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;
Ajit Khaparde51c59872009-11-29 17:54:54 +00002929
Sathya Perlafe6d2a32010-11-21 23:25:50 +00002930 if (lancer_chip(adapter))
2931 netdev->vlan_features |= NETIF_F_TSO6;
2932
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002933 netdev->flags |= IFF_MULTICAST;
2934
Ajit Khaparde9e90c962009-11-06 02:06:59 +00002935 /* Default settings for Rx and Tx flow control */
2936 adapter->rx_fc = true;
2937 adapter->tx_fc = true;
2938
Ajit Khapardec190e3c2009-09-04 03:12:29 +00002939 netif_set_gso_max_size(netdev, 65535);
2940
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002941 BE_SET_NETDEV_OPS(netdev, &be_netdev_ops);
2942
2943 SET_ETHTOOL_OPS(netdev, &be_ethtool_ops);
2944
Sathya Perla3abcded2010-10-03 22:12:27 -07002945 for_all_rx_queues(adapter, rxo, i)
2946 netif_napi_add(netdev, &rxo->rx_eq.napi, be_poll_rx,
2947 BE_NAPI_WEIGHT);
2948
Sathya Perla5fb379e2009-06-18 00:02:59 +00002949 netif_napi_add(netdev, &adapter->tx_eq.napi, be_poll_tx_mcc,
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002950 BE_NAPI_WEIGHT);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002951}
2952
2953static void be_unmap_pci_bars(struct be_adapter *adapter)
2954{
Sathya Perla8788fdc2009-07-27 22:52:03 +00002955 if (adapter->csr)
2956 iounmap(adapter->csr);
2957 if (adapter->db)
2958 iounmap(adapter->db);
Sarveshwar Bandiba343c72010-03-31 02:56:12 +00002959 if (adapter->pcicfg && be_physfn(adapter))
Sathya Perla8788fdc2009-07-27 22:52:03 +00002960 iounmap(adapter->pcicfg);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002961}
2962
2963static int be_map_pci_bars(struct be_adapter *adapter)
2964{
2965 u8 __iomem *addr;
Sarveshwar Bandiba343c72010-03-31 02:56:12 +00002966 int pcicfg_reg, db_reg;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002967
Sathya Perlafe6d2a32010-11-21 23:25:50 +00002968 if (lancer_chip(adapter)) {
2969 addr = ioremap_nocache(pci_resource_start(adapter->pdev, 0),
2970 pci_resource_len(adapter->pdev, 0));
2971 if (addr == NULL)
2972 return -ENOMEM;
2973 adapter->db = addr;
2974 return 0;
2975 }
2976
Sarveshwar Bandiba343c72010-03-31 02:56:12 +00002977 if (be_physfn(adapter)) {
2978 addr = ioremap_nocache(pci_resource_start(adapter->pdev, 2),
2979 pci_resource_len(adapter->pdev, 2));
2980 if (addr == NULL)
2981 return -ENOMEM;
2982 adapter->csr = addr;
2983 }
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002984
Sarveshwar Bandiba343c72010-03-31 02:56:12 +00002985 if (adapter->generation == BE_GEN2) {
2986 pcicfg_reg = 1;
2987 db_reg = 4;
2988 } else {
2989 pcicfg_reg = 0;
2990 if (be_physfn(adapter))
2991 db_reg = 4;
2992 else
2993 db_reg = 0;
2994 }
2995 addr = ioremap_nocache(pci_resource_start(adapter->pdev, db_reg),
2996 pci_resource_len(adapter->pdev, db_reg));
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002997 if (addr == NULL)
2998 goto pci_map_err;
Sathya Perla8788fdc2009-07-27 22:52:03 +00002999 adapter->db = addr;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003000
Sarveshwar Bandiba343c72010-03-31 02:56:12 +00003001 if (be_physfn(adapter)) {
3002 addr = ioremap_nocache(
3003 pci_resource_start(adapter->pdev, pcicfg_reg),
3004 pci_resource_len(adapter->pdev, pcicfg_reg));
3005 if (addr == NULL)
3006 goto pci_map_err;
3007 adapter->pcicfg = addr;
3008 } else
3009 adapter->pcicfg = adapter->db + SRIOV_VF_PCICFG_OFFSET;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003010
3011 return 0;
3012pci_map_err:
3013 be_unmap_pci_bars(adapter);
3014 return -ENOMEM;
3015}
3016
3017
3018static void be_ctrl_cleanup(struct be_adapter *adapter)
3019{
Sathya Perla8788fdc2009-07-27 22:52:03 +00003020 struct be_dma_mem *mem = &adapter->mbox_mem_alloced;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003021
3022 be_unmap_pci_bars(adapter);
3023
3024 if (mem->va)
Ivan Vecera2b7bceb2011-02-02 08:05:12 +00003025 dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
3026 mem->dma);
Sathya Perlae7b909a2009-11-22 22:01:10 +00003027
3028 mem = &adapter->mc_cmd_mem;
3029 if (mem->va)
Ivan Vecera2b7bceb2011-02-02 08:05:12 +00003030 dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
3031 mem->dma);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003032}
3033
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003034static int be_ctrl_init(struct be_adapter *adapter)
3035{
Sathya Perla8788fdc2009-07-27 22:52:03 +00003036 struct be_dma_mem *mbox_mem_alloc = &adapter->mbox_mem_alloced;
3037 struct be_dma_mem *mbox_mem_align = &adapter->mbox_mem;
Sathya Perlae7b909a2009-11-22 22:01:10 +00003038 struct be_dma_mem *mc_cmd_mem = &adapter->mc_cmd_mem;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003039 int status;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003040
3041 status = be_map_pci_bars(adapter);
3042 if (status)
Sathya Perlae7b909a2009-11-22 22:01:10 +00003043 goto done;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003044
3045 mbox_mem_alloc->size = sizeof(struct be_mcc_mailbox) + 16;
Ivan Vecera2b7bceb2011-02-02 08:05:12 +00003046 mbox_mem_alloc->va = dma_alloc_coherent(&adapter->pdev->dev,
3047 mbox_mem_alloc->size,
3048 &mbox_mem_alloc->dma,
3049 GFP_KERNEL);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003050 if (!mbox_mem_alloc->va) {
Sathya Perlae7b909a2009-11-22 22:01:10 +00003051 status = -ENOMEM;
3052 goto unmap_pci_bars;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003053 }
Sathya Perlae7b909a2009-11-22 22:01:10 +00003054
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003055 mbox_mem_align->size = sizeof(struct be_mcc_mailbox);
3056 mbox_mem_align->va = PTR_ALIGN(mbox_mem_alloc->va, 16);
3057 mbox_mem_align->dma = PTR_ALIGN(mbox_mem_alloc->dma, 16);
3058 memset(mbox_mem_align->va, 0, sizeof(struct be_mcc_mailbox));
Sathya Perlae7b909a2009-11-22 22:01:10 +00003059
3060 mc_cmd_mem->size = sizeof(struct be_cmd_req_mcast_mac_config);
Ivan Vecera2b7bceb2011-02-02 08:05:12 +00003061 mc_cmd_mem->va = dma_alloc_coherent(&adapter->pdev->dev,
3062 mc_cmd_mem->size, &mc_cmd_mem->dma,
3063 GFP_KERNEL);
Sathya Perlae7b909a2009-11-22 22:01:10 +00003064 if (mc_cmd_mem->va == NULL) {
3065 status = -ENOMEM;
3066 goto free_mbox;
3067 }
3068 memset(mc_cmd_mem->va, 0, mc_cmd_mem->size);
3069
Ivan Vecera29849612010-12-14 05:43:19 +00003070 mutex_init(&adapter->mbox_lock);
Sathya Perla8788fdc2009-07-27 22:52:03 +00003071 spin_lock_init(&adapter->mcc_lock);
3072 spin_lock_init(&adapter->mcc_cq_lock);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003073
Sarveshwar Bandidd131e72010-05-25 16:16:32 -07003074 init_completion(&adapter->flash_compl);
Sathya Perlacf588472010-02-14 21:22:01 +00003075 pci_save_state(adapter->pdev);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003076 return 0;
Sathya Perlae7b909a2009-11-22 22:01:10 +00003077
3078free_mbox:
Ivan Vecera2b7bceb2011-02-02 08:05:12 +00003079 dma_free_coherent(&adapter->pdev->dev, mbox_mem_alloc->size,
3080 mbox_mem_alloc->va, mbox_mem_alloc->dma);
Sathya Perlae7b909a2009-11-22 22:01:10 +00003081
3082unmap_pci_bars:
3083 be_unmap_pci_bars(adapter);
3084
3085done:
3086 return status;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003087}
3088
3089static void be_stats_cleanup(struct be_adapter *adapter)
3090{
Sathya Perla3abcded2010-10-03 22:12:27 -07003091 struct be_dma_mem *cmd = &adapter->stats_cmd;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003092
3093 if (cmd->va)
Ivan Vecera2b7bceb2011-02-02 08:05:12 +00003094 dma_free_coherent(&adapter->pdev->dev, cmd->size,
3095 cmd->va, cmd->dma);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003096}
3097
3098static int be_stats_init(struct be_adapter *adapter)
3099{
Sathya Perla3abcded2010-10-03 22:12:27 -07003100 struct be_dma_mem *cmd = &adapter->stats_cmd;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003101
Selvin Xavier005d5692011-05-16 07:36:35 +00003102 if (adapter->generation == BE_GEN2) {
Ajit Khaparde89a88ab2011-05-16 07:36:18 +00003103 cmd->size = sizeof(struct be_cmd_req_get_stats_v0);
Selvin Xavier005d5692011-05-16 07:36:35 +00003104 } else {
3105 if (lancer_chip(adapter))
3106 cmd->size = sizeof(struct lancer_cmd_req_pport_stats);
3107 else
3108 cmd->size = sizeof(struct be_cmd_req_get_stats_v1);
3109 }
Ivan Vecera2b7bceb2011-02-02 08:05:12 +00003110 cmd->va = dma_alloc_coherent(&adapter->pdev->dev, cmd->size, &cmd->dma,
3111 GFP_KERNEL);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003112 if (cmd->va == NULL)
3113 return -1;
David S. Millerd291b9a2010-01-28 21:36:21 -08003114 memset(cmd->va, 0, cmd->size);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003115 return 0;
3116}
3117
/* PCI remove callback: tear down in strict reverse order of be_probe().
 * The worker is cancelled first so it cannot run against freed state,
 * and the netdev is freed last since 'adapter' lives inside it.
 */
static void __devexit be_remove(struct pci_dev *pdev)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);

	/* drvdata is cleared on probe failure; nothing to do then */
	if (!adapter)
		return;

	cancel_delayed_work_sync(&adapter->work);

	unregister_netdev(adapter->netdev);

	be_clear(adapter);

	be_stats_cleanup(adapter);

	be_ctrl_cleanup(adapter);

	kfree(adapter->vf_cfg);
	be_sriov_disable(adapter);

	be_msix_disable(adapter);

	pci_set_drvdata(pdev, NULL);
	pci_release_regions(pdev);
	pci_disable_device(pdev);

	/* frees 'adapter' too (it is the netdev's private area) */
	free_netdev(adapter->netdev);
}
3146
Sathya Perla2243e2e2009-11-22 22:02:03 +00003147static int be_get_config(struct be_adapter *adapter)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003148{
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003149 int status;
Sathya Perla2243e2e2009-11-22 22:02:03 +00003150 u8 mac[ETH_ALEN];
Sathya Perla43a04fdc2009-10-14 20:21:17 +00003151
Sathya Perla8788fdc2009-07-27 22:52:03 +00003152 status = be_cmd_get_fw_ver(adapter, adapter->fw_ver);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003153 if (status)
3154 return status;
3155
Sathya Perla3abcded2010-10-03 22:12:27 -07003156 status = be_cmd_query_fw_cfg(adapter, &adapter->port_num,
3157 &adapter->function_mode, &adapter->function_caps);
Sathya Perla2243e2e2009-11-22 22:02:03 +00003158 if (status)
3159 return status;
3160
3161 memset(mac, 0, ETH_ALEN);
Sarveshwar Bandiba343c72010-03-31 02:56:12 +00003162
Mammatha Edhala12f4d0a2011-05-18 03:26:22 +00003163 /* A default permanent address is given to each VF for Lancer*/
3164 if (be_physfn(adapter) || lancer_chip(adapter)) {
Sarveshwar Bandiba343c72010-03-31 02:56:12 +00003165 status = be_cmd_mac_addr_query(adapter, mac,
Sathya Perla2243e2e2009-11-22 22:02:03 +00003166 MAC_ADDRESS_TYPE_NETWORK, true /*permanent */, 0);
Ajit Khapardeca9e4982009-11-29 17:56:26 +00003167
Sarveshwar Bandiba343c72010-03-31 02:56:12 +00003168 if (status)
3169 return status;
Ajit Khapardeca9e4982009-11-29 17:56:26 +00003170
Sarveshwar Bandiba343c72010-03-31 02:56:12 +00003171 if (!is_valid_ether_addr(mac))
3172 return -EADDRNOTAVAIL;
3173
3174 memcpy(adapter->netdev->dev_addr, mac, ETH_ALEN);
3175 memcpy(adapter->netdev->perm_addr, mac, ETH_ALEN);
3176 }
Sathya Perla2243e2e2009-11-22 22:02:03 +00003177
Ajit Khaparde3486be22010-07-23 02:04:54 +00003178 if (adapter->function_mode & 0x400)
Ajit Khaparde82903e42010-02-09 01:34:57 +00003179 adapter->max_vlans = BE_NUM_VLANS_SUPPORTED/4;
3180 else
3181 adapter->max_vlans = BE_NUM_VLANS_SUPPORTED;
3182
Ajit Khaparde9e1453c2011-02-20 11:42:22 +00003183 status = be_cmd_get_cntl_attributes(adapter);
3184 if (status)
3185 return status;
3186
Sathya Perla2e588f82011-03-11 02:49:26 +00003187 be_cmd_check_native_mode(adapter);
Sathya Perla2243e2e2009-11-22 22:02:03 +00003188 return 0;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003189}
3190
Sathya Perlafe6d2a32010-11-21 23:25:50 +00003191static int be_dev_family_check(struct be_adapter *adapter)
3192{
3193 struct pci_dev *pdev = adapter->pdev;
3194 u32 sli_intf = 0, if_type;
3195
3196 switch (pdev->device) {
3197 case BE_DEVICE_ID1:
3198 case OC_DEVICE_ID1:
3199 adapter->generation = BE_GEN2;
3200 break;
3201 case BE_DEVICE_ID2:
3202 case OC_DEVICE_ID2:
3203 adapter->generation = BE_GEN3;
3204 break;
3205 case OC_DEVICE_ID3:
Mammatha Edhala12f4d0a2011-05-18 03:26:22 +00003206 case OC_DEVICE_ID4:
Sathya Perlafe6d2a32010-11-21 23:25:50 +00003207 pci_read_config_dword(pdev, SLI_INTF_REG_OFFSET, &sli_intf);
3208 if_type = (sli_intf & SLI_INTF_IF_TYPE_MASK) >>
3209 SLI_INTF_IF_TYPE_SHIFT;
3210
3211 if (((sli_intf & SLI_INTF_VALID_MASK) != SLI_INTF_VALID) ||
3212 if_type != 0x02) {
3213 dev_err(&pdev->dev, "SLI_INTF reg val is not valid\n");
3214 return -EINVAL;
3215 }
Sathya Perlafe6d2a32010-11-21 23:25:50 +00003216 adapter->sli_family = ((sli_intf & SLI_INTF_FAMILY_MASK) >>
3217 SLI_INTF_FAMILY_SHIFT);
3218 adapter->generation = BE_GEN3;
3219 break;
3220 default:
3221 adapter->generation = 0;
3222 }
3223 return 0;
3224}
3225
Padmanabh Ratnakar37eed1c2011-03-07 03:08:36 +00003226static int lancer_wait_ready(struct be_adapter *adapter)
3227{
3228#define SLIPORT_READY_TIMEOUT 500
3229 u32 sliport_status;
3230 int status = 0, i;
3231
3232 for (i = 0; i < SLIPORT_READY_TIMEOUT; i++) {
3233 sliport_status = ioread32(adapter->db + SLIPORT_STATUS_OFFSET);
3234 if (sliport_status & SLIPORT_STATUS_RDY_MASK)
3235 break;
3236
3237 msleep(20);
3238 }
3239
3240 if (i == SLIPORT_READY_TIMEOUT)
3241 status = -1;
3242
3243 return status;
3244}
3245
/* Bring a Lancer adapter to the ready state.
 * Waits for the SLI port to become ready; if the port reports an error
 * that is flagged as recoverable by reset (RN bit), writes the
 * "initiate physical" bit to SLIPORT_CONTROL and waits again, then
 * re-checks that the error/reset-needed bits have cleared.
 * Returns 0 when the port is ready, -1 on timeout or an unrecoverable
 * error state.
 */
static int lancer_test_and_set_rdy_state(struct be_adapter *adapter)
{
	int status;
	u32 sliport_status, err, reset_needed;
	status = lancer_wait_ready(adapter);
	if (!status) {
		sliport_status = ioread32(adapter->db + SLIPORT_STATUS_OFFSET);
		err = sliport_status & SLIPORT_STATUS_ERR_MASK;
		reset_needed = sliport_status & SLIPORT_STATUS_RN_MASK;
		if (err && reset_needed) {
			iowrite32(SLI_PORT_CONTROL_IP_MASK,
				  adapter->db + SLIPORT_CONTROL_OFFSET);

			/* check adapter has corrected the error */
			status = lancer_wait_ready(adapter);
			sliport_status = ioread32(adapter->db +
						  SLIPORT_STATUS_OFFSET);
			sliport_status &= (SLIPORT_STATUS_ERR_MASK |
						SLIPORT_STATUS_RN_MASK);
			/* still timed out, or error bits still set: give up */
			if (status || sliport_status)
				status = -1;
		} else if (err || reset_needed) {
			/* error without reset-needed (or vice versa) is not
			 * recoverable here
			 */
			status = -1;
		}
	}
	return status;
}
3273
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003274static int __devinit be_probe(struct pci_dev *pdev,
3275 const struct pci_device_id *pdev_id)
3276{
3277 int status = 0;
3278 struct be_adapter *adapter;
3279 struct net_device *netdev;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003280
3281 status = pci_enable_device(pdev);
3282 if (status)
3283 goto do_none;
3284
3285 status = pci_request_regions(pdev, DRV_NAME);
3286 if (status)
3287 goto disable_dev;
3288 pci_set_master(pdev);
3289
3290 netdev = alloc_etherdev(sizeof(struct be_adapter));
3291 if (netdev == NULL) {
3292 status = -ENOMEM;
3293 goto rel_reg;
3294 }
3295 adapter = netdev_priv(netdev);
3296 adapter->pdev = pdev;
3297 pci_set_drvdata(pdev, adapter);
Sathya Perlafe6d2a32010-11-21 23:25:50 +00003298
3299 status = be_dev_family_check(adapter);
Sathya Perla63657b92010-12-01 01:02:28 +00003300 if (status)
Sathya Perlafe6d2a32010-11-21 23:25:50 +00003301 goto free_netdev;
3302
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003303 adapter->netdev = netdev;
Sathya Perla2243e2e2009-11-22 22:02:03 +00003304 SET_NETDEV_DEV(netdev, &pdev->dev);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003305
Ivan Vecera2b7bceb2011-02-02 08:05:12 +00003306 status = dma_set_mask(&pdev->dev, DMA_BIT_MASK(64));
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003307 if (!status) {
3308 netdev->features |= NETIF_F_HIGHDMA;
3309 } else {
Ivan Vecera2b7bceb2011-02-02 08:05:12 +00003310 status = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003311 if (status) {
3312 dev_err(&pdev->dev, "Could not set PCI DMA Mask\n");
3313 goto free_netdev;
3314 }
3315 }
3316
Sarveshwar Bandiba343c72010-03-31 02:56:12 +00003317 be_sriov_enable(adapter);
Ajit Khaparde48f5a192011-04-06 18:08:30 +00003318 if (adapter->sriov_enabled) {
3319 adapter->vf_cfg = kcalloc(num_vfs,
3320 sizeof(struct be_vf_cfg), GFP_KERNEL);
3321
3322 if (!adapter->vf_cfg)
3323 goto free_netdev;
3324 }
Sarveshwar Bandiba343c72010-03-31 02:56:12 +00003325
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003326 status = be_ctrl_init(adapter);
3327 if (status)
Ajit Khaparde48f5a192011-04-06 18:08:30 +00003328 goto free_vf_cfg;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003329
Padmanabh Ratnakar37eed1c2011-03-07 03:08:36 +00003330 if (lancer_chip(adapter)) {
3331 status = lancer_test_and_set_rdy_state(adapter);
3332 if (status) {
3333 dev_err(&pdev->dev, "Adapter in non recoverable error\n");
Ajit Khaparde48f5a192011-04-06 18:08:30 +00003334 goto ctrl_clean;
Padmanabh Ratnakar37eed1c2011-03-07 03:08:36 +00003335 }
3336 }
3337
Sathya Perla2243e2e2009-11-22 22:02:03 +00003338 /* sync up with fw's ready state */
Sarveshwar Bandiba343c72010-03-31 02:56:12 +00003339 if (be_physfn(adapter)) {
3340 status = be_cmd_POST(adapter);
3341 if (status)
3342 goto ctrl_clean;
Sarveshwar Bandiba343c72010-03-31 02:56:12 +00003343 }
Sathya Perla2243e2e2009-11-22 22:02:03 +00003344
3345 /* tell fw we're ready to fire cmds */
3346 status = be_cmd_fw_init(adapter);
3347 if (status)
3348 goto ctrl_clean;
3349
Ajit Khapardea4b4dfa2011-02-11 13:36:57 +00003350 status = be_cmd_reset_function(adapter);
3351 if (status)
3352 goto ctrl_clean;
Sarveshwar Bandi556ae192010-05-24 18:38:25 -07003353
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003354 status = be_stats_init(adapter);
3355 if (status)
3356 goto ctrl_clean;
3357
Sathya Perla2243e2e2009-11-22 22:02:03 +00003358 status = be_get_config(adapter);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003359 if (status)
3360 goto stats_clean;
3361
Sathya Perla3abcded2010-10-03 22:12:27 -07003362 be_msix_enable(adapter);
3363
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003364 INIT_DELAYED_WORK(&adapter->work, be_worker);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003365
Sathya Perla5fb379e2009-06-18 00:02:59 +00003366 status = be_setup(adapter);
3367 if (status)
Sathya Perla3abcded2010-10-03 22:12:27 -07003368 goto msix_disable;
Sathya Perla2243e2e2009-11-22 22:02:03 +00003369
Sathya Perla3abcded2010-10-03 22:12:27 -07003370 be_netdev_init(netdev);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003371 status = register_netdev(netdev);
3372 if (status != 0)
Sathya Perla5fb379e2009-06-18 00:02:59 +00003373 goto unsetup;
Somnath Kotur63a76942010-10-25 01:11:10 +00003374 netif_carrier_off(netdev);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003375
Ajit Khapardee6319362011-02-11 13:35:41 +00003376 if (be_physfn(adapter) && adapter->sriov_enabled) {
Ajit Khaparded0381c42011-04-19 12:11:55 +00003377 u8 mac_speed;
3378 bool link_up;
3379 u16 vf, lnk_speed;
3380
Mammatha Edhala12f4d0a2011-05-18 03:26:22 +00003381 if (!lancer_chip(adapter)) {
3382 status = be_vf_eth_addr_config(adapter);
3383 if (status)
3384 goto unreg_netdev;
3385 }
Ajit Khaparded0381c42011-04-19 12:11:55 +00003386
3387 for (vf = 0; vf < num_vfs; vf++) {
3388 status = be_cmd_link_status_query(adapter, &link_up,
3389 &mac_speed, &lnk_speed, vf + 1);
3390 if (!status)
3391 adapter->vf_cfg[vf].vf_tx_rate = lnk_speed * 10;
3392 else
3393 goto unreg_netdev;
3394 }
Ajit Khapardee6319362011-02-11 13:35:41 +00003395 }
3396
Ajit Khapardec4ca2372009-05-18 15:38:55 -07003397 dev_info(&pdev->dev, "%s port %d\n", nic_name(pdev), adapter->port_num);
Somnath Koturf203af72010-10-25 23:01:03 +00003398 schedule_delayed_work(&adapter->work, msecs_to_jiffies(100));
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003399 return 0;
3400
Ajit Khapardee6319362011-02-11 13:35:41 +00003401unreg_netdev:
3402 unregister_netdev(netdev);
Sathya Perla5fb379e2009-06-18 00:02:59 +00003403unsetup:
3404 be_clear(adapter);
Sathya Perla3abcded2010-10-03 22:12:27 -07003405msix_disable:
3406 be_msix_disable(adapter);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003407stats_clean:
3408 be_stats_cleanup(adapter);
3409ctrl_clean:
3410 be_ctrl_cleanup(adapter);
Ajit Khaparde48f5a192011-04-06 18:08:30 +00003411free_vf_cfg:
3412 kfree(adapter->vf_cfg);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003413free_netdev:
Sarveshwar Bandiba343c72010-03-31 02:56:12 +00003414 be_sriov_disable(adapter);
Sathya Perlafe6d2a32010-11-21 23:25:50 +00003415 free_netdev(netdev);
Sathya Perla8d56ff12009-11-22 22:02:26 +00003416 pci_set_drvdata(pdev, NULL);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003417rel_reg:
3418 pci_release_regions(pdev);
3419disable_dev:
3420 pci_disable_device(pdev);
3421do_none:
Ajit Khapardec4ca2372009-05-18 15:38:55 -07003422 dev_err(&pdev->dev, "%s initialization failed\n", nic_name(pdev));
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003423 return status;
3424}
3425
/* PCI suspend callback: quiesce the device and enter the requested
 * low-power state. Optionally arms wake-on-LAN first.
 */
static int be_suspend(struct pci_dev *pdev, pm_message_t state)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);
	struct net_device *netdev = adapter->netdev;

	/* stop the worker before dismantling anything it touches */
	cancel_delayed_work_sync(&adapter->work);
	if (adapter->wol)
		be_setup_wol(adapter, true);

	netif_device_detach(netdev);
	if (netif_running(netdev)) {
		rtnl_lock();
		be_close(netdev);
		rtnl_unlock();
	}
	/* snapshot current flow-control settings into the adapter struct
	 * — presumably so be_setup() can re-apply them on resume; confirm
	 */
	be_cmd_get_flow_control(adapter, &adapter->tx_fc, &adapter->rx_fc);
	be_clear(adapter);

	be_msix_disable(adapter);
	pci_save_state(pdev);
	pci_disable_device(pdev);
	pci_set_power_state(pdev, pci_choose_state(pdev, state));
	return 0;
}
3450
/* PCI resume callback: reverse be_suspend() — re-enable the device,
 * re-init firmware comms, rebuild rings (be_setup), reopen the
 * interface if it was running, and restart the worker.
 */
static int be_resume(struct pci_dev *pdev)
{
	int status = 0;
	struct be_adapter *adapter = pci_get_drvdata(pdev);
	struct net_device *netdev = adapter->netdev;

	netif_device_detach(netdev);

	status = pci_enable_device(pdev);
	if (status)
		return status;

	/* back to full power (D0) and restore config space */
	pci_set_power_state(pdev, 0);
	pci_restore_state(pdev);

	be_msix_enable(adapter);
	/* tell fw we're ready to fire cmds */
	status = be_cmd_fw_init(adapter);
	if (status)
		return status;

	be_setup(adapter);
	if (netif_running(netdev)) {
		rtnl_lock();
		be_open(netdev);
		rtnl_unlock();
	}
	netif_device_attach(netdev);

	/* disarm wake-on-LAN now that we are awake */
	if (adapter->wol)
		be_setup_wol(adapter, false);

	schedule_delayed_work(&adapter->work, msecs_to_jiffies(100));
	return 0;
}
3486
/*
 * An FLR will stop BE from DMAing any data.
 */
static void be_shutdown(struct pci_dev *pdev)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);

	/* probe may have failed and cleared drvdata */
	if (!adapter)
		return;

	cancel_delayed_work_sync(&adapter->work);

	netif_device_detach(adapter->netdev);

	/* arm wake-on-LAN before powering down, if configured */
	if (adapter->wol)
		be_setup_wol(adapter, true);

	/* function-level reset quiesces all DMA (see comment above) */
	be_cmd_reset_function(adapter);

	pci_disable_device(pdev);
}
3508
/* EEH/AER: a PCI channel error was detected. Flag the error (so other
 * paths can avoid touching the hardware), take the interface down and
 * tear down rings. Asks the core for a slot reset unless the failure
 * is permanent.
 */
static pci_ers_result_t be_eeh_err_detected(struct pci_dev *pdev,
		pci_channel_state_t state)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);
	struct net_device *netdev = adapter->netdev;

	dev_err(&adapter->pdev->dev, "EEH error detected\n");

	adapter->eeh_err = true;

	netif_device_detach(netdev);

	if (netif_running(netdev)) {
		rtnl_lock();
		be_close(netdev);
		rtnl_unlock();
	}
	be_clear(adapter);

	/* permanent failure: no point attempting recovery */
	if (state == pci_channel_io_perm_failure)
		return PCI_ERS_RESULT_DISCONNECT;

	pci_disable_device(pdev);

	return PCI_ERS_RESULT_NEED_RESET;
}
3535
/* EEH/AER: the slot has been reset. Re-enable the device, restore its
 * config space and verify firmware readiness via POST. Returns
 * RECOVERED so the core proceeds to be_eeh_resume(), or DISCONNECT if
 * the device cannot be revived.
 */
static pci_ers_result_t be_eeh_reset(struct pci_dev *pdev)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);
	int status;

	dev_info(&adapter->pdev->dev, "EEH reset\n");
	/* clear the flag set in be_eeh_err_detected() */
	adapter->eeh_err = false;

	status = pci_enable_device(pdev);
	if (status)
		return PCI_ERS_RESULT_DISCONNECT;

	pci_set_master(pdev);
	pci_set_power_state(pdev, 0);
	pci_restore_state(pdev);

	/* Check if card is ok and fw is ready */
	status = be_cmd_POST(adapter);
	if (status)
		return PCI_ERS_RESULT_DISCONNECT;

	return PCI_ERS_RESULT_RECOVERED;
}
3559
/* EEH/AER: final recovery step after a successful slot reset.
 * Re-initializes firmware comms and rings, reopens the interface if it
 * was running, and re-attaches the netdev. Errors are logged only —
 * this callback returns void.
 */
static void be_eeh_resume(struct pci_dev *pdev)
{
	int status = 0;
	struct be_adapter *adapter = pci_get_drvdata(pdev);
	struct net_device *netdev = adapter->netdev;

	dev_info(&adapter->pdev->dev, "EEH resume\n");

	pci_save_state(pdev);

	/* tell fw we're ready to fire cmds */
	status = be_cmd_fw_init(adapter);
	if (status)
		goto err;

	status = be_setup(adapter);
	if (status)
		goto err;

	if (netif_running(netdev)) {
		status = be_open(netdev);
		if (status)
			goto err;
	}
	netif_device_attach(netdev);
	return;
err:
	dev_err(&adapter->pdev->dev, "EEH resume failed\n");
}
3589
/* PCI error-recovery (EEH/AER) callbacks, wired into be_driver below. */
static struct pci_error_handlers be_eeh_handlers = {
	.error_detected = be_eeh_err_detected,
	.slot_reset = be_eeh_reset,
	.resume = be_eeh_resume,
};
3595
/* PCI driver descriptor: binds the device-id table to the probe/remove,
 * power-management, shutdown and error-recovery entry points above.
 */
static struct pci_driver be_driver = {
	.name = DRV_NAME,
	.id_table = be_dev_ids,
	.probe = be_probe,
	.remove = be_remove,
	.suspend = be_suspend,
	.resume = be_resume,
	.shutdown = be_shutdown,
	.err_handler = &be_eeh_handlers
};
3606
3607static int __init be_init_module(void)
3608{
Joe Perches8e95a202009-12-03 07:58:21 +00003609 if (rx_frag_size != 8192 && rx_frag_size != 4096 &&
3610 rx_frag_size != 2048) {
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003611 printk(KERN_WARNING DRV_NAME
3612 " : Module param rx_frag_size must be 2048/4096/8192."
3613 " Using 2048\n");
3614 rx_frag_size = 2048;
3615 }
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003616
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003617 return pci_register_driver(&be_driver);
3618}
3619module_init(be_init_module);
3620
/* Module exit point: unregister the PCI driver (triggers be_remove()
 * for every bound device).
 */
static void __exit be_exit_module(void)
{
	pci_unregister_driver(&be_driver);
}
module_exit(be_exit_module);