/*
 * Copyright (C) 2005 - 2011 Emulex
 * All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version 2
 * as published by the Free Software Foundation. The full GNU General
 * Public License is included in this distribution in the file called COPYING.
 *
 * Contact Information:
 * linux-drivers@emulex.com
 *
 * Emulex
 * 3333 Susan Street
 * Costa Mesa, CA 92626
 */

#include <linux/prefetch.h>
#include "be.h"
#include "be_cmds.h"
#include <asm/div64.h>

MODULE_VERSION(DRV_VER);
MODULE_DEVICE_TABLE(pci, be_dev_ids);
MODULE_DESCRIPTION(DRV_DESC " " DRV_VER);
MODULE_AUTHOR("ServerEngines Corporation");
MODULE_LICENSE("GPL");

static ushort rx_frag_size = 2048;
static unsigned int num_vfs;
module_param(rx_frag_size, ushort, S_IRUGO);
module_param(num_vfs, uint, S_IRUGO);
MODULE_PARM_DESC(rx_frag_size, "Size of a fragment that holds rcvd data.");
MODULE_PARM_DESC(num_vfs, "Number of PCI VFs to initialize");

static bool multi_rxq = true;
module_param(multi_rxq, bool, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(multi_rxq, "Multi Rx Queue support. Enabled by default");

static DEFINE_PCI_DEVICE_TABLE(be_dev_ids) = {
	{ PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID1) },
	{ PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID2) },
	{ PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID1) },
	{ PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID2) },
	{ PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID3)},
	{ PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID4)},
	{ 0 }
};
MODULE_DEVICE_TABLE(pci, be_dev_ids);
/* UE Status Low CSR */
static char *ue_status_low_desc[] = {
	"CEV",
	"CTX",
	"DBUF",
	"ERX",
	"Host",
	"MPU",
	"NDMA",
	"PTC ",
	"RDMA ",
	"RXF ",
	"RXIPS ",
	"RXULP0 ",
	"RXULP1 ",
	"RXULP2 ",
	"TIM ",
	"TPOST ",
	"TPRE ",
	"TXIPS ",
	"TXULP0 ",
	"TXULP1 ",
	"UC ",
	"WDMA ",
	"TXULP2 ",
	"HOST1 ",
	"P0_OB_LINK ",
	"P1_OB_LINK ",
	"HOST_GPIO ",
	"MBOX ",
	"AXGMAC0",
	"AXGMAC1",
	"JTAG",
	"MPU_INTPEND"
};
/* UE Status High CSR */
static char *ue_status_hi_desc[] = {
	"LPCMEMHOST",
	"MGMT_MAC",
	"PCS0ONLINE",
	"MPU_IRAM",
	"PCS1ONLINE",
	"PCTL0",
	"PCTL1",
	"PMEM",
	"RR",
	"TXPB",
	"XAUI",
	"RXPP",
	"XAUI",
	"TXP",
	"ARM",
	"IPC",
	"HOST2",
	"HOST3",
	"HOST4",
	"HOST5",
	"HOST6",
	"HOST7",
	"HOST8",
	"HOST9",
	"NETC",
	"Unknown",
	"Unknown",
	"Unknown",
	"Unknown",
	"Unknown",
	"Unknown",
	"Unknown",
	"Unknown"
};

static void be_queue_free(struct be_adapter *adapter, struct be_queue_info *q)
{
	struct be_dma_mem *mem = &q->dma_mem;
	if (mem->va)
		dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
				  mem->dma);
}

static int be_queue_alloc(struct be_adapter *adapter, struct be_queue_info *q,
		u16 len, u16 entry_size)
{
	struct be_dma_mem *mem = &q->dma_mem;

	memset(q, 0, sizeof(*q));
	q->len = len;
	q->entry_size = entry_size;
	mem->size = len * entry_size;
	mem->va = dma_alloc_coherent(&adapter->pdev->dev, mem->size, &mem->dma,
				     GFP_KERNEL);
	if (!mem->va)
		return -1;
	memset(mem->va, 0, mem->size);
	return 0;
}

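/* Enable/disable host interrupt generation by flipping the HOSTINTR bit
 * in the PCICFG membar interrupt-control register; the register is
 * rewritten only when the requested state actually differs from the
 * current one.
 */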
static void be_intr_set(struct be_adapter *adapter, bool enable)
{
	u8 __iomem *addr = adapter->pcicfg + PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET;
	u32 reg = ioread32(addr);
	u32 enabled = reg & MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;

	if (adapter->eeh_err)
		return;

	if (!enabled && enable)
		reg |= MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
	else if (enabled && !enable)
		reg &= ~MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
	else
		return;

	iowrite32(reg, addr);
}

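/* Doorbell helpers: each queue is kicked by writing a 32-bit doorbell
 * word (ring id in the low bits, posted/popped counts and flags in the
 * upper bits) to the adapter's doorbell BAR. The wmb() in the RQ/TXQ
 * notify paths makes the ring entries visible in memory before the
 * doorbell write hands them to hardware.
 */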
static void be_rxq_notify(struct be_adapter *adapter, u16 qid, u16 posted)
{
	u32 val = 0;
	val |= qid & DB_RQ_RING_ID_MASK;
	val |= posted << DB_RQ_NUM_POSTED_SHIFT;

	wmb();
	iowrite32(val, adapter->db + DB_RQ_OFFSET);
}

static void be_txq_notify(struct be_adapter *adapter, u16 qid, u16 posted)
{
	u32 val = 0;
	val |= qid & DB_TXULP_RING_ID_MASK;
	val |= (posted & DB_TXULP_NUM_POSTED_MASK) << DB_TXULP_NUM_POSTED_SHIFT;

	wmb();
	iowrite32(val, adapter->db + DB_TXULP1_OFFSET);
}

static void be_eq_notify(struct be_adapter *adapter, u16 qid,
		bool arm, bool clear_int, u16 num_popped)
{
	u32 val = 0;
	val |= qid & DB_EQ_RING_ID_MASK;
	val |= ((qid & DB_EQ_RING_ID_EXT_MASK) <<
			DB_EQ_RING_ID_EXT_MASK_SHIFT);

	if (adapter->eeh_err)
		return;

	if (arm)
		val |= 1 << DB_EQ_REARM_SHIFT;
	if (clear_int)
		val |= 1 << DB_EQ_CLR_SHIFT;
	val |= 1 << DB_EQ_EVNT_SHIFT;
	val |= num_popped << DB_EQ_NUM_POPPED_SHIFT;
	iowrite32(val, adapter->db + DB_EQ_OFFSET);
}

void be_cq_notify(struct be_adapter *adapter, u16 qid, bool arm, u16 num_popped)
{
	u32 val = 0;
	val |= qid & DB_CQ_RING_ID_MASK;
	val |= ((qid & DB_CQ_RING_ID_EXT_MASK) <<
			DB_CQ_RING_ID_EXT_MASK_SHIFT);

	if (adapter->eeh_err)
		return;

	if (arm)
		val |= 1 << DB_CQ_REARM_SHIFT;
	val |= num_popped << DB_CQ_NUM_POPPED_SHIFT;
	iowrite32(val, adapter->db + DB_CQ_OFFSET);
}

static int be_mac_addr_set(struct net_device *netdev, void *p)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct sockaddr *addr = p;
	int status = 0;

	if (!is_valid_ether_addr(addr->sa_data))
		return -EADDRNOTAVAIL;

	/* MAC addr configuration will be done in hardware for VFs
	 * by their corresponding PFs. Just copy to netdev addr here
	 */
	if (!be_physfn(adapter))
		goto netdev_addr;

	status = be_cmd_pmac_del(adapter, adapter->if_handle,
				adapter->pmac_id, 0);
	if (status)
		return status;

	status = be_cmd_pmac_add(adapter, (u8 *)addr->sa_data,
				adapter->if_handle, &adapter->pmac_id, 0);
netdev_addr:
	if (!status)
		memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);

	return status;
}

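/* The populate_*_stats() functions below copy the counters returned by
 * the GET_STATISTICS FW command into the driver's hw-generation-agnostic
 * be_drv_stats layout: v0 descriptors for BE2, v1 for BE3, and a
 * separate pport stats format for Lancer, which reports 64-bit counters
 * as hi/lo word pairs combined via make_64bit_val().
 */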
static void populate_be2_stats(struct be_adapter *adapter)
{
	struct be_drv_stats *drvs = &adapter->drv_stats;
	struct be_pmem_stats *pmem_sts = be_pmem_stats_from_cmd(adapter);
	struct be_port_rxf_stats_v0 *port_stats =
		be_port_rxf_stats_from_cmd(adapter);
	struct be_rxf_stats_v0 *rxf_stats =
		be_rxf_stats_from_cmd(adapter);

	drvs->rx_pause_frames = port_stats->rx_pause_frames;
	drvs->rx_crc_errors = port_stats->rx_crc_errors;
	drvs->rx_control_frames = port_stats->rx_control_frames;
	drvs->rx_in_range_errors = port_stats->rx_in_range_errors;
	drvs->rx_frame_too_long = port_stats->rx_frame_too_long;
	drvs->rx_dropped_runt = port_stats->rx_dropped_runt;
	drvs->rx_ip_checksum_errs = port_stats->rx_ip_checksum_errs;
	drvs->rx_tcp_checksum_errs = port_stats->rx_tcp_checksum_errs;
	drvs->rx_udp_checksum_errs = port_stats->rx_udp_checksum_errs;
	drvs->rxpp_fifo_overflow_drop = port_stats->rx_fifo_overflow;
	drvs->rx_dropped_tcp_length = port_stats->rx_dropped_tcp_length;
	drvs->rx_dropped_too_small = port_stats->rx_dropped_too_small;
	drvs->rx_dropped_too_short = port_stats->rx_dropped_too_short;
	drvs->rx_out_range_errors = port_stats->rx_out_range_errors;
	drvs->rx_input_fifo_overflow_drop =
		port_stats->rx_input_fifo_overflow;
	drvs->rx_dropped_header_too_small =
		port_stats->rx_dropped_header_too_small;
	drvs->rx_address_match_errors =
		port_stats->rx_address_match_errors;
	drvs->rx_alignment_symbol_errors =
		port_stats->rx_alignment_symbol_errors;

	drvs->tx_pauseframes = port_stats->tx_pauseframes;
	drvs->tx_controlframes = port_stats->tx_controlframes;

	if (adapter->port_num)
		drvs->jabber_events = rxf_stats->port1_jabber_events;
	else
		drvs->jabber_events = rxf_stats->port0_jabber_events;
	drvs->rx_drops_no_pbuf = rxf_stats->rx_drops_no_pbuf;
	drvs->rx_drops_no_txpb = rxf_stats->rx_drops_no_txpb;
	drvs->rx_drops_no_erx_descr = rxf_stats->rx_drops_no_erx_descr;
	drvs->rx_drops_invalid_ring = rxf_stats->rx_drops_invalid_ring;
	drvs->forwarded_packets = rxf_stats->forwarded_packets;
	drvs->rx_drops_mtu = rxf_stats->rx_drops_mtu;
	drvs->rx_drops_no_tpre_descr = rxf_stats->rx_drops_no_tpre_descr;
	drvs->rx_drops_too_many_frags = rxf_stats->rx_drops_too_many_frags;
	adapter->drv_stats.eth_red_drops = pmem_sts->eth_red_drops;
}

static void populate_be3_stats(struct be_adapter *adapter)
{
	struct be_drv_stats *drvs = &adapter->drv_stats;
	struct be_pmem_stats *pmem_sts = be_pmem_stats_from_cmd(adapter);
	struct be_rxf_stats_v1 *rxf_stats =
		be_rxf_stats_from_cmd(adapter);
	struct be_port_rxf_stats_v1 *port_stats =
		be_port_rxf_stats_from_cmd(adapter);

	drvs->rx_priority_pause_frames = 0;
	drvs->pmem_fifo_overflow_drop = 0;
	drvs->rx_pause_frames = port_stats->rx_pause_frames;
	drvs->rx_crc_errors = port_stats->rx_crc_errors;
	drvs->rx_control_frames = port_stats->rx_control_frames;
	drvs->rx_in_range_errors = port_stats->rx_in_range_errors;
	drvs->rx_frame_too_long = port_stats->rx_frame_too_long;
	drvs->rx_dropped_runt = port_stats->rx_dropped_runt;
	drvs->rx_ip_checksum_errs = port_stats->rx_ip_checksum_errs;
	drvs->rx_tcp_checksum_errs = port_stats->rx_tcp_checksum_errs;
	drvs->rx_udp_checksum_errs = port_stats->rx_udp_checksum_errs;
	drvs->rx_dropped_tcp_length = port_stats->rx_dropped_tcp_length;
	drvs->rx_dropped_too_small = port_stats->rx_dropped_too_small;
	drvs->rx_dropped_too_short = port_stats->rx_dropped_too_short;
	drvs->rx_out_range_errors = port_stats->rx_out_range_errors;
	drvs->rx_dropped_header_too_small =
		port_stats->rx_dropped_header_too_small;
	drvs->rx_input_fifo_overflow_drop =
		port_stats->rx_input_fifo_overflow_drop;
	drvs->rx_address_match_errors =
		port_stats->rx_address_match_errors;
	drvs->rx_alignment_symbol_errors =
		port_stats->rx_alignment_symbol_errors;
	drvs->rxpp_fifo_overflow_drop =
		port_stats->rxpp_fifo_overflow_drop;
	drvs->tx_pauseframes = port_stats->tx_pauseframes;
	drvs->tx_controlframes = port_stats->tx_controlframes;
	drvs->jabber_events = port_stats->jabber_events;
	drvs->rx_drops_no_pbuf = rxf_stats->rx_drops_no_pbuf;
	drvs->rx_drops_no_txpb = rxf_stats->rx_drops_no_txpb;
	drvs->rx_drops_no_erx_descr = rxf_stats->rx_drops_no_erx_descr;
	drvs->rx_drops_invalid_ring = rxf_stats->rx_drops_invalid_ring;
	drvs->forwarded_packets = rxf_stats->forwarded_packets;
	drvs->rx_drops_mtu = rxf_stats->rx_drops_mtu;
	drvs->rx_drops_no_tpre_descr = rxf_stats->rx_drops_no_tpre_descr;
	drvs->rx_drops_too_many_frags = rxf_stats->rx_drops_too_many_frags;
	adapter->drv_stats.eth_red_drops = pmem_sts->eth_red_drops;
}

static void populate_lancer_stats(struct be_adapter *adapter)
{
	struct be_drv_stats *drvs = &adapter->drv_stats;
	struct lancer_cmd_pport_stats *pport_stats =
		pport_stats_from_cmd(adapter);

	drvs->rx_priority_pause_frames = 0;
	drvs->pmem_fifo_overflow_drop = 0;
	drvs->rx_pause_frames =
		make_64bit_val(pport_stats->rx_pause_frames_hi,
				pport_stats->rx_pause_frames_lo);
	drvs->rx_crc_errors = make_64bit_val(pport_stats->rx_crc_errors_hi,
						pport_stats->rx_crc_errors_lo);
	drvs->rx_control_frames =
		make_64bit_val(pport_stats->rx_control_frames_hi,
				pport_stats->rx_control_frames_lo);
	drvs->rx_in_range_errors = pport_stats->rx_in_range_errors;
	drvs->rx_frame_too_long =
		make_64bit_val(pport_stats->rx_frames_too_long_hi,
				pport_stats->rx_frames_too_long_lo);
	drvs->rx_dropped_runt = pport_stats->rx_dropped_runt;
	drvs->rx_ip_checksum_errs = pport_stats->rx_ip_checksum_errors;
	drvs->rx_tcp_checksum_errs = pport_stats->rx_tcp_checksum_errors;
	drvs->rx_udp_checksum_errs = pport_stats->rx_udp_checksum_errors;
	drvs->rx_dropped_tcp_length =
		pport_stats->rx_dropped_invalid_tcp_length;
	drvs->rx_dropped_too_small = pport_stats->rx_dropped_too_small;
	drvs->rx_dropped_too_short = pport_stats->rx_dropped_too_short;
	drvs->rx_out_range_errors = pport_stats->rx_out_of_range_errors;
	drvs->rx_dropped_header_too_small =
		pport_stats->rx_dropped_header_too_small;
	drvs->rx_input_fifo_overflow_drop = pport_stats->rx_fifo_overflow;
	drvs->rx_address_match_errors = pport_stats->rx_address_match_errors;
	drvs->rx_alignment_symbol_errors =
		make_64bit_val(pport_stats->rx_symbol_errors_hi,
				pport_stats->rx_symbol_errors_lo);
	drvs->rxpp_fifo_overflow_drop = pport_stats->rx_fifo_overflow;
	drvs->tx_pauseframes = make_64bit_val(pport_stats->tx_pause_frames_hi,
						pport_stats->tx_pause_frames_lo);
	drvs->tx_controlframes =
		make_64bit_val(pport_stats->tx_control_frames_hi,
				pport_stats->tx_control_frames_lo);
	drvs->jabber_events = pport_stats->rx_jabbers;
	drvs->rx_drops_no_pbuf = 0;
	drvs->rx_drops_no_txpb = 0;
	drvs->rx_drops_no_erx_descr = 0;
	drvs->rx_drops_invalid_ring = pport_stats->rx_drops_invalid_queue;
	drvs->forwarded_packets = make_64bit_val(pport_stats->num_forwards_hi,
						pport_stats->num_forwards_lo);
	drvs->rx_drops_mtu = make_64bit_val(pport_stats->rx_drops_mtu_hi,
						pport_stats->rx_drops_mtu_lo);
	drvs->rx_drops_no_tpre_descr = 0;
	drvs->rx_drops_too_many_frags =
		make_64bit_val(pport_stats->rx_drops_too_many_frags_hi,
				pport_stats->rx_drops_too_many_frags_lo);
}

void be_parse_stats(struct be_adapter *adapter)
{
	if (adapter->generation == BE_GEN3) {
		if (lancer_chip(adapter))
			populate_lancer_stats(adapter);
		else
			populate_be3_stats(adapter);
	} else {
		populate_be2_stats(adapter);
	}
}

void netdev_stats_update(struct be_adapter *adapter)
{
	struct be_drv_stats *drvs = &adapter->drv_stats;
	struct net_device_stats *dev_stats = &adapter->netdev->stats;
	struct be_rx_obj *rxo;
	int i;

	memset(dev_stats, 0, sizeof(*dev_stats));
	for_all_rx_queues(adapter, rxo, i) {
		dev_stats->rx_packets += rx_stats(rxo)->rx_pkts;
		dev_stats->rx_bytes += rx_stats(rxo)->rx_bytes;
		dev_stats->multicast += rx_stats(rxo)->rx_mcast_pkts;
		/* no space in linux buffers: best possible approximation */
		if (adapter->generation == BE_GEN3) {
			if (!(lancer_chip(adapter))) {
				struct be_erx_stats_v1 *erx_stats =
					be_erx_stats_from_cmd(adapter);
				dev_stats->rx_dropped +=
				erx_stats->rx_drops_no_fragments[rxo->q.id];
			}
		} else {
			struct be_erx_stats_v0 *erx_stats =
				be_erx_stats_from_cmd(adapter);
			dev_stats->rx_dropped +=
				erx_stats->rx_drops_no_fragments[rxo->q.id];
		}
	}

	dev_stats->tx_packets = tx_stats(adapter)->be_tx_pkts;
	dev_stats->tx_bytes = tx_stats(adapter)->be_tx_bytes;

	/* bad pkts received */
	dev_stats->rx_errors = drvs->rx_crc_errors +
		drvs->rx_alignment_symbol_errors +
		drvs->rx_in_range_errors +
		drvs->rx_out_range_errors +
		drvs->rx_frame_too_long +
		drvs->rx_dropped_too_small +
		drvs->rx_dropped_too_short +
		drvs->rx_dropped_header_too_small +
		drvs->rx_dropped_tcp_length +
		drvs->rx_dropped_runt +
		drvs->rx_tcp_checksum_errs +
		drvs->rx_ip_checksum_errs +
		drvs->rx_udp_checksum_errs;

	/* detailed rx errors */
	dev_stats->rx_length_errors = drvs->rx_in_range_errors +
		drvs->rx_out_range_errors +
		drvs->rx_frame_too_long;

	dev_stats->rx_crc_errors = drvs->rx_crc_errors;

	/* frame alignment errors */
	dev_stats->rx_frame_errors = drvs->rx_alignment_symbol_errors;

	/* receiver fifo overrun */
	/* drops_no_pbuf is not per i/f; it's per BE card */
	dev_stats->rx_fifo_errors = drvs->rxpp_fifo_overflow_drop +
		drvs->rx_input_fifo_overflow_drop +
		drvs->rx_drops_no_pbuf;
}

void be_link_status_update(struct be_adapter *adapter, bool link_up)
{
	struct net_device *netdev = adapter->netdev;

	/* If link came up or went down */
	if (adapter->link_up != link_up) {
		adapter->link_speed = -1;
		if (link_up) {
			netif_carrier_on(netdev);
			printk(KERN_INFO "%s: Link up\n", netdev->name);
		} else {
			netif_carrier_off(netdev);
			printk(KERN_INFO "%s: Link down\n", netdev->name);
		}
		adapter->link_up = link_up;
	}
}

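/* Adaptive interrupt coalescing (AIC): the eqd value programmed below is
 * a heuristic derived from the measured RX frags/sec rate, clamped to the
 * event queue's [min_eqd, max_eqd] range; rates low enough to yield
 * eqd < 10 disable the delay entirely (eqd = 0).
 */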
/* Update the EQ delay in BE based on the RX frags consumed / sec */
static void be_rx_eqd_update(struct be_adapter *adapter, struct be_rx_obj *rxo)
{
	struct be_eq_obj *rx_eq = &rxo->rx_eq;
	struct be_rx_stats *stats = &rxo->stats;
	ulong now = jiffies;
	u32 eqd;

	if (!rx_eq->enable_aic)
		return;

	/* Wrapped around */
	if (time_before(now, stats->rx_fps_jiffies)) {
		stats->rx_fps_jiffies = now;
		return;
	}

	/* Update once a second */
	if ((now - stats->rx_fps_jiffies) < HZ)
		return;

	stats->rx_fps = (stats->rx_frags - stats->prev_rx_frags) /
			((now - stats->rx_fps_jiffies) / HZ);

	stats->rx_fps_jiffies = now;
	stats->prev_rx_frags = stats->rx_frags;
	eqd = stats->rx_fps / 110000;
	eqd = eqd << 3;
	if (eqd > rx_eq->max_eqd)
		eqd = rx_eq->max_eqd;
	if (eqd < rx_eq->min_eqd)
		eqd = rx_eq->min_eqd;
	if (eqd < 10)
		eqd = 0;
	if (eqd != rx_eq->cur_eqd)
		be_cmd_modify_eqd(adapter, rx_eq->q.id, eqd);

	rx_eq->cur_eqd = eqd;
}

static u32 be_calc_rate(u64 bytes, unsigned long ticks)
{
	u64 rate = bytes;

	do_div(rate, ticks / HZ);
	rate <<= 3;			/* bytes/sec -> bits/sec */
	do_div(rate, 1000000ul);	/* bits/sec -> Mbits/sec */

	return rate;
}

static void be_tx_rate_update(struct be_adapter *adapter)
{
	struct be_tx_stats *stats = tx_stats(adapter);
	ulong now = jiffies;

	/* Wrapped around? */
	if (time_before(now, stats->be_tx_jiffies)) {
		stats->be_tx_jiffies = now;
		return;
	}

	/* Update tx rate once in two seconds */
	if ((now - stats->be_tx_jiffies) > 2 * HZ) {
		stats->be_tx_rate = be_calc_rate(stats->be_tx_bytes
						  - stats->be_tx_bytes_prev,
						 now - stats->be_tx_jiffies);
		stats->be_tx_jiffies = now;
		stats->be_tx_bytes_prev = stats->be_tx_bytes;
	}
}

static void be_tx_stats_update(struct be_adapter *adapter,
			u32 wrb_cnt, u32 copied, u32 gso_segs, bool stopped)
{
	struct be_tx_stats *stats = tx_stats(adapter);
	stats->be_tx_reqs++;
	stats->be_tx_wrbs += wrb_cnt;
	stats->be_tx_bytes += copied;
	stats->be_tx_pkts += (gso_segs ? gso_segs : 1);
	if (stopped)
		stats->be_tx_stops++;
}

/* Determine number of WRB entries needed to xmit data in an skb */
static u32 wrb_cnt_for_skb(struct be_adapter *adapter, struct sk_buff *skb,
		bool *dummy)
{
	int cnt = (skb->len > skb->data_len);

	cnt += skb_shinfo(skb)->nr_frags;

	/* to account for hdr wrb */
	cnt++;
	if (lancer_chip(adapter) || !(cnt & 1)) {
		*dummy = false;
	} else {
		/* add a dummy to make it an even num */
		cnt++;
		*dummy = true;
	}
	BUG_ON(cnt > BE_MAX_TX_FRAG_COUNT);
	return cnt;
}

static inline void wrb_fill(struct be_eth_wrb *wrb, u64 addr, int len)
{
	wrb->frag_pa_hi = upper_32_bits(addr);
	wrb->frag_pa_lo = addr & 0xFFFFFFFF;
	wrb->frag_len = len & ETH_WRB_FRAG_LEN_MASK;
}

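/* The header WRB precedes the fragment WRBs of every transmit request
 * and carries the per-packet offload attributes: LSO/LSO6 with the gso
 * mss, TCP/UDP checksum offload, vlan tag insertion (with the priority
 * remapped to a recommended value when the OS-supplied priority is not
 * in the available bitmap), plus the total wrb count and byte length.
 */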
static void wrb_fill_hdr(struct be_adapter *adapter, struct be_eth_hdr_wrb *hdr,
		struct sk_buff *skb, u32 wrb_cnt, u32 len)
{
	u8 vlan_prio = 0;
	u16 vlan_tag = 0;

	memset(hdr, 0, sizeof(*hdr));

	AMAP_SET_BITS(struct amap_eth_hdr_wrb, crc, hdr, 1);

	if (skb_is_gso(skb)) {
		AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso, hdr, 1);
		AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso_mss,
			hdr, skb_shinfo(skb)->gso_size);
		if (skb_is_gso_v6(skb) && !lancer_chip(adapter))
			AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso6, hdr, 1);
		if (lancer_chip(adapter) && adapter->sli_family ==
						LANCER_A0_SLI_FAMILY) {
			AMAP_SET_BITS(struct amap_eth_hdr_wrb, ipcs, hdr, 1);
			if (is_tcp_pkt(skb))
				AMAP_SET_BITS(struct amap_eth_hdr_wrb,
						tcpcs, hdr, 1);
			else if (is_udp_pkt(skb))
				AMAP_SET_BITS(struct amap_eth_hdr_wrb,
						udpcs, hdr, 1);
		}
	} else if (skb->ip_summed == CHECKSUM_PARTIAL) {
		if (is_tcp_pkt(skb))
			AMAP_SET_BITS(struct amap_eth_hdr_wrb, tcpcs, hdr, 1);
		else if (is_udp_pkt(skb))
			AMAP_SET_BITS(struct amap_eth_hdr_wrb, udpcs, hdr, 1);
	}

	if (adapter->vlan_grp && vlan_tx_tag_present(skb)) {
		AMAP_SET_BITS(struct amap_eth_hdr_wrb, vlan, hdr, 1);
		vlan_tag = vlan_tx_tag_get(skb);
		vlan_prio = (vlan_tag & VLAN_PRIO_MASK) >> VLAN_PRIO_SHIFT;
		/* If vlan priority provided by OS is NOT in available bmap */
		if (!(adapter->vlan_prio_bmap & (1 << vlan_prio)))
			vlan_tag = (vlan_tag & ~VLAN_PRIO_MASK) |
					adapter->recommended_prio;
		AMAP_SET_BITS(struct amap_eth_hdr_wrb, vlan_tag, hdr, vlan_tag);
	}

	AMAP_SET_BITS(struct amap_eth_hdr_wrb, event, hdr, 1);
	AMAP_SET_BITS(struct amap_eth_hdr_wrb, complete, hdr, 1);
	AMAP_SET_BITS(struct amap_eth_hdr_wrb, num_wrb, hdr, wrb_cnt);
	AMAP_SET_BITS(struct amap_eth_hdr_wrb, len, hdr, len);
}

static void unmap_tx_frag(struct device *dev, struct be_eth_wrb *wrb,
		bool unmap_single)
{
	dma_addr_t dma;

	be_dws_le_to_cpu(wrb, sizeof(*wrb));

	dma = (u64)wrb->frag_pa_hi << 32 | (u64)wrb->frag_pa_lo;
	if (wrb->frag_len) {
		if (unmap_single)
			dma_unmap_single(dev, dma, wrb->frag_len,
					 DMA_TO_DEVICE);
		else
			dma_unmap_page(dev, dma, wrb->frag_len, DMA_TO_DEVICE);
	}
}

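/* Builds the WRB chain for an skb: one WRB for the linear head (mapped
 * with dma_map_single), one per page fragment (dma_map_page), plus an
 * optional dummy WRB to even out the count. On a DMA mapping failure the
 * queue head is rewound and every mapping made so far is undone.
 */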
static int make_tx_wrbs(struct be_adapter *adapter,
		struct sk_buff *skb, u32 wrb_cnt, bool dummy_wrb)
{
	dma_addr_t busaddr;
	int i, copied = 0;
	struct device *dev = &adapter->pdev->dev;
	struct sk_buff *first_skb = skb;
	struct be_queue_info *txq = &adapter->tx_obj.q;
	struct be_eth_wrb *wrb;
	struct be_eth_hdr_wrb *hdr;
	bool map_single = false;
	u16 map_head;

	hdr = queue_head_node(txq);
	queue_head_inc(txq);
	map_head = txq->head;

	if (skb->len > skb->data_len) {
		int len = skb_headlen(skb);
		busaddr = dma_map_single(dev, skb->data, len, DMA_TO_DEVICE);
		if (dma_mapping_error(dev, busaddr))
			goto dma_err;
		map_single = true;
		wrb = queue_head_node(txq);
		wrb_fill(wrb, busaddr, len);
		be_dws_cpu_to_le(wrb, sizeof(*wrb));
		queue_head_inc(txq);
		copied += len;
	}

	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		struct skb_frag_struct *frag =
			&skb_shinfo(skb)->frags[i];
		busaddr = dma_map_page(dev, frag->page, frag->page_offset,
				       frag->size, DMA_TO_DEVICE);
		if (dma_mapping_error(dev, busaddr))
			goto dma_err;
		wrb = queue_head_node(txq);
		wrb_fill(wrb, busaddr, frag->size);
		be_dws_cpu_to_le(wrb, sizeof(*wrb));
		queue_head_inc(txq);
		copied += frag->size;
	}

	if (dummy_wrb) {
		wrb = queue_head_node(txq);
		wrb_fill(wrb, 0, 0);
		be_dws_cpu_to_le(wrb, sizeof(*wrb));
		queue_head_inc(txq);
	}

	wrb_fill_hdr(adapter, hdr, first_skb, wrb_cnt, copied);
	be_dws_cpu_to_le(hdr, sizeof(*hdr));

	return copied;
dma_err:
	txq->head = map_head;
	while (copied) {
		wrb = queue_head_node(txq);
		unmap_tx_frag(dev, wrb, map_single);
		map_single = false;
		copied -= wrb->frag_len;
		queue_head_inc(txq);
	}
	return 0;
}

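/* ndo_start_xmit handler: counts the WRBs the skb needs, builds them,
 * records the skb for completion processing, stops the queue early if
 * the ring may not fit another maximally-fragmented skb, and only then
 * rings the TX doorbell.
 */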
static netdev_tx_t be_xmit(struct sk_buff *skb,
			struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_tx_obj *tx_obj = &adapter->tx_obj;
	struct be_queue_info *txq = &tx_obj->q;
	u32 wrb_cnt = 0, copied = 0;
	u32 start = txq->head;
	bool dummy_wrb, stopped = false;

	wrb_cnt = wrb_cnt_for_skb(adapter, skb, &dummy_wrb);

	copied = make_tx_wrbs(adapter, skb, wrb_cnt, dummy_wrb);
	if (copied) {
		/* record the sent skb in the sent_skb table */
		BUG_ON(tx_obj->sent_skb_list[start]);
		tx_obj->sent_skb_list[start] = skb;

		/* Ensure txq has space for the next skb; Else stop the queue
		 * *BEFORE* ringing the tx doorbell, so that we serialize the
		 * tx compls of the current transmit which'll wake up the queue
		 */
		atomic_add(wrb_cnt, &txq->used);
		if ((BE_MAX_TX_FRAG_COUNT + atomic_read(&txq->used)) >=
								txq->len) {
			netif_stop_queue(netdev);
			stopped = true;
		}

		be_txq_notify(adapter, txq->id, wrb_cnt);

		be_tx_stats_update(adapter, wrb_cnt, copied,
				skb_shinfo(skb)->gso_segs, stopped);
	} else {
		txq->head = start;
		dev_kfree_skb_any(skb);
	}
	return NETDEV_TX_OK;
}

static int be_change_mtu(struct net_device *netdev, int new_mtu)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	if (new_mtu < BE_MIN_MTU ||
			new_mtu > (BE_MAX_JUMBO_FRAME_SIZE -
					(ETH_HLEN + ETH_FCS_LEN))) {
		dev_info(&adapter->pdev->dev,
			"MTU must be between %d and %d bytes\n",
			BE_MIN_MTU,
			(BE_MAX_JUMBO_FRAME_SIZE - (ETH_HLEN + ETH_FCS_LEN)));
		return -EINVAL;
	}
	dev_info(&adapter->pdev->dev, "MTU changed from %d to %d bytes\n",
			netdev->mtu, new_mtu);
	netdev->mtu = new_mtu;
	return 0;
}

/*
 * A max of 64 (BE_NUM_VLANS_SUPPORTED) vlans can be configured in BE.
 * If the user configures more, place BE in vlan promiscuous mode.
 */
static int be_vid_config(struct be_adapter *adapter, bool vf, u32 vf_num)
{
	u16 vtag[BE_NUM_VLANS_SUPPORTED];
	u16 ntags = 0, i;
	int status = 0;
	u32 if_handle;

	if (vf) {
		if_handle = adapter->vf_cfg[vf_num].vf_if_handle;
		vtag[0] = cpu_to_le16(adapter->vf_cfg[vf_num].vf_vlan_tag);
		status = be_cmd_vlan_config(adapter, if_handle, vtag, 1, 1, 0);
	}

	if (adapter->vlans_added <= adapter->max_vlans) {
		/* Construct VLAN Table to give to HW */
		for (i = 0; i < VLAN_N_VID; i++) {
			if (adapter->vlan_tag[i]) {
				vtag[ntags] = cpu_to_le16(i);
				ntags++;
			}
		}
		status = be_cmd_vlan_config(adapter, adapter->if_handle,
					vtag, ntags, 1, 0);
	} else {
		status = be_cmd_vlan_config(adapter, adapter->if_handle,
					NULL, 0, 1, 1);
	}

	return status;
}

static void be_vlan_register(struct net_device *netdev, struct vlan_group *grp)
{
	struct be_adapter *adapter = netdev_priv(netdev);

	adapter->vlan_grp = grp;
}

static void be_vlan_add_vid(struct net_device *netdev, u16 vid)
{
	struct be_adapter *adapter = netdev_priv(netdev);

	adapter->vlans_added++;
	if (!be_physfn(adapter))
		return;

	adapter->vlan_tag[vid] = 1;
	if (adapter->vlans_added <= (adapter->max_vlans + 1))
		be_vid_config(adapter, false, 0);
}

static void be_vlan_rem_vid(struct net_device *netdev, u16 vid)
{
	struct be_adapter *adapter = netdev_priv(netdev);

	adapter->vlans_added--;
	vlan_group_set_device(adapter->vlan_grp, vid, NULL);

	if (!be_physfn(adapter))
		return;

	adapter->vlan_tag[vid] = 0;
	if (adapter->vlans_added <= adapter->max_vlans)
		be_vid_config(adapter, false, 0);
}

static void be_set_multicast_list(struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);

	if (netdev->flags & IFF_PROMISC) {
		be_cmd_promiscuous_config(adapter, true);
		adapter->promiscuous = true;
		goto done;
	}

	/* BE was previously in promiscuous mode; disable it */
	if (adapter->promiscuous) {
		adapter->promiscuous = false;
		be_cmd_promiscuous_config(adapter, false);
	}

	/* Enable multicast promisc if num configured exceeds what we support */
	if (netdev->flags & IFF_ALLMULTI ||
	    netdev_mc_count(netdev) > BE_MAX_MC) {
		be_cmd_multicast_set(adapter, adapter->if_handle, NULL,
				&adapter->mc_cmd_mem);
		goto done;
	}

	be_cmd_multicast_set(adapter, adapter->if_handle, netdev,
		&adapter->mc_cmd_mem);
done:
	return;
}

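/* SR-IOV ndo callbacks: the PF configures a VF's MAC, vlan and TX rate
 * on its behalf via FW commands issued against the VF's interface
 * handle, passing vf + 1 as the domain argument.
 */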
static int be_set_vf_mac(struct net_device *netdev, int vf, u8 *mac)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	int status;

	if (!adapter->sriov_enabled)
		return -EPERM;

	if (!is_valid_ether_addr(mac) || (vf >= num_vfs))
		return -EINVAL;

	if (adapter->vf_cfg[vf].vf_pmac_id != BE_INVALID_PMAC_ID)
		status = be_cmd_pmac_del(adapter,
					adapter->vf_cfg[vf].vf_if_handle,
					adapter->vf_cfg[vf].vf_pmac_id, vf + 1);

	status = be_cmd_pmac_add(adapter, mac,
				adapter->vf_cfg[vf].vf_if_handle,
				&adapter->vf_cfg[vf].vf_pmac_id, vf + 1);

	if (status)
		dev_err(&adapter->pdev->dev, "MAC %pM set on VF %d Failed\n",
			mac, vf);
	else
		memcpy(adapter->vf_cfg[vf].vf_mac_addr, mac, ETH_ALEN);

	return status;
}

static int be_get_vf_config(struct net_device *netdev, int vf,
			struct ifla_vf_info *vi)
{
	struct be_adapter *adapter = netdev_priv(netdev);

	if (!adapter->sriov_enabled)
		return -EPERM;

	if (vf >= num_vfs)
		return -EINVAL;

	vi->vf = vf;
	vi->tx_rate = adapter->vf_cfg[vf].vf_tx_rate;
	vi->vlan = adapter->vf_cfg[vf].vf_vlan_tag;
	vi->qos = 0;
	memcpy(&vi->mac, adapter->vf_cfg[vf].vf_mac_addr, ETH_ALEN);

	return 0;
}

static int be_set_vf_vlan(struct net_device *netdev,
			int vf, u16 vlan, u8 qos)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	int status = 0;

	if (!adapter->sriov_enabled)
		return -EPERM;

	if ((vf >= num_vfs) || (vlan > 4095))
		return -EINVAL;

	if (vlan) {
		adapter->vf_cfg[vf].vf_vlan_tag = vlan;
		adapter->vlans_added++;
	} else {
		adapter->vf_cfg[vf].vf_vlan_tag = 0;
		adapter->vlans_added--;
	}

	status = be_vid_config(adapter, true, vf);

	if (status)
		dev_info(&adapter->pdev->dev,
				"VLAN %d config on VF %d failed\n", vlan, vf);
	return status;
}

static int be_set_vf_tx_rate(struct net_device *netdev,
			int vf, int rate)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	int status = 0;

	if (!adapter->sriov_enabled)
		return -EPERM;

	if ((vf >= num_vfs) || (rate < 0))
		return -EINVAL;

	if (rate > 10000)
		rate = 10000;

	adapter->vf_cfg[vf].vf_tx_rate = rate;
	status = be_cmd_set_qos(adapter, rate / 10, vf + 1);

	if (status)
		dev_info(&adapter->pdev->dev,
				"tx rate %d on VF %d failed\n", rate, vf);
	return status;
}

static void be_rx_rate_update(struct be_rx_obj *rxo)
{
	struct be_rx_stats *stats = &rxo->stats;
	ulong now = jiffies;

	/* Wrapped around */
	if (time_before(now, stats->rx_jiffies)) {
		stats->rx_jiffies = now;
		return;
	}

	/* Update the rate once in two seconds */
	if ((now - stats->rx_jiffies) < 2 * HZ)
		return;

	stats->rx_rate = be_calc_rate(stats->rx_bytes - stats->rx_bytes_prev,
				now - stats->rx_jiffies);
	stats->rx_jiffies = now;
	stats->rx_bytes_prev = stats->rx_bytes;
}

static void be_rx_stats_update(struct be_rx_obj *rxo,
		struct be_rx_compl_info *rxcp)
{
	struct be_rx_stats *stats = &rxo->stats;

	stats->rx_compl++;
	stats->rx_frags += rxcp->num_rcvd;
	stats->rx_bytes += rxcp->pkt_size;
	stats->rx_pkts++;
	if (rxcp->pkt_type == BE_MULTICAST_PACKET)
		stats->rx_mcast_pkts++;
	if (rxcp->err)
		stats->rxcp_err++;
}

static inline bool csum_passed(struct be_rx_compl_info *rxcp)
{
	/* L4 checksum is not reliable for non TCP/UDP packets.
	 * Also ignore ipcksm for ipv6 pkts */
	return (rxcp->tcpf || rxcp->udpf) && rxcp->l4_csum &&
				(rxcp->ip_csum || rxcp->ipv6);
}

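/* An RX page may back more than one fragment posted to the RQ; the page
 * is DMA-unmapped only when its last user is reaped, as tracked by
 * page_info->last_page_user.
 */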
static struct be_rx_page_info *
get_rx_page_info(struct be_adapter *adapter,
		struct be_rx_obj *rxo,
		u16 frag_idx)
{
	struct be_rx_page_info *rx_page_info;
	struct be_queue_info *rxq = &rxo->q;

	rx_page_info = &rxo->page_info_tbl[frag_idx];
	BUG_ON(!rx_page_info->page);

	if (rx_page_info->last_page_user) {
		dma_unmap_page(&adapter->pdev->dev,
			       dma_unmap_addr(rx_page_info, bus),
			       adapter->big_page_size, DMA_FROM_DEVICE);
		rx_page_info->last_page_user = false;
	}

	atomic_dec(&rxq->used);
	return rx_page_info;
}

/* Throw away the data in the Rx completion */
static void be_rx_compl_discard(struct be_adapter *adapter,
		struct be_rx_obj *rxo,
		struct be_rx_compl_info *rxcp)
{
	struct be_queue_info *rxq = &rxo->q;
	struct be_rx_page_info *page_info;
	u16 i, num_rcvd = rxcp->num_rcvd;

	for (i = 0; i < num_rcvd; i++) {
		page_info = get_rx_page_info(adapter, rxo, rxcp->rxq_idx);
		put_page(page_info->page);
		memset(page_info, 0, sizeof(*page_info));
		index_inc(&rxcp->rxq_idx, rxq->len);
	}
}

/*
 * skb_fill_rx_data forms a complete skb for an ether frame
 * indicated by rxcp.
 */
static void skb_fill_rx_data(struct be_adapter *adapter, struct be_rx_obj *rxo,
			struct sk_buff *skb, struct be_rx_compl_info *rxcp)
{
	struct be_queue_info *rxq = &rxo->q;
	struct be_rx_page_info *page_info;
	u16 i, j;
	u16 hdr_len, curr_frag_len, remaining;
	u8 *start;

	page_info = get_rx_page_info(adapter, rxo, rxcp->rxq_idx);
	start = page_address(page_info->page) + page_info->page_offset;
	prefetch(start);

	/* Copy data in the first descriptor of this completion */
	curr_frag_len = min(rxcp->pkt_size, rx_frag_size);

	/* Copy the header portion into skb_data */
	hdr_len = min(BE_HDR_LEN, curr_frag_len);
	memcpy(skb->data, start, hdr_len);
	skb->len = curr_frag_len;
	if (curr_frag_len <= BE_HDR_LEN) { /* tiny packet */
		/* Complete packet has now been moved to data */
		put_page(page_info->page);
		skb->data_len = 0;
		skb->tail += curr_frag_len;
	} else {
		skb_shinfo(skb)->nr_frags = 1;
		skb_shinfo(skb)->frags[0].page = page_info->page;
		skb_shinfo(skb)->frags[0].page_offset =
					page_info->page_offset + hdr_len;
		skb_shinfo(skb)->frags[0].size = curr_frag_len - hdr_len;
		skb->data_len = curr_frag_len - hdr_len;
		skb->tail += hdr_len;
	}
	page_info->page = NULL;

	if (rxcp->pkt_size <= rx_frag_size) {
		BUG_ON(rxcp->num_rcvd != 1);
		return;
	}

	/* More frags present for this completion */
	index_inc(&rxcp->rxq_idx, rxq->len);
	remaining = rxcp->pkt_size - curr_frag_len;
	for (i = 1, j = 0; i < rxcp->num_rcvd; i++) {
		page_info = get_rx_page_info(adapter, rxo, rxcp->rxq_idx);
		curr_frag_len = min(remaining, rx_frag_size);

		/* Coalesce all frags from the same physical page in one slot */
		if (page_info->page_offset == 0) {
			/* Fresh page */
			j++;
			skb_shinfo(skb)->frags[j].page = page_info->page;
			skb_shinfo(skb)->frags[j].page_offset =
							page_info->page_offset;
			skb_shinfo(skb)->frags[j].size = 0;
			skb_shinfo(skb)->nr_frags++;
		} else {
			put_page(page_info->page);
		}

		skb_shinfo(skb)->frags[j].size += curr_frag_len;
		skb->len += curr_frag_len;
		skb->data_len += curr_frag_len;

		remaining -= curr_frag_len;
		index_inc(&rxcp->rxq_idx, rxq->len);
		page_info->page = NULL;
	}
	BUG_ON(j > MAX_SKB_FRAGS);
}

/* Process the RX completion indicated by rxcp when GRO is disabled */
static void be_rx_compl_process(struct be_adapter *adapter,
			struct be_rx_obj *rxo,
			struct be_rx_compl_info *rxcp)
{
	struct net_device *netdev = adapter->netdev;
	struct sk_buff *skb;

	skb = netdev_alloc_skb_ip_align(netdev, BE_HDR_LEN);
	if (unlikely(!skb)) {
		if (net_ratelimit())
			dev_warn(&adapter->pdev->dev, "skb alloc failed\n");
		be_rx_compl_discard(adapter, rxo, rxcp);
		return;
	}

	skb_fill_rx_data(adapter, rxo, skb, rxcp);

	if (likely((netdev->features & NETIF_F_RXCSUM) && csum_passed(rxcp)))
		skb->ip_summed = CHECKSUM_UNNECESSARY;
	else
		skb_checksum_none_assert(skb);

	skb->truesize = skb->len + sizeof(struct sk_buff);
	skb->protocol = eth_type_trans(skb, netdev);
	if (adapter->netdev->features & NETIF_F_RXHASH)
		skb->rxhash = rxcp->rss_hash;

	if (unlikely(rxcp->vlanf)) {
		if (!adapter->vlan_grp || adapter->vlans_added == 0) {
			kfree_skb(skb);
			return;
		}
		vlan_hwaccel_receive_skb(skb, adapter->vlan_grp,
					 rxcp->vlan_tag);
	} else {
		netif_receive_skb(skb);
	}
}

Ajit Khaparde5be93b92009-07-21 12:36:19 -07001211/* Process the RX completion indicated by rxcp when GRO is enabled */
1212static void be_rx_compl_process_gro(struct be_adapter *adapter,
Sathya Perla3abcded2010-10-03 22:12:27 -07001213 struct be_rx_obj *rxo,
Sathya Perla2e588f82011-03-11 02:49:26 +00001214 struct be_rx_compl_info *rxcp)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001215{
1216 struct be_rx_page_info *page_info;
Ajit Khaparde5be93b92009-07-21 12:36:19 -07001217 struct sk_buff *skb = NULL;
Sathya Perla3abcded2010-10-03 22:12:27 -07001218 struct be_queue_info *rxq = &rxo->q;
1219 struct be_eq_obj *eq_obj = &rxo->rx_eq;
Sathya Perla2e588f82011-03-11 02:49:26 +00001220 u16 remaining, curr_frag_len;
1221 u16 i, j;
Ajit Khaparde3968fa12011-02-20 11:41:53 +00001222
Ajit Khaparde5be93b92009-07-21 12:36:19 -07001223 skb = napi_get_frags(&eq_obj->napi);
1224 if (!skb) {
Sathya Perla3abcded2010-10-03 22:12:27 -07001225 be_rx_compl_discard(adapter, rxo, rxcp);
Ajit Khaparde5be93b92009-07-21 12:36:19 -07001226 return;
1227 }
1228
Sathya Perla2e588f82011-03-11 02:49:26 +00001229 remaining = rxcp->pkt_size;
1230 for (i = 0, j = -1; i < rxcp->num_rcvd; i++) {
1231 page_info = get_rx_page_info(adapter, rxo, rxcp->rxq_idx);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001232
1233 curr_frag_len = min(remaining, rx_frag_size);
1234
Ajit Khapardebd46cb62009-06-26 02:51:07 +00001235 /* Coalesce all frags from the same physical page in one slot */
1236 if (i == 0 || page_info->page_offset == 0) {
1237 /* First frag or Fresh page */
1238 j++;
Ajit Khaparde5be93b92009-07-21 12:36:19 -07001239 skb_shinfo(skb)->frags[j].page = page_info->page;
1240 skb_shinfo(skb)->frags[j].page_offset =
1241 page_info->page_offset;
1242 skb_shinfo(skb)->frags[j].size = 0;
Ajit Khapardebd46cb62009-06-26 02:51:07 +00001243 } else {
1244 put_page(page_info->page);
1245 }
Ajit Khaparde5be93b92009-07-21 12:36:19 -07001246 skb_shinfo(skb)->frags[j].size += curr_frag_len;
Ajit Khapardebd46cb62009-06-26 02:51:07 +00001247
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001248 remaining -= curr_frag_len;
Sathya Perla2e588f82011-03-11 02:49:26 +00001249 index_inc(&rxcp->rxq_idx, rxq->len);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001250 memset(page_info, 0, sizeof(*page_info));
1251 }
Ajit Khapardebd46cb62009-06-26 02:51:07 +00001252 BUG_ON(j > MAX_SKB_FRAGS);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001253
Ajit Khaparde5be93b92009-07-21 12:36:19 -07001254 skb_shinfo(skb)->nr_frags = j + 1;
Sathya Perla2e588f82011-03-11 02:49:26 +00001255 skb->len = rxcp->pkt_size;
1256 skb->data_len = rxcp->pkt_size;
1257 skb->truesize += rxcp->pkt_size;
Ajit Khaparde5be93b92009-07-21 12:36:19 -07001258 skb->ip_summed = CHECKSUM_UNNECESSARY;
Ajit Khaparde4b972912011-04-06 18:07:43 +00001259 if (adapter->netdev->features & NETIF_F_RXHASH)
1260 skb->rxhash = rxcp->rss_hash;
Ajit Khaparde5be93b92009-07-21 12:36:19 -07001261
Sathya Perla2e588f82011-03-11 02:49:26 +00001262 if (likely(!rxcp->vlanf))
Ajit Khaparde5be93b92009-07-21 12:36:19 -07001263 napi_gro_frags(&eq_obj->napi);
Sathya Perla2e588f82011-03-11 02:49:26 +00001264 else
Somnath Kotur6709d952011-05-04 22:40:46 +00001265 vlan_gro_frags(&eq_obj->napi, adapter->vlan_grp,
1266 rxcp->vlan_tag);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001267}
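
/*
 * Illustrative sketch, not part of the driver: the frag-coalescing rule
 * used by be_rx_compl_process_gro() above. Consecutive rx fragments cut
 * from the same physical page (page_offset != 0) extend the current skb
 * frag; a new slot (j) is opened only for the first frag or a fresh page.
 */
static int demo_coalesce_slots(const u32 *page_offsets, int nfrags)
{
	int i, slots = 0;

	for (i = 0; i < nfrags; i++)
		if (i == 0 || page_offsets[i] == 0)
			slots++;	/* becomes skb_shinfo(skb)->nr_frags */
	return slots;
}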
1268
Sathya Perla2e588f82011-03-11 02:49:26 +00001269static void be_parse_rx_compl_v1(struct be_adapter *adapter,
1270 struct be_eth_rx_compl *compl,
1271 struct be_rx_compl_info *rxcp)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001272{
Sathya Perla2e588f82011-03-11 02:49:26 +00001273 rxcp->pkt_size =
1274 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, pktsize, compl);
1275 rxcp->vlanf = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, vtp, compl);
1276 rxcp->err = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, err, compl);
1277 rxcp->tcpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, tcpf, compl);
Padmanabh Ratnakar9ecb42f2011-03-15 14:57:09 -07001278 rxcp->udpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, udpf, compl);
Sathya Perla2e588f82011-03-11 02:49:26 +00001279 rxcp->ip_csum =
1280 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, ipcksm, compl);
1281 rxcp->l4_csum =
1282 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, l4_cksm, compl);
1283 rxcp->ipv6 =
1284 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, ip_version, compl);
1285 rxcp->rxq_idx =
1286 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, fragndx, compl);
1287 rxcp->num_rcvd =
1288 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, numfrags, compl);
1289 rxcp->pkt_type =
1290 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, cast_enc, compl);
Ajit Khaparde4b972912011-04-06 18:07:43 +00001291 rxcp->rss_hash =
1292 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, rsshash, compl);
Sathya Perla15d72182011-03-21 20:49:26 +00001293 if (rxcp->vlanf) {
1294 rxcp->vtm = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, vtm,
David S. Miller3c709f82011-05-11 14:26:15 -04001295 compl);
1296 rxcp->vlan_tag = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, vlan_tag,
1297 compl);
Sathya Perla15d72182011-03-21 20:49:26 +00001298 }
Sathya Perla2e588f82011-03-11 02:49:26 +00001299}
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001300
Sathya Perla2e588f82011-03-11 02:49:26 +00001301static void be_parse_rx_compl_v0(struct be_adapter *adapter,
1302 struct be_eth_rx_compl *compl,
1303 struct be_rx_compl_info *rxcp)
1304{
1305 rxcp->pkt_size =
1306 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, pktsize, compl);
1307 rxcp->vlanf = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, vtp, compl);
1308 rxcp->err = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, err, compl);
1309 rxcp->tcpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, tcpf, compl);
Padmanabh Ratnakar9ecb42f2011-03-15 14:57:09 -07001310 rxcp->udpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, udpf, compl);
Sathya Perla2e588f82011-03-11 02:49:26 +00001311 rxcp->ip_csum =
1312 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, ipcksm, compl);
1313 rxcp->l4_csum =
1314 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, l4_cksm, compl);
1315 rxcp->ipv6 =
1316 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, ip_version, compl);
1317 rxcp->rxq_idx =
1318 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, fragndx, compl);
1319 rxcp->num_rcvd =
1320 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, numfrags, compl);
1321 rxcp->pkt_type =
1322 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, cast_enc, compl);
Ajit Khaparde4b972912011-04-06 18:07:43 +00001323 rxcp->rss_hash =
1324 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, rsshash, compl);
Sathya Perla15d72182011-03-21 20:49:26 +00001325 if (rxcp->vlanf) {
1326 rxcp->vtm = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, vtm,
David S. Miller3c709f82011-05-11 14:26:15 -04001327 compl);
1328 rxcp->vlan_tag = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, vlan_tag,
1329 compl);
Sathya Perla15d72182011-03-21 20:49:26 +00001330 }
Sathya Perla2e588f82011-03-11 02:49:26 +00001331}
1332
1333static struct be_rx_compl_info *be_rx_compl_get(struct be_rx_obj *rxo)
1334{
1335 struct be_eth_rx_compl *compl = queue_tail_node(&rxo->cq);
1336 struct be_rx_compl_info *rxcp = &rxo->rxcp;
1337 struct be_adapter *adapter = rxo->adapter;
1338
1339 /* For checking the valid bit it is OK to use either definition, as the
1340 * valid bit is at the same position in both v0 and v1 Rx compl */
1341 if (compl->dw[offsetof(struct amap_eth_rx_compl_v1, valid) / 32] == 0)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001342 return NULL;
1343
Sathya Perlaf3eb62d2010-06-29 00:11:17 +00001344 rmb();
Sathya Perla2e588f82011-03-11 02:49:26 +00001345 be_dws_le_to_cpu(compl, sizeof(*compl));
1346
1347 if (adapter->be3_native)
1348 be_parse_rx_compl_v1(adapter, compl, rxcp);
1349 else
1350 be_parse_rx_compl_v0(adapter, compl, rxcp);
1351
Sathya Perla15d72182011-03-21 20:49:26 +00001352 if (rxcp->vlanf) {
1353 /* vlanf could be wrongly set on some cards;
1354 * ignore it if vtm is not set */
1355 if ((adapter->function_mode & 0x400) && !rxcp->vtm)
1356 rxcp->vlanf = 0;
Sathya Perla2e588f82011-03-11 02:49:26 +00001357
Sathya Perla15d72182011-03-21 20:49:26 +00001358 if (!lancer_chip(adapter))
David S. Miller3c709f82011-05-11 14:26:15 -04001359 rxcp->vlan_tag = swab16(rxcp->vlan_tag);
Sathya Perla2e588f82011-03-11 02:49:26 +00001360
David S. Miller3c709f82011-05-11 14:26:15 -04001361 if (((adapter->pvid & VLAN_VID_MASK) ==
1362 (rxcp->vlan_tag & VLAN_VID_MASK)) &&
1363 !adapter->vlan_tag[rxcp->vlan_tag])
Sathya Perla15d72182011-03-21 20:49:26 +00001364 rxcp->vlanf = 0;
1365 }
Sathya Perla2e588f82011-03-11 02:49:26 +00001366
1367 /* As the compl has been parsed, reset it; we won't touch it again */
1368 compl->dw[offsetof(struct amap_eth_rx_compl_v1, valid) / 32] = 0;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001369
Sathya Perla3abcded2010-10-03 22:12:27 -07001370 queue_tail_inc(&rxo->cq);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001371 return rxcp;
1372}
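
/*
 * Illustrative sketch, not part of the driver: the valid-bit handshake
 * implemented by be_rx_compl_get() above. Hardware sets the valid bit
 * last; the driver checks it, issues a read barrier before using the
 * rest of the entry, clears it so the slot can be reused, and advances
 * the tail. All names below are hypothetical.
 */
struct demo_compl {
	u32 valid;
	u32 payload;
};

static struct demo_compl *demo_compl_get(struct demo_compl *ring,
					 u16 *tail, u16 ring_len)
{
	struct demo_compl *c = &ring[*tail];

	if (!c->valid)
		return NULL;		/* nothing new from hardware */
	rmb();				/* order payload reads after valid */
	c->valid = 0;			/* reset: won't be parsed twice */
	*tail = (*tail + 1) % ring_len;
	return c;
}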
1373
Eric Dumazet1829b082011-03-01 05:48:12 +00001374static inline struct page *be_alloc_pages(u32 size, gfp_t gfp)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001375{
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001376 u32 order = get_order(size);
Eric Dumazet1829b082011-03-01 05:48:12 +00001377
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001378 if (order > 0)
Eric Dumazet1829b082011-03-01 05:48:12 +00001379 gfp |= __GFP_COMP;
1380 return alloc_pages(gfp, order);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001381}
1382
1383/*
1384 * Allocate a page, split it to fragments of size rx_frag_size and post as
1385 * receive buffers to BE
1386 */
Eric Dumazet1829b082011-03-01 05:48:12 +00001387static void be_post_rx_frags(struct be_rx_obj *rxo, gfp_t gfp)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001388{
Sathya Perla3abcded2010-10-03 22:12:27 -07001389 struct be_adapter *adapter = rxo->adapter;
1390 struct be_rx_page_info *page_info_tbl = rxo->page_info_tbl;
Sathya Perla26d92f92010-01-21 22:52:08 -08001391 struct be_rx_page_info *page_info = NULL, *prev_page_info = NULL;
Sathya Perla3abcded2010-10-03 22:12:27 -07001392 struct be_queue_info *rxq = &rxo->q;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001393 struct page *pagep = NULL;
1394 struct be_eth_rx_d *rxd;
1395 u64 page_dmaaddr = 0, frag_dmaaddr;
1396 u32 posted, page_offset = 0;
1397
Sathya Perla3abcded2010-10-03 22:12:27 -07001398 page_info = &rxo->page_info_tbl[rxq->head];
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001399 for (posted = 0; posted < MAX_RX_POST && !page_info->page; posted++) {
1400 if (!pagep) {
Eric Dumazet1829b082011-03-01 05:48:12 +00001401 pagep = be_alloc_pages(adapter->big_page_size, gfp);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001402 if (unlikely(!pagep)) {
Sathya Perla3abcded2010-10-03 22:12:27 -07001403 rxo->stats.rx_post_fail++;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001404 break;
1405 }
Ivan Vecera2b7bceb2011-02-02 08:05:12 +00001406 page_dmaaddr = dma_map_page(&adapter->pdev->dev, pagep,
1407 0, adapter->big_page_size,
1408 DMA_FROM_DEVICE);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001409 page_info->page_offset = 0;
1410 } else {
1411 get_page(pagep);
1412 page_info->page_offset = page_offset + rx_frag_size;
1413 }
1414 page_offset = page_info->page_offset;
1415 page_info->page = pagep;
FUJITA Tomonorifac6da52010-04-01 16:53:22 +00001416 dma_unmap_addr_set(page_info, bus, page_dmaaddr);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001417 frag_dmaaddr = page_dmaaddr + page_info->page_offset;
1418
1419 rxd = queue_head_node(rxq);
1420 rxd->fragpa_lo = cpu_to_le32(frag_dmaaddr & 0xFFFFFFFF);
1421 rxd->fragpa_hi = cpu_to_le32(upper_32_bits(frag_dmaaddr));
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001422
1423 /* Any space left in the current big page for another frag? */
1424 if ((page_offset + rx_frag_size + rx_frag_size) >
1425 adapter->big_page_size) {
1426 pagep = NULL;
1427 page_info->last_page_user = true;
1428 }
Sathya Perla26d92f92010-01-21 22:52:08 -08001429
1430 prev_page_info = page_info;
1431 queue_head_inc(rxq);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001432 page_info = &page_info_tbl[rxq->head];
1433 }
1434 if (pagep)
Sathya Perla26d92f92010-01-21 22:52:08 -08001435 prev_page_info->last_page_user = true;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001436
1437 if (posted) {
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001438 atomic_add(posted, &rxq->used);
Sathya Perla8788fdc2009-07-27 22:52:03 +00001439 be_rxq_notify(adapter, rxq->id, posted);
Sathya Perlaea1dae12009-03-19 23:56:20 -07001440 } else if (atomic_read(&rxq->used) == 0) {
1441 /* Let be_worker replenish when memory is available */
Sathya Perla3abcded2010-10-03 22:12:27 -07001442 rxo->rx_post_starved = true;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001443 }
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001444}
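
/*
 * Illustrative sketch, not part of the driver: how be_post_rx_frags()
 * above carves one "big page" into rx_frag_size chunks. With a big page
 * of 8192 bytes and 2048-byte frags, the offsets posted are 0, 2048,
 * 4096 and 6144; the last frag on a page marks last_page_user.
 */
static u32 demo_frags_per_page(u32 big_page_size, u32 frag_size)
{
	u32 offset, nfrags = 0;

	for (offset = 0; offset + frag_size <= big_page_size;
	     offset += frag_size)
		nfrags++;	/* each offset backs one rx descriptor */
	return nfrags;		/* 4 for the sizes quoted above */
}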
1445
Sathya Perla5fb379e2009-06-18 00:02:59 +00001446static struct be_eth_tx_compl *be_tx_compl_get(struct be_queue_info *tx_cq)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001447{
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001448 struct be_eth_tx_compl *txcp = queue_tail_node(tx_cq);
1449
1450 if (txcp->dw[offsetof(struct amap_eth_tx_compl, valid) / 32] == 0)
1451 return NULL;
1452
Sathya Perlaf3eb62d2010-06-29 00:11:17 +00001453 rmb();
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001454 be_dws_le_to_cpu(txcp, sizeof(*txcp));
1455
1456 txcp->dw[offsetof(struct amap_eth_tx_compl, valid) / 32] = 0;
1457
1458 queue_tail_inc(tx_cq);
1459 return txcp;
1460}
1461
Padmanabh Ratnakar4d586b82011-05-10 05:13:57 +00001462static u16 be_tx_compl_process(struct be_adapter *adapter, u16 last_index)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001463{
1464 struct be_queue_info *txq = &adapter->tx_obj.q;
Alexander Duycka73b7962009-12-02 16:48:18 +00001465 struct be_eth_wrb *wrb;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001466 struct sk_buff **sent_skbs = adapter->tx_obj.sent_skb_list;
1467 struct sk_buff *sent_skb;
Sathya Perlaec43b1a2010-03-22 20:41:34 +00001468 u16 cur_index, num_wrbs = 1; /* account for hdr wrb */
1469 bool unmap_skb_hdr = true;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001470
Sathya Perlaec43b1a2010-03-22 20:41:34 +00001471 sent_skb = sent_skbs[txq->tail];
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001472 BUG_ON(!sent_skb);
Sathya Perlaec43b1a2010-03-22 20:41:34 +00001473 sent_skbs[txq->tail] = NULL;
1474
1475 /* skip header wrb */
Alexander Duycka73b7962009-12-02 16:48:18 +00001476 queue_tail_inc(txq);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001477
Sathya Perlaec43b1a2010-03-22 20:41:34 +00001478 do {
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001479 cur_index = txq->tail;
Alexander Duycka73b7962009-12-02 16:48:18 +00001480 wrb = queue_tail_node(txq);
Ivan Vecera2b7bceb2011-02-02 08:05:12 +00001481 unmap_tx_frag(&adapter->pdev->dev, wrb,
1482 (unmap_skb_hdr && skb_headlen(sent_skb)));
Sathya Perlaec43b1a2010-03-22 20:41:34 +00001483 unmap_skb_hdr = false;
1484
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001485 num_wrbs++;
1486 queue_tail_inc(txq);
Sathya Perlaec43b1a2010-03-22 20:41:34 +00001487 } while (cur_index != last_index);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001488
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001489 kfree_skb(sent_skb);
Padmanabh Ratnakar4d586b82011-05-10 05:13:57 +00001490 return num_wrbs;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001491}
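
/*
 * Illustrative sketch, not part of the driver: a TX request occupies a
 * header WRB plus one WRB per DMA fragment, so be_tx_compl_process()
 * above walks from txq->tail up to and including last_index, wrapping
 * modulo the ring length.
 */
static u16 demo_wrbs_reclaimed(u16 tail, u16 last_index, u16 ring_len)
{
	/* +1 because the entry at last_index is consumed too */
	return (last_index - tail + ring_len) % ring_len + 1;
}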
1492
Sathya Perla859b1e42009-08-10 03:43:51 +00001493static inline struct be_eq_entry *event_get(struct be_eq_obj *eq_obj)
1494{
1495 struct be_eq_entry *eqe = queue_tail_node(&eq_obj->q);
1496
1497 if (!eqe->evt)
1498 return NULL;
1499
Sathya Perlaf3eb62d2010-06-29 00:11:17 +00001500 rmb();
Sathya Perla859b1e42009-08-10 03:43:51 +00001501 eqe->evt = le32_to_cpu(eqe->evt);
1502 queue_tail_inc(&eq_obj->q);
1503 return eqe;
1504}
1505
1506static int event_handle(struct be_adapter *adapter,
1507 struct be_eq_obj *eq_obj)
1508{
1509 struct be_eq_entry *eqe;
1510 u16 num = 0;
1511
1512 while ((eqe = event_get(eq_obj)) != NULL) {
1513 eqe->evt = 0;
1514 num++;
1515 }
1516
1517 /* Deal with any spurious interrupts that come
1518 * without events
1519 */
1520 be_eq_notify(adapter, eq_obj->q.id, true, true, num);
1521 if (num)
1522 napi_schedule(&eq_obj->napi);
1523
1524 return num;
1525}
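
/*
 * Illustrative sketch, not part of the driver: be_eq_notify() above is a
 * doorbell write that acknowledges 'num' event entries and optionally
 * re-arms the EQ. The bit layout below is invented for illustration and
 * is not the real BladeEngine register format.
 */
static u32 demo_eq_doorbell(u16 qid, bool rearm, bool clear, u16 num_popped)
{
	u32 val = qid;			/* queue id in the low bits */

	if (rearm)
		val |= 1 << 29;		/* fire an interrupt on next event */
	if (clear)
		val |= 1 << 9;		/* clear the pending interrupt */
	val |= (u32)num_popped << 16;	/* entries being acknowledged */
	return val;			/* would be iowrite32()'d */
}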
1526
1527/* Just read and notify events without processing them.
1528 * Used at the time of destroying event queues */
1529static void be_eq_clean(struct be_adapter *adapter,
1530 struct be_eq_obj *eq_obj)
1531{
1532 struct be_eq_entry *eqe;
1533 u16 num = 0;
1534
1535 while ((eqe = event_get(eq_obj)) != NULL) {
1536 eqe->evt = 0;
1537 num++;
1538 }
1539
1540 if (num)
1541 be_eq_notify(adapter, eq_obj->q.id, false, true, num);
1542}
1543
Sathya Perla3abcded2010-10-03 22:12:27 -07001544static void be_rx_q_clean(struct be_adapter *adapter, struct be_rx_obj *rxo)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001545{
1546 struct be_rx_page_info *page_info;
Sathya Perla3abcded2010-10-03 22:12:27 -07001547 struct be_queue_info *rxq = &rxo->q;
1548 struct be_queue_info *rx_cq = &rxo->cq;
Sathya Perla2e588f82011-03-11 02:49:26 +00001549 struct be_rx_compl_info *rxcp;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001550 u16 tail;
1551
1552 /* First, clean up pending rx completions */
Sathya Perla3abcded2010-10-03 22:12:27 -07001553 while ((rxcp = be_rx_compl_get(rxo)) != NULL) {
1554 be_rx_compl_discard(adapter, rxo, rxcp);
Sathya Perla64642812010-12-01 01:04:17 +00001555 be_cq_notify(adapter, rx_cq->id, false, 1);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001556 }
1557
1558 /* Then free posted rx buffers that were not used */
1559 tail = (rxq->head + rxq->len - atomic_read(&rxq->used)) % rxq->len;
Sathya Perlacdab23b2009-08-10 03:43:23 +00001560 for (; atomic_read(&rxq->used) > 0; index_inc(&tail, rxq->len)) {
Sathya Perla3abcded2010-10-03 22:12:27 -07001561 page_info = get_rx_page_info(adapter, rxo, tail);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001562 put_page(page_info->page);
1563 memset(page_info, 0, sizeof(*page_info));
1564 }
1565 BUG_ON(atomic_read(&rxq->used));
1566}
1567
Sathya Perlaa8e91792009-08-10 03:42:43 +00001568static void be_tx_compl_clean(struct be_adapter *adapter)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001569{
Sathya Perlaa8e91792009-08-10 03:42:43 +00001570 struct be_queue_info *tx_cq = &adapter->tx_obj.cq;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001571 struct be_queue_info *txq = &adapter->tx_obj.q;
Sathya Perlaa8e91792009-08-10 03:42:43 +00001572 struct be_eth_tx_compl *txcp;
Padmanabh Ratnakar4d586b82011-05-10 05:13:57 +00001573 u16 end_idx, cmpl = 0, timeo = 0, num_wrbs = 0;
Sathya Perlab03388d2010-02-18 00:37:17 +00001574 struct sk_buff **sent_skbs = adapter->tx_obj.sent_skb_list;
1575 struct sk_buff *sent_skb;
1576 bool dummy_wrb;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001577
Sathya Perlaa8e91792009-08-10 03:42:43 +00001578 /* Wait for a max of 200ms for all the tx-completions to arrive. */
1579 do {
1580 while ((txcp = be_tx_compl_get(tx_cq))) {
1581 end_idx = AMAP_GET_BITS(struct amap_eth_tx_compl,
1582 wrb_index, txcp);
Padmanabh Ratnakar4d586b82011-05-10 05:13:57 +00001583 num_wrbs += be_tx_compl_process(adapter, end_idx);
Sathya Perlaa8e91792009-08-10 03:42:43 +00001584 cmpl++;
1585 }
1586 if (cmpl) {
1587 be_cq_notify(adapter, tx_cq->id, false, cmpl);
Padmanabh Ratnakar4d586b82011-05-10 05:13:57 +00001588 atomic_sub(num_wrbs, &txq->used);
Sathya Perlaa8e91792009-08-10 03:42:43 +00001589 cmpl = 0;
Padmanabh Ratnakar4d586b82011-05-10 05:13:57 +00001590 num_wrbs = 0;
Sathya Perlaa8e91792009-08-10 03:42:43 +00001591 }
1592
1593 if (atomic_read(&txq->used) == 0 || ++timeo > 200)
1594 break;
1595
1596 mdelay(1);
1597 } while (true);
1598
1599 if (atomic_read(&txq->used))
1600 dev_err(&adapter->pdev->dev, "%d pending tx-completions\n",
1601 atomic_read(&txq->used));
Sathya Perlab03388d2010-02-18 00:37:17 +00001602
1603 /* Free posted tx skbs for which completions will never arrive */
1604 while (atomic_read(&txq->used)) {
1605 sent_skb = sent_skbs[txq->tail];
1606 end_idx = txq->tail;
1607 index_adv(&end_idx,
Sathya Perlafe6d2a32010-11-21 23:25:50 +00001608 wrb_cnt_for_skb(adapter, sent_skb, &dummy_wrb) - 1,
1609 txq->len);
Padmanabh Ratnakar4d586b82011-05-10 05:13:57 +00001610 num_wrbs = be_tx_compl_process(adapter, end_idx);
1611 atomic_sub(num_wrbs, &txq->used);
Sathya Perlab03388d2010-02-18 00:37:17 +00001612 }
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001613}
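
/*
 * Illustrative sketch, not part of the driver: the bounded-wait shape of
 * be_tx_compl_clean() above -- poll for completions for up to 200 x 1ms,
 * then let the caller force-free whatever never completed.
 */
static bool demo_wait_drained(int (*pending)(void *ctx), void *ctx)
{
	int timeo = 0;

	while (pending(ctx)) {
		if (++timeo > 200)
			return false;	/* give up; reclaim by hand */
		mdelay(1);
	}
	return true;
}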
1614
Sathya Perla5fb379e2009-06-18 00:02:59 +00001615static void be_mcc_queues_destroy(struct be_adapter *adapter)
1616{
1617 struct be_queue_info *q;
Sathya Perla5fb379e2009-06-18 00:02:59 +00001618
Sathya Perla8788fdc2009-07-27 22:52:03 +00001619 q = &adapter->mcc_obj.q;
Sathya Perla5fb379e2009-06-18 00:02:59 +00001620 if (q->created)
Sathya Perla8788fdc2009-07-27 22:52:03 +00001621 be_cmd_q_destroy(adapter, q, QTYPE_MCCQ);
Sathya Perla5fb379e2009-06-18 00:02:59 +00001622 be_queue_free(adapter, q);
1623
Sathya Perla8788fdc2009-07-27 22:52:03 +00001624 q = &adapter->mcc_obj.cq;
Sathya Perla5fb379e2009-06-18 00:02:59 +00001625 if (q->created)
Sathya Perla8788fdc2009-07-27 22:52:03 +00001626 be_cmd_q_destroy(adapter, q, QTYPE_CQ);
Sathya Perla5fb379e2009-06-18 00:02:59 +00001627 be_queue_free(adapter, q);
1628}
1629
1630/* Must be called only after TX qs are created as MCC shares TX EQ */
1631static int be_mcc_queues_create(struct be_adapter *adapter)
1632{
1633 struct be_queue_info *q, *cq;
Sathya Perla5fb379e2009-06-18 00:02:59 +00001634
1635 /* Alloc MCC compl queue */
Sathya Perla8788fdc2009-07-27 22:52:03 +00001636 cq = &adapter->mcc_obj.cq;
Sathya Perla5fb379e2009-06-18 00:02:59 +00001637 if (be_queue_alloc(adapter, cq, MCC_CQ_LEN,
Sathya Perlaefd2e402009-07-27 22:53:10 +00001638 sizeof(struct be_mcc_compl)))
Sathya Perla5fb379e2009-06-18 00:02:59 +00001639 goto err;
1640
1641 /* Ask BE to create MCC compl queue; share TX's eq */
Sathya Perla8788fdc2009-07-27 22:52:03 +00001642 if (be_cmd_cq_create(adapter, cq, &adapter->tx_eq.q, false, true, 0))
Sathya Perla5fb379e2009-06-18 00:02:59 +00001643 goto mcc_cq_free;
1644
1645 /* Alloc MCC queue */
Sathya Perla8788fdc2009-07-27 22:52:03 +00001646 q = &adapter->mcc_obj.q;
Sathya Perla5fb379e2009-06-18 00:02:59 +00001647 if (be_queue_alloc(adapter, q, MCC_Q_LEN, sizeof(struct be_mcc_wrb)))
1648 goto mcc_cq_destroy;
1649
1650 /* Ask BE to create MCC queue */
Sathya Perla8788fdc2009-07-27 22:52:03 +00001651 if (be_cmd_mccq_create(adapter, q, cq))
Sathya Perla5fb379e2009-06-18 00:02:59 +00001652 goto mcc_q_free;
1653
1654 return 0;
1655
1656mcc_q_free:
1657 be_queue_free(adapter, q);
1658mcc_cq_destroy:
Sathya Perla8788fdc2009-07-27 22:52:03 +00001659 be_cmd_q_destroy(adapter, cq, QTYPE_CQ);
Sathya Perla5fb379e2009-06-18 00:02:59 +00001660mcc_cq_free:
1661 be_queue_free(adapter, cq);
1662err:
1663 return -1;
1664}
1665
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001666static void be_tx_queues_destroy(struct be_adapter *adapter)
1667{
1668 struct be_queue_info *q;
1669
1670 q = &adapter->tx_obj.q;
Sathya Perlaa8e91792009-08-10 03:42:43 +00001671 if (q->created)
Sathya Perla8788fdc2009-07-27 22:52:03 +00001672 be_cmd_q_destroy(adapter, q, QTYPE_TXQ);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001673 be_queue_free(adapter, q);
1674
1675 q = &adapter->tx_obj.cq;
1676 if (q->created)
Sathya Perla8788fdc2009-07-27 22:52:03 +00001677 be_cmd_q_destroy(adapter, q, QTYPE_CQ);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001678 be_queue_free(adapter, q);
1679
Sathya Perla859b1e42009-08-10 03:43:51 +00001680 /* Clear any residual events */
1681 be_eq_clean(adapter, &adapter->tx_eq);
1682
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001683 q = &adapter->tx_eq.q;
1684 if (q->created)
Sathya Perla8788fdc2009-07-27 22:52:03 +00001685 be_cmd_q_destroy(adapter, q, QTYPE_EQ);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001686 be_queue_free(adapter, q);
1687}
1688
1689static int be_tx_queues_create(struct be_adapter *adapter)
1690{
1691 struct be_queue_info *eq, *q, *cq;
1692
1693 adapter->tx_eq.max_eqd = 0;
1694 adapter->tx_eq.min_eqd = 0;
1695 adapter->tx_eq.cur_eqd = 96;
1696 adapter->tx_eq.enable_aic = false;
1697 /* Alloc Tx Event queue */
1698 eq = &adapter->tx_eq.q;
1699 if (be_queue_alloc(adapter, eq, EVNT_Q_LEN, sizeof(struct be_eq_entry)))
1700 return -1;
1701
1702 /* Ask BE to create Tx Event queue */
Sathya Perla8788fdc2009-07-27 22:52:03 +00001703 if (be_cmd_eq_create(adapter, eq, adapter->tx_eq.cur_eqd))
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001704 goto tx_eq_free;
Sathya Perlafe6d2a32010-11-21 23:25:50 +00001705
Padmanabh Ratnakarecd62102011-04-03 01:54:11 +00001706 adapter->tx_eq.eq_idx = adapter->eq_next_idx++;
Sathya Perlafe6d2a32010-11-21 23:25:50 +00001707
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001709 /* Alloc TX eth compl queue */
1710 cq = &adapter->tx_obj.cq;
1711 if (be_queue_alloc(adapter, cq, TX_CQ_LEN,
1712 sizeof(struct be_eth_tx_compl)))
1713 goto tx_eq_destroy;
1714
1715 /* Ask BE to create Tx eth compl queue */
Sathya Perla8788fdc2009-07-27 22:52:03 +00001716 if (be_cmd_cq_create(adapter, cq, eq, false, false, 3))
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001717 goto tx_cq_free;
1718
1719 /* Alloc TX eth queue */
1720 q = &adapter->tx_obj.q;
1721 if (be_queue_alloc(adapter, q, TX_Q_LEN, sizeof(struct be_eth_wrb)))
1722 goto tx_cq_destroy;
1723
1724 /* Ask BE to create Tx eth queue */
Sathya Perla8788fdc2009-07-27 22:52:03 +00001725 if (be_cmd_txq_create(adapter, q, cq))
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001726 goto tx_q_free;
1727 return 0;
1728
1729tx_q_free:
1730 be_queue_free(adapter, q);
1731tx_cq_destroy:
Sathya Perla8788fdc2009-07-27 22:52:03 +00001732 be_cmd_q_destroy(adapter, cq, QTYPE_CQ);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001733tx_cq_free:
1734 be_queue_free(adapter, cq);
1735tx_eq_destroy:
Sathya Perla8788fdc2009-07-27 22:52:03 +00001736 be_cmd_q_destroy(adapter, eq, QTYPE_EQ);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001737tx_eq_free:
1738 be_queue_free(adapter, eq);
1739 return -1;
1740}
1741
1742static void be_rx_queues_destroy(struct be_adapter *adapter)
1743{
1744 struct be_queue_info *q;
Sathya Perla3abcded2010-10-03 22:12:27 -07001745 struct be_rx_obj *rxo;
1746 int i;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001747
Sathya Perla3abcded2010-10-03 22:12:27 -07001748 for_all_rx_queues(adapter, rxo, i) {
1749 q = &rxo->q;
1750 if (q->created) {
1751 be_cmd_q_destroy(adapter, q, QTYPE_RXQ);
1752 /* After the rxq is invalidated, wait for a grace time
1753 * of 1ms for all dma to end and the flush compl to
1754 * arrive
1755 */
1756 mdelay(1);
1757 be_rx_q_clean(adapter, rxo);
1758 }
1759 be_queue_free(adapter, q);
Sathya Perla89420422010-02-17 01:35:26 +00001760
Sathya Perla3abcded2010-10-03 22:12:27 -07001761 q = &rxo->cq;
1762 if (q->created)
1763 be_cmd_q_destroy(adapter, q, QTYPE_CQ);
1764 be_queue_free(adapter, q);
1765
1766 /* Clear any residual events */
1767 q = &rxo->rx_eq.q;
1768 if (q->created) {
1769 be_eq_clean(adapter, &rxo->rx_eq);
1770 be_cmd_q_destroy(adapter, q, QTYPE_EQ);
1771 }
1772 be_queue_free(adapter, q);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001773 }
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001774}
1775
Sathya Perlaac6a0c42011-03-21 20:49:25 +00001776static u32 be_num_rxqs_want(struct be_adapter *adapter)
1777{
1778 if (multi_rxq && (adapter->function_caps & BE_FUNCTION_CAPS_RSS) &&
1779 !adapter->sriov_enabled && !(adapter->function_mode & 0x400)) {
1780 return 1 + MAX_RSS_QS; /* one default non-RSS queue */
1781 } else {
1782 dev_warn(&adapter->pdev->dev,
1783 "No support for multiple RX queues\n");
1784 return 1;
1785 }
1786}
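
/*
 * Illustrative sketch, not part of the driver: how the wanted RX queue
 * count is capped by the MSI-X budget in be_rx_queues_create() below.
 * One vector is reserved for TX/MCC, so 5 granted vectors allow at most
 * 4 RX queues no matter how many RSS wants.
 */
static u32 demo_num_rx_qs(u32 wanted, u32 msix_vecs)
{
	return min(wanted, msix_vecs - 1);	/* -1: TX/MCC takes one */
}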
1787
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001788static int be_rx_queues_create(struct be_adapter *adapter)
1789{
1790 struct be_queue_info *eq, *q, *cq;
Sathya Perla3abcded2010-10-03 22:12:27 -07001791 struct be_rx_obj *rxo;
1792 int rc, i;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001793
Sathya Perlaac6a0c42011-03-21 20:49:25 +00001794 adapter->num_rx_qs = min(be_num_rxqs_want(adapter),
1795 msix_enabled(adapter) ?
1796 adapter->num_msix_vec - 1 : 1);
1797 if (adapter->num_rx_qs != MAX_RX_QS)
1798 dev_warn(&adapter->pdev->dev,
1799 "Can create only %d RX queues", adapter->num_rx_qs);
1800
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001801 adapter->big_page_size = (1 << get_order(rx_frag_size)) * PAGE_SIZE;
Sathya Perla3abcded2010-10-03 22:12:27 -07001802 for_all_rx_queues(adapter, rxo, i) {
1803 rxo->adapter = adapter;
1804 rxo->rx_eq.max_eqd = BE_MAX_EQD;
1805 rxo->rx_eq.enable_aic = true;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001806
Sathya Perla3abcded2010-10-03 22:12:27 -07001807 /* EQ */
1808 eq = &rxo->rx_eq.q;
1809 rc = be_queue_alloc(adapter, eq, EVNT_Q_LEN,
1810 sizeof(struct be_eq_entry));
1811 if (rc)
1812 goto err;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001813
Sathya Perla3abcded2010-10-03 22:12:27 -07001814 rc = be_cmd_eq_create(adapter, eq, rxo->rx_eq.cur_eqd);
1815 if (rc)
1816 goto err;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001817
Padmanabh Ratnakarecd62102011-04-03 01:54:11 +00001818 rxo->rx_eq.eq_idx = adapter->eq_next_idx++;
Sathya Perlafe6d2a32010-11-21 23:25:50 +00001819
Sathya Perla3abcded2010-10-03 22:12:27 -07001820 /* CQ */
1821 cq = &rxo->cq;
1822 rc = be_queue_alloc(adapter, cq, RX_CQ_LEN,
1823 sizeof(struct be_eth_rx_compl));
1824 if (rc)
1825 goto err;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001826
Sathya Perla3abcded2010-10-03 22:12:27 -07001827 rc = be_cmd_cq_create(adapter, cq, eq, false, false, 3);
1828 if (rc)
1829 goto err;
Sathya Perla3abcded2010-10-03 22:12:27 -07001830 /* Rx Q */
1831 q = &rxo->q;
1832 rc = be_queue_alloc(adapter, q, RX_Q_LEN,
1833 sizeof(struct be_eth_rx_d));
1834 if (rc)
1835 goto err;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001836
Sathya Perla3abcded2010-10-03 22:12:27 -07001837 rc = be_cmd_rxq_create(adapter, q, cq->id, rx_frag_size,
1838 BE_MAX_JUMBO_FRAME_SIZE, adapter->if_handle,
1839 (i > 0) ? 1 : 0/* rss enable */, &rxo->rss_id);
1840 if (rc)
1841 goto err;
1842 }
1843
1844 if (be_multi_rxq(adapter)) {
1845 u8 rsstable[MAX_RSS_QS];
1846
1847 for_all_rss_queues(adapter, rxo, i)
1848 rsstable[i] = rxo->rss_id;
1849
1850 rc = be_cmd_rss_config(adapter, rsstable,
1851 adapter->num_rx_qs - 1);
1852 if (rc)
1853 goto err;
1854 }
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001855
1856 return 0;
Sathya Perla3abcded2010-10-03 22:12:27 -07001857err:
1858 be_rx_queues_destroy(adapter);
1859 return -1;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001860}
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001861
Sathya Perlafe6d2a32010-11-21 23:25:50 +00001862static bool event_peek(struct be_eq_obj *eq_obj)
Sathya Perlab628bde2009-08-17 00:58:26 +00001863{
Sathya Perlafe6d2a32010-11-21 23:25:50 +00001864 struct be_eq_entry *eqe = queue_tail_node(&eq_obj->q);
1865
1866 return eqe->evt != 0;
Sathya Perlab628bde2009-08-17 00:58:26 +00001869}
1870
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001871static irqreturn_t be_intx(int irq, void *dev)
1872{
1873 struct be_adapter *adapter = dev;
Sathya Perla3abcded2010-10-03 22:12:27 -07001874 struct be_rx_obj *rxo;
Sathya Perlafe6d2a32010-11-21 23:25:50 +00001875 int isr, i, tx = 0, rx = 0;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001876
Sathya Perlafe6d2a32010-11-21 23:25:50 +00001877 if (lancer_chip(adapter)) {
1878 if (event_peek(&adapter->tx_eq))
1879 tx = event_handle(adapter, &adapter->tx_eq);
1880 for_all_rx_queues(adapter, rxo, i) {
1881 if (event_peek(&rxo->rx_eq))
1882 rx |= event_handle(adapter, &rxo->rx_eq);
1883 }
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001884
Sathya Perlafe6d2a32010-11-21 23:25:50 +00001885 if (!(tx || rx))
1886 return IRQ_NONE;
Sathya Perla3abcded2010-10-03 22:12:27 -07001887
Sathya Perlafe6d2a32010-11-21 23:25:50 +00001888 } else {
1889 isr = ioread32(adapter->csr + CEV_ISR0_OFFSET +
1890 (adapter->tx_eq.q.id / 8) * CEV_ISR_SIZE);
1891 if (!isr)
1892 return IRQ_NONE;
1893
Padmanabh Ratnakarecd62102011-04-03 01:54:11 +00001894 if ((1 << adapter->tx_eq.eq_idx & isr))
Sathya Perlafe6d2a32010-11-21 23:25:50 +00001895 event_handle(adapter, &adapter->tx_eq);
1896
1897 for_all_rx_queues(adapter, rxo, i) {
Padmanabh Ratnakarecd62102011-04-03 01:54:11 +00001898 if ((1 << rxo->rx_eq.eq_idx & isr))
Sathya Perlafe6d2a32010-11-21 23:25:50 +00001899 event_handle(adapter, &rxo->rx_eq);
1900 }
Sathya Perla3abcded2010-10-03 22:12:27 -07001901 }
Sathya Perlac001c212009-07-01 01:06:07 +00001902
Sathya Perla8788fdc2009-07-27 22:52:03 +00001903 return IRQ_HANDLED;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001904}
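
/*
 * Illustrative sketch, not part of the driver: the INTx dispatch rule
 * used by be_intx() above on non-Lancer chips -- event queue N owns bit
 * N of the ISR word and is serviced only when its bit is set.
 */
static bool demo_eq_has_event(u32 isr, u32 eq_idx)
{
	return (isr & (1 << eq_idx)) != 0;
}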
1905
1906static irqreturn_t be_msix_rx(int irq, void *dev)
1907{
Sathya Perla3abcded2010-10-03 22:12:27 -07001908 struct be_rx_obj *rxo = dev;
1909 struct be_adapter *adapter = rxo->adapter;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001910
Sathya Perla3abcded2010-10-03 22:12:27 -07001911 event_handle(adapter, &rxo->rx_eq);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001912
1913 return IRQ_HANDLED;
1914}
1915
Sathya Perla5fb379e2009-06-18 00:02:59 +00001916static irqreturn_t be_msix_tx_mcc(int irq, void *dev)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001917{
1918 struct be_adapter *adapter = dev;
1919
Sathya Perla8788fdc2009-07-27 22:52:03 +00001920 event_handle(adapter, &adapter->tx_eq);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001921
1922 return IRQ_HANDLED;
1923}
1924
Sathya Perla2e588f82011-03-11 02:49:26 +00001925static inline bool do_gro(struct be_rx_compl_info *rxcp)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001926{
Sathya Perla2e588f82011-03-11 02:49:26 +00001927 return rxcp->tcpf && !rxcp->err;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001928}
1929
stephen hemminger49b05222010-10-21 07:50:48 +00001930static int be_poll_rx(struct napi_struct *napi, int budget)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001931{
1932 struct be_eq_obj *rx_eq = container_of(napi, struct be_eq_obj, napi);
Sathya Perla3abcded2010-10-03 22:12:27 -07001933 struct be_rx_obj *rxo = container_of(rx_eq, struct be_rx_obj, rx_eq);
1934 struct be_adapter *adapter = rxo->adapter;
1935 struct be_queue_info *rx_cq = &rxo->cq;
Sathya Perla2e588f82011-03-11 02:49:26 +00001936 struct be_rx_compl_info *rxcp;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001937 u32 work_done;
1938
Sathya Perla3abcded2010-10-03 22:12:27 -07001939 rxo->stats.rx_polls++;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001940 for (work_done = 0; work_done < budget; work_done++) {
Sathya Perla3abcded2010-10-03 22:12:27 -07001941 rxcp = be_rx_compl_get(rxo);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001942 if (!rxcp)
1943 break;
1944
Padmanabh Ratnakare80d9da2011-03-07 03:07:58 +00001945 /* Ignore flush completions */
Padmanabh Ratnakar009dd872011-05-10 05:12:17 +00001946 if (rxcp->num_rcvd && rxcp->pkt_size) {
Sathya Perla2e588f82011-03-11 02:49:26 +00001947 if (do_gro(rxcp))
Sathya Perla64642812010-12-01 01:04:17 +00001948 be_rx_compl_process_gro(adapter, rxo, rxcp);
1949 else
1950 be_rx_compl_process(adapter, rxo, rxcp);
Padmanabh Ratnakar009dd872011-05-10 05:12:17 +00001951 } else if (rxcp->pkt_size == 0) {
1952 be_rx_compl_discard(adapter, rxo, rxcp);
Sathya Perla64642812010-12-01 01:04:17 +00001953 }
Padmanabh Ratnakar009dd872011-05-10 05:12:17 +00001954
Sathya Perla2e588f82011-03-11 02:49:26 +00001955 be_rx_stats_update(rxo, rxcp);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001956 }
1957
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001958 /* Refill the queue */
Sathya Perla3abcded2010-10-03 22:12:27 -07001959 if (atomic_read(&rxo->q.used) < RX_FRAGS_REFILL_WM)
Eric Dumazet1829b082011-03-01 05:48:12 +00001960 be_post_rx_frags(rxo, GFP_ATOMIC);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001961
1962 /* All consumed */
1963 if (work_done < budget) {
1964 napi_complete(napi);
Sathya Perla8788fdc2009-07-27 22:52:03 +00001965 be_cq_notify(adapter, rx_cq->id, true, work_done);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001966 } else {
1967 /* More to be consumed; continue with interrupts disabled */
Sathya Perla8788fdc2009-07-27 22:52:03 +00001968 be_cq_notify(adapter, rx_cq->id, false, work_done);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001969 }
1970 return work_done;
1971}
1972
Sathya Perlaf31e50a2010-03-02 03:56:39 -08001973/* As TX and MCC share the same EQ check for both TX and MCC completions.
1974 * For TX/MCC we don't honour budget; consume everything
1975 */
1976static int be_poll_tx_mcc(struct napi_struct *napi, int budget)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001977{
Sathya Perlaf31e50a2010-03-02 03:56:39 -08001978 struct be_eq_obj *tx_eq = container_of(napi, struct be_eq_obj, napi);
1979 struct be_adapter *adapter =
1980 container_of(tx_eq, struct be_adapter, tx_eq);
Sathya Perla5fb379e2009-06-18 00:02:59 +00001981 struct be_queue_info *txq = &adapter->tx_obj.q;
1982 struct be_queue_info *tx_cq = &adapter->tx_obj.cq;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001983 struct be_eth_tx_compl *txcp;
Sathya Perlaf31e50a2010-03-02 03:56:39 -08001984 int tx_compl = 0, mcc_compl, status = 0;
Padmanabh Ratnakar4d586b82011-05-10 05:13:57 +00001985 u16 end_idx, num_wrbs = 0;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001986
Sathya Perla5fb379e2009-06-18 00:02:59 +00001987 while ((txcp = be_tx_compl_get(tx_cq))) {
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001988 end_idx = AMAP_GET_BITS(struct amap_eth_tx_compl,
Sathya Perlaf31e50a2010-03-02 03:56:39 -08001989 wrb_index, txcp);
Padmanabh Ratnakar4d586b82011-05-10 05:13:57 +00001990 num_wrbs += be_tx_compl_process(adapter, end_idx);
Sathya Perlaf31e50a2010-03-02 03:56:39 -08001991 tx_compl++;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001992 }
1993
Sathya Perlaf31e50a2010-03-02 03:56:39 -08001994 mcc_compl = be_process_mcc(adapter, &status);
1995
1996 napi_complete(napi);
1997
1998 if (mcc_compl) {
1999 struct be_mcc_obj *mcc_obj = &adapter->mcc_obj;
2000 be_cq_notify(adapter, mcc_obj->cq.id, true, mcc_compl);
2001 }
2002
2003 if (tx_compl) {
2004 be_cq_notify(adapter, adapter->tx_obj.cq.id, true, tx_compl);
Sathya Perla5fb379e2009-06-18 00:02:59 +00002005
Padmanabh Ratnakar4d586b82011-05-10 05:13:57 +00002006 atomic_sub(num_wrbs, &txq->used);
2007
Sathya Perla5fb379e2009-06-18 00:02:59 +00002008 /* As Tx wrbs have been freed up, wake up netdev queue if
2009 * it was stopped due to lack of tx wrbs.
2010 */
2011 if (netif_queue_stopped(adapter->netdev) &&
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002012 atomic_read(&txq->used) < txq->len / 2) {
Sathya Perla5fb379e2009-06-18 00:02:59 +00002013 netif_wake_queue(adapter->netdev);
2014 }
2015
Sathya Perla3abcded2010-10-03 22:12:27 -07002016 tx_stats(adapter)->be_tx_events++;
2017 tx_stats(adapter)->be_tx_compl += tx_compl;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002018 }
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002019
2020 return 1;
2021}
2022
Ajit Khaparded053de92010-09-03 06:23:30 +00002023void be_detect_dump_ue(struct be_adapter *adapter)
Ajit Khaparde7c185272010-07-29 06:16:33 +00002024{
2025 u32 ue_status_lo, ue_status_hi, ue_status_lo_mask, ue_status_hi_mask;
2026 u32 i;
2027
2028 pci_read_config_dword(adapter->pdev,
2029 PCICFG_UE_STATUS_LOW, &ue_status_lo);
2030 pci_read_config_dword(adapter->pdev,
2031 PCICFG_UE_STATUS_HIGH, &ue_status_hi);
2032 pci_read_config_dword(adapter->pdev,
2033 PCICFG_UE_STATUS_LOW_MASK, &ue_status_lo_mask);
2034 pci_read_config_dword(adapter->pdev,
2035 PCICFG_UE_STATUS_HI_MASK, &ue_status_hi_mask);
2036
2037 ue_status_lo = (ue_status_lo & (~ue_status_lo_mask));
2038 ue_status_hi = (ue_status_hi & (~ue_status_hi_mask));
2039
Ajit Khaparded053de92010-09-03 06:23:30 +00002040 if (ue_status_lo || ue_status_hi) {
2041 adapter->ue_detected = true;
Ajit Khaparde7acc2082011-02-11 13:38:17 +00002042 adapter->eeh_err = true;
Ajit Khaparded053de92010-09-03 06:23:30 +00002043 dev_err(&adapter->pdev->dev, "UE Detected!!\n");
2044 }
2045
Ajit Khaparde7c185272010-07-29 06:16:33 +00002046 if (ue_status_lo) {
2047 for (i = 0; ue_status_lo; ue_status_lo >>= 1, i++) {
2048 if (ue_status_lo & 1)
2049 dev_err(&adapter->pdev->dev,
2050 "UE: %s bit set\n", ue_status_low_desc[i]);
2051 }
2052 }
2053 if (ue_status_hi) {
2054 for (i = 0; ue_status_hi; ue_status_hi >>= 1, i++) {
2055 if (ue_status_hi & 1)
2056 dev_err(&adapter->pdev->dev,
2057 "UE: %s bit set\n", ue_status_hi_desc[i]);
2058 }
2059 }
2061}
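
/*
 * Illustrative sketch, not part of the driver: the mask-and-walk decode
 * used by be_detect_dump_ue() above. A set mask bit means "ignore"; each
 * remaining set status bit indexes a name in ue_status_{low,hi}_desc.
 */
static void demo_decode_ue(u32 status, u32 mask, char **desc, int nbits)
{
	int i;

	status &= ~mask;		/* drop the masked-off sources */
	for (i = 0; i < nbits && status; i++, status >>= 1)
		if (status & 1)
			pr_err("UE: %s bit set\n", desc[i]);
}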
2062
Sathya Perlaea1dae12009-03-19 23:56:20 -07002063static void be_worker(struct work_struct *work)
2064{
2065 struct be_adapter *adapter =
2066 container_of(work, struct be_adapter, work.work);
Sathya Perla3abcded2010-10-03 22:12:27 -07002067 struct be_rx_obj *rxo;
2068 int i;
Sathya Perlaea1dae12009-03-19 23:56:20 -07002069
Sathya Perla16da8252011-03-21 20:49:27 +00002070 if (!adapter->ue_detected && !lancer_chip(adapter))
2071 be_detect_dump_ue(adapter);
2072
Somnath Koturf203af72010-10-25 23:01:03 +00002073 /* when interrupts are not yet enabled, just reap any pending
2074 * mcc completions */
2075 if (!netif_running(adapter->netdev)) {
2076 int mcc_compl, status = 0;
2077
2078 mcc_compl = be_process_mcc(adapter, &status);
2079
2080 if (mcc_compl) {
2081 struct be_mcc_obj *mcc_obj = &adapter->mcc_obj;
2082 be_cq_notify(adapter, mcc_obj->cq.id, false, mcc_compl);
2083 }
Ajit Khaparde9b037f32011-02-11 13:38:29 +00002084
Somnath Koturf203af72010-10-25 23:01:03 +00002085 goto reschedule;
2086 }
2087
Selvin Xavier005d5692011-05-16 07:36:35 +00002088 if (!adapter->stats_cmd_sent) {
2089 if (lancer_chip(adapter))
2090 lancer_cmd_get_pport_stats(adapter,
2091 &adapter->stats_cmd);
2092 else
2093 be_cmd_get_stats(adapter, &adapter->stats_cmd);
2094 }
Sathya Perla4097f662009-03-24 16:40:13 -07002095 be_tx_rate_update(adapter);
Sathya Perla4097f662009-03-24 16:40:13 -07002096
Sathya Perla3abcded2010-10-03 22:12:27 -07002097 for_all_rx_queues(adapter, rxo, i) {
2098 be_rx_rate_update(rxo);
2099 be_rx_eqd_update(adapter, rxo);
2100
2101 if (rxo->rx_post_starved) {
2102 rxo->rx_post_starved = false;
Eric Dumazet1829b082011-03-01 05:48:12 +00002103 be_post_rx_frags(rxo, GFP_KERNEL);
Sathya Perla3abcded2010-10-03 22:12:27 -07002104 }
Sathya Perlaea1dae12009-03-19 23:56:20 -07002105 }
2106
Somnath Koturf203af72010-10-25 23:01:03 +00002107reschedule:
Ivan Vecerae74fbd032011-04-21 00:20:04 +00002108 adapter->work_counter++;
Sathya Perlaea1dae12009-03-19 23:56:20 -07002109 schedule_delayed_work(&adapter->work, msecs_to_jiffies(1000));
2110}
2111
Sathya Perla8d56ff12009-11-22 22:02:26 +00002112static void be_msix_disable(struct be_adapter *adapter)
2113{
Sathya Perlaac6a0c42011-03-21 20:49:25 +00002114 if (msix_enabled(adapter)) {
Sathya Perla8d56ff12009-11-22 22:02:26 +00002115 pci_disable_msix(adapter->pdev);
Sathya Perlaac6a0c42011-03-21 20:49:25 +00002116 adapter->num_msix_vec = 0;
Sathya Perla3abcded2010-10-03 22:12:27 -07002117 }
2118}
2119
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002120static void be_msix_enable(struct be_adapter *adapter)
2121{
Sathya Perla3abcded2010-10-03 22:12:27 -07002122#define BE_MIN_MSIX_VECTORS (1 + 1) /* Rx + Tx */
Sathya Perlaac6a0c42011-03-21 20:49:25 +00002123 int i, status, num_vec;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002124
Sathya Perlaac6a0c42011-03-21 20:49:25 +00002125 num_vec = be_num_rxqs_want(adapter) + 1;
Sathya Perla3abcded2010-10-03 22:12:27 -07002126
Sathya Perlaac6a0c42011-03-21 20:49:25 +00002127 for (i = 0; i < num_vec; i++)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002128 adapter->msix_entries[i].entry = i;
2129
Sathya Perlaac6a0c42011-03-21 20:49:25 +00002130 status = pci_enable_msix(adapter->pdev, adapter->msix_entries, num_vec);
Sathya Perla3abcded2010-10-03 22:12:27 -07002131 if (status == 0) {
2132 goto done;
2133 } else if (status >= BE_MIN_MSIX_VECTORS) {
Sathya Perlaac6a0c42011-03-21 20:49:25 +00002134 num_vec = status;
Sathya Perla3abcded2010-10-03 22:12:27 -07002135 if (pci_enable_msix(adapter->pdev, adapter->msix_entries,
Sathya Perlaac6a0c42011-03-21 20:49:25 +00002136 num_vec) == 0)
Sathya Perla3abcded2010-10-03 22:12:27 -07002137 goto done;
Sathya Perla3abcded2010-10-03 22:12:27 -07002138 }
2139 return;
2140done:
Sathya Perlaac6a0c42011-03-21 20:49:25 +00002141 adapter->num_msix_vec = num_vec;
2142 return;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002143}
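
/*
 * Illustrative sketch, not part of the driver: pci_enable_msix() in this
 * kernel returns 0 on success or, on shortage, the number of vectors it
 * could have allocated; be_msix_enable() above retries once with that
 * smaller count, as this helper does.
 */
static int demo_enable_msix(struct pci_dev *pdev,
			    struct msix_entry *entries, int want, int min_vec)
{
	int ret = pci_enable_msix(pdev, entries, want);

	if (ret >= min_vec)		/* partial grant: take what we can */
		ret = pci_enable_msix(pdev, entries, ret);
	return ret;			/* 0 on success */
}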
2144
Sarveshwar Bandiba343c72010-03-31 02:56:12 +00002145static void be_sriov_enable(struct be_adapter *adapter)
2146{
Sarveshwar Bandi344dbf12010-07-09 01:43:55 +00002147 be_check_sriov_fn_type(adapter);
Ajit Khaparde6dedec82010-07-29 06:15:32 +00002148#ifdef CONFIG_PCI_IOV
Sarveshwar Bandiba343c72010-03-31 02:56:12 +00002149 if (be_physfn(adapter) && num_vfs) {
Ajit Khaparde81be8f02011-04-06 18:08:17 +00002150 int status, pos;
2151 u16 nvfs;
2152
2153 pos = pci_find_ext_capability(adapter->pdev,
2154 PCI_EXT_CAP_ID_SRIOV);
2155 pci_read_config_word(adapter->pdev,
2156 pos + PCI_SRIOV_TOTAL_VF, &nvfs);
2157
2158 if (num_vfs > nvfs) {
2159 dev_info(&adapter->pdev->dev,
2160 "Device supports %d VFs and not %d\n",
2161 nvfs, num_vfs);
2162 num_vfs = nvfs;
2163 }
Ajit Khaparde6dedec82010-07-29 06:15:32 +00002164
Sarveshwar Bandiba343c72010-03-31 02:56:12 +00002165 status = pci_enable_sriov(adapter->pdev, num_vfs);
2166 adapter->sriov_enabled = status ? false : true;
2167 }
2168#endif
Sarveshwar Bandiba343c72010-03-31 02:56:12 +00002169}
2170
2171static void be_sriov_disable(struct be_adapter *adapter)
2172{
2173#ifdef CONFIG_PCI_IOV
2174 if (adapter->sriov_enabled) {
2175 pci_disable_sriov(adapter->pdev);
2176 adapter->sriov_enabled = false;
2177 }
2178#endif
2179}
2180
Sathya Perlafe6d2a32010-11-21 23:25:50 +00002181static inline int be_msix_vec_get(struct be_adapter *adapter,
2182 struct be_eq_obj *eq_obj)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002183{
Padmanabh Ratnakarecd62102011-04-03 01:54:11 +00002184 return adapter->msix_entries[eq_obj->eq_idx].vector;
Sathya Perlab628bde2009-08-17 00:58:26 +00002185}
2186
2187static int be_request_irq(struct be_adapter *adapter,
2188 struct be_eq_obj *eq_obj,
Sathya Perla3abcded2010-10-03 22:12:27 -07002189 void *handler, char *desc, void *context)
Sathya Perlab628bde2009-08-17 00:58:26 +00002190{
2191 struct net_device *netdev = adapter->netdev;
2192 int vec;
2193
2194 sprintf(eq_obj->desc, "%s-%s", netdev->name, desc);
Sathya Perlafe6d2a32010-11-21 23:25:50 +00002195 vec = be_msix_vec_get(adapter, eq_obj);
Sathya Perla3abcded2010-10-03 22:12:27 -07002196 return request_irq(vec, handler, 0, eq_obj->desc, context);
Sathya Perlab628bde2009-08-17 00:58:26 +00002197}
2198
Sathya Perla3abcded2010-10-03 22:12:27 -07002199static void be_free_irq(struct be_adapter *adapter, struct be_eq_obj *eq_obj,
2200 void *context)
Sathya Perlab628bde2009-08-17 00:58:26 +00002201{
Sathya Perlafe6d2a32010-11-21 23:25:50 +00002202 int vec = be_msix_vec_get(adapter, eq_obj);
Sathya Perla3abcded2010-10-03 22:12:27 -07002203 free_irq(vec, context);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002204}
2205
2206static int be_msix_register(struct be_adapter *adapter)
2207{
Sathya Perla3abcded2010-10-03 22:12:27 -07002208 struct be_rx_obj *rxo;
2209 int status, i;
2210 char qname[10];
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002211
Sathya Perla3abcded2010-10-03 22:12:27 -07002212 status = be_request_irq(adapter, &adapter->tx_eq, be_msix_tx_mcc, "tx",
2213 adapter);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002214 if (status)
2215 goto err;
2216
Sathya Perla3abcded2010-10-03 22:12:27 -07002217 for_all_rx_queues(adapter, rxo, i) {
2218 sprintf(qname, "rxq%d", i);
2219 status = be_request_irq(adapter, &rxo->rx_eq, be_msix_rx,
2220 qname, rxo);
2221 if (status)
2222 goto err_msix;
2223 }
Sathya Perlab628bde2009-08-17 00:58:26 +00002224
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002225 return 0;
Sathya Perlab628bde2009-08-17 00:58:26 +00002226
Sathya Perla3abcded2010-10-03 22:12:27 -07002227err_msix:
2228 be_free_irq(adapter, &adapter->tx_eq, adapter);
2229
2230 for (i--, rxo = &adapter->rx_obj[i]; i >= 0; i--, rxo--)
2231 be_free_irq(adapter, &rxo->rx_eq, rxo);
2232
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002233err:
2234 dev_warn(&adapter->pdev->dev,
2235 "MSIX Request IRQ failed - err %d\n", status);
Sathya Perlaac6a0c42011-03-21 20:49:25 +00002236 be_msix_disable(adapter);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002237 return status;
2238}
2239
2240static int be_irq_register(struct be_adapter *adapter)
2241{
2242 struct net_device *netdev = adapter->netdev;
2243 int status;
2244
Sathya Perlaac6a0c42011-03-21 20:49:25 +00002245 if (msix_enabled(adapter)) {
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002246 status = be_msix_register(adapter);
2247 if (status == 0)
2248 goto done;
Sarveshwar Bandiba343c72010-03-31 02:56:12 +00002249 /* INTx is not supported for VF */
2250 if (!be_physfn(adapter))
2251 return status;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002252 }
2253
2254 /* INTx */
2255 netdev->irq = adapter->pdev->irq;
2256 status = request_irq(netdev->irq, be_intx, IRQF_SHARED, netdev->name,
2257 adapter);
2258 if (status) {
2259 dev_err(&adapter->pdev->dev,
2260 "INTx request IRQ failed - err %d\n", status);
2261 return status;
2262 }
2263done:
2264 adapter->isr_registered = true;
2265 return 0;
2266}
2267
2268static void be_irq_unregister(struct be_adapter *adapter)
2269{
2270 struct net_device *netdev = adapter->netdev;
Sathya Perla3abcded2010-10-03 22:12:27 -07002271 struct be_rx_obj *rxo;
2272 int i;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002273
2274 if (!adapter->isr_registered)
2275 return;
2276
2277 /* INTx */
Sathya Perlaac6a0c42011-03-21 20:49:25 +00002278 if (!msix_enabled(adapter)) {
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002279 free_irq(netdev->irq, adapter);
2280 goto done;
2281 }
2282
2283 /* MSIx */
Sathya Perla3abcded2010-10-03 22:12:27 -07002284 be_free_irq(adapter, &adapter->tx_eq, adapter);
2285
2286 for_all_rx_queues(adapter, rxo, i)
2287 be_free_irq(adapter, &rxo->rx_eq, rxo);
2288
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002289done:
2290 adapter->isr_registered = false;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002291}
2292
Sathya Perla889cd4b2010-05-30 23:33:45 +00002293static int be_close(struct net_device *netdev)
2294{
2295 struct be_adapter *adapter = netdev_priv(netdev);
Sathya Perla3abcded2010-10-03 22:12:27 -07002296 struct be_rx_obj *rxo;
Sathya Perla889cd4b2010-05-30 23:33:45 +00002297 struct be_eq_obj *tx_eq = &adapter->tx_eq;
Sathya Perla3abcded2010-10-03 22:12:27 -07002298 int vec, i;
Sathya Perla889cd4b2010-05-30 23:33:45 +00002299
Sathya Perla889cd4b2010-05-30 23:33:45 +00002300 be_async_mcc_disable(adapter);
2301
Sathya Perla889cd4b2010-05-30 23:33:45 +00002302 netif_carrier_off(netdev);
2303 adapter->link_up = false;
2304
Sathya Perlafe6d2a32010-11-21 23:25:50 +00002305 if (!lancer_chip(adapter))
2306 be_intr_set(adapter, false);
Sathya Perla889cd4b2010-05-30 23:33:45 +00002307
Padmanabh Ratnakar63fcb272011-03-07 03:09:17 +00002308 for_all_rx_queues(adapter, rxo, i)
2309 napi_disable(&rxo->rx_eq.napi);
2310
2311 napi_disable(&tx_eq->napi);
2312
2313 if (lancer_chip(adapter)) {
2314 be_cq_notify(adapter, adapter->tx_obj.cq.id, false, 0);
2315 be_cq_notify(adapter, adapter->mcc_obj.cq.id, false, 0);
2316 for_all_rx_queues(adapter, rxo, i)
2317 be_cq_notify(adapter, rxo->cq.id, false, 0);
2318 }
2319
Sathya Perlaac6a0c42011-03-21 20:49:25 +00002320 if (msix_enabled(adapter)) {
Sathya Perlafe6d2a32010-11-21 23:25:50 +00002321 vec = be_msix_vec_get(adapter, tx_eq);
Sathya Perla889cd4b2010-05-30 23:33:45 +00002322 synchronize_irq(vec);
Sathya Perla3abcded2010-10-03 22:12:27 -07002323
2324 for_all_rx_queues(adapter, rxo, i) {
Sathya Perlafe6d2a32010-11-21 23:25:50 +00002325 vec = be_msix_vec_get(adapter, &rxo->rx_eq);
Sathya Perla3abcded2010-10-03 22:12:27 -07002326 synchronize_irq(vec);
2327 }
Sathya Perla889cd4b2010-05-30 23:33:45 +00002328 } else {
2329 synchronize_irq(netdev->irq);
2330 }
2331 be_irq_unregister(adapter);
2332
Sathya Perla889cd4b2010-05-30 23:33:45 +00002333 /* Wait for all pending tx completions to arrive so that
2334 * all tx skbs are freed.
2335 */
2336 be_tx_compl_clean(adapter);
2337
2338 return 0;
2339}
2340
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002341static int be_open(struct net_device *netdev)
2342{
2343 struct be_adapter *adapter = netdev_priv(netdev);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002344 struct be_eq_obj *tx_eq = &adapter->tx_eq;
Sathya Perla3abcded2010-10-03 22:12:27 -07002345 struct be_rx_obj *rxo;
Sathya Perlaa8f447bd2009-06-18 00:10:27 +00002346 bool link_up;
Sathya Perla3abcded2010-10-03 22:12:27 -07002347 int status, i;
Sarveshwar Bandi0388f252009-10-28 04:15:20 -07002348 u8 mac_speed;
2349 u16 link_speed;
Sathya Perla5fb379e2009-06-18 00:02:59 +00002350
Sathya Perla3abcded2010-10-03 22:12:27 -07002351 for_all_rx_queues(adapter, rxo, i) {
Eric Dumazet1829b082011-03-01 05:48:12 +00002352 be_post_rx_frags(rxo, GFP_KERNEL);
Sathya Perla3abcded2010-10-03 22:12:27 -07002353 napi_enable(&rxo->rx_eq.napi);
2354 }
Sathya Perla5fb379e2009-06-18 00:02:59 +00002355 napi_enable(&tx_eq->napi);
2356
2357 be_irq_register(adapter);
2358
Sathya Perlafe6d2a32010-11-21 23:25:50 +00002359 if (!lancer_chip(adapter))
2360 be_intr_set(adapter, true);
Sathya Perla5fb379e2009-06-18 00:02:59 +00002361
2362 /* The evt queues are created in unarmed state; arm them */
Sathya Perla3abcded2010-10-03 22:12:27 -07002363 for_all_rx_queues(adapter, rxo, i) {
2364 be_eq_notify(adapter, rxo->rx_eq.q.id, true, false, 0);
2365 be_cq_notify(adapter, rxo->cq.id, true, 0);
2366 }
Sathya Perla8788fdc2009-07-27 22:52:03 +00002367 be_eq_notify(adapter, tx_eq->q.id, true, false, 0);
Sathya Perla5fb379e2009-06-18 00:02:59 +00002368
Sathya Perla7a1e9b22010-02-17 01:35:11 +00002369 /* Now that interrupts are on we can process async mcc */
2370 be_async_mcc_enable(adapter);
2371
Sarveshwar Bandi0388f252009-10-28 04:15:20 -07002372 status = be_cmd_link_status_query(adapter, &link_up, &mac_speed,
Ajit Khaparde187e8752011-04-19 12:11:46 +00002373 &link_speed, 0);
Sathya Perlaa8f447bd2009-06-18 00:10:27 +00002374 if (status)
Sathya Perla889cd4b2010-05-30 23:33:45 +00002375 goto err;
Sathya Perlaa8f447bd2009-06-18 00:10:27 +00002376 be_link_status_update(adapter, link_up);
Sathya Perla5fb379e2009-06-18 00:02:59 +00002377
Sarveshwar Bandiba343c72010-03-31 02:56:12 +00002378 if (be_physfn(adapter)) {
Ajit Khaparde1da87b72010-07-23 01:51:22 +00002379 status = be_vid_config(adapter, false, 0);
Sathya Perla889cd4b2010-05-30 23:33:45 +00002380 if (status)
2381 goto err;
2382
Sarveshwar Bandiba343c72010-03-31 02:56:12 +00002383 status = be_cmd_set_flow_control(adapter,
2384 adapter->tx_fc, adapter->rx_fc);
2385 if (status)
Sathya Perla889cd4b2010-05-30 23:33:45 +00002386 goto err;
Sarveshwar Bandiba343c72010-03-31 02:56:12 +00002387 }
Ajit Khaparde4f2aa892009-11-06 02:07:32 +00002388
Sathya Perla889cd4b2010-05-30 23:33:45 +00002389 return 0;
2390err:
2391 be_close(adapter->netdev);
2392 return -EIO;
Sathya Perla5fb379e2009-06-18 00:02:59 +00002393}
2394
Ajit Khaparde71d8d1b2009-12-03 06:16:59 +00002395static int be_setup_wol(struct be_adapter *adapter, bool enable)
2396{
2397 struct be_dma_mem cmd;
2398 int status = 0;
2399 u8 mac[ETH_ALEN];
2400
2401 memset(mac, 0, ETH_ALEN);
2402
2403 cmd.size = sizeof(struct be_cmd_req_acpi_wol_magic_config);
Ivan Vecera2b7bceb2011-02-02 08:05:12 +00002404 cmd.va = dma_alloc_coherent(&adapter->pdev->dev, cmd.size, &cmd.dma,
2405 GFP_KERNEL);
Ajit Khaparde71d8d1b2009-12-03 06:16:59 +00002406 if (cmd.va == NULL)
2407 return -1;
2408 memset(cmd.va, 0, cmd.size);
2409
2410 if (enable) {
2411 status = pci_write_config_dword(adapter->pdev,
2412 PCICFG_PM_CONTROL_OFFSET, PCICFG_PM_CONTROL_MASK);
2413 if (status) {
2414 dev_err(&adapter->pdev->dev,
Frans Pop2381a552010-03-24 07:57:36 +00002415 "Could not enable Wake-on-lan\n");
Ivan Vecera2b7bceb2011-02-02 08:05:12 +00002416 dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va,
2417 cmd.dma);
Ajit Khaparde71d8d1b2009-12-03 06:16:59 +00002418 return status;
2419 }
2420 status = be_cmd_enable_magic_wol(adapter,
2421 adapter->netdev->dev_addr, &cmd);
2422 pci_enable_wake(adapter->pdev, PCI_D3hot, 1);
2423 pci_enable_wake(adapter->pdev, PCI_D3cold, 1);
2424 } else {
2425 status = be_cmd_enable_magic_wol(adapter, mac, &cmd);
2426 pci_enable_wake(adapter->pdev, PCI_D3hot, 0);
2427 pci_enable_wake(adapter->pdev, PCI_D3cold, 0);
2428 }
2429
Ivan Vecera2b7bceb2011-02-02 08:05:12 +00002430 dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va, cmd.dma);
Ajit Khaparde71d8d1b2009-12-03 06:16:59 +00002431 return status;
2432}
2433
Ajit Khaparde6d87f5c2010-08-25 00:32:33 +00002434/*
2435 * Generate a seed MAC address from the PF MAC Address using jhash.
2436 * MAC addresses for VFs are assigned incrementally, starting from the seed.
2437 * These addresses are programmed in the ASIC by the PF and the VF driver
2438 * queries for the MAC address during its probe.
2439 */
2440static inline int be_vf_eth_addr_config(struct be_adapter *adapter)
2441{
2442 u32 vf = 0;
Sathya Perla3abcded2010-10-03 22:12:27 -07002443 int status = 0;
Ajit Khaparde6d87f5c2010-08-25 00:32:33 +00002444 u8 mac[ETH_ALEN];
2445
2446 be_vf_eth_addr_generate(adapter, mac);
2447
2448 for (vf = 0; vf < num_vfs; vf++) {
2449 status = be_cmd_pmac_add(adapter, mac,
2450 adapter->vf_cfg[vf].vf_if_handle,
Ajit Khapardef8617e02011-02-11 13:36:37 +00002451 &adapter->vf_cfg[vf].vf_pmac_id,
2452 vf + 1);
Ajit Khaparde6d87f5c2010-08-25 00:32:33 +00002453 if (status)
2454 dev_err(&adapter->pdev->dev,
2455 "Mac address add failed for VF %d\n", vf);
2456 else
2457 memcpy(adapter->vf_cfg[vf].vf_mac_addr, mac, ETH_ALEN);
2458
2459 mac[5] += 1;
2460 }
2461 return status;
2462}
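
/*
 * Illustrative sketch, not part of the driver: one way a seed MAC could
 * be derived with jhash per the comment above. The real derivation lives
 * in be_vf_eth_addr_generate() elsewhere; this byte layout is only an
 * assumption for illustration.
 */
static void demo_vf_mac_seed(const u8 *pf_mac, u8 *seed_mac)
{
	u32 h = jhash(pf_mac, ETH_ALEN, 0);

	seed_mac[0] = 0x02;		/* locally administered, unicast */
	seed_mac[1] = 0x00;
	memcpy(&seed_mac[2], &h, 4);	/* hash fills the remaining bytes */
}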
2463
2464static inline void be_vf_eth_addr_rem(struct be_adapter *adapter)
2465{
2466 u32 vf;
2467
2468 for (vf = 0; vf < num_vfs; vf++) {
2469 if (adapter->vf_cfg[vf].vf_pmac_id != BE_INVALID_PMAC_ID)
2470 be_cmd_pmac_del(adapter,
2471 adapter->vf_cfg[vf].vf_if_handle,
Ajit Khapardef8617e02011-02-11 13:36:37 +00002472 adapter->vf_cfg[vf].vf_pmac_id, vf + 1);
Ajit Khaparde6d87f5c2010-08-25 00:32:33 +00002473 }
2474}
2475
static int be_setup(struct be_adapter *adapter)
{
        struct net_device *netdev = adapter->netdev;
        u32 cap_flags, en_flags, vf = 0;
        int status;
        u8 mac[ETH_ALEN];

        cap_flags = en_flags = BE_IF_FLAGS_UNTAGGED |
                                BE_IF_FLAGS_BROADCAST |
                                BE_IF_FLAGS_MULTICAST;

        if (be_physfn(adapter)) {
                cap_flags |= BE_IF_FLAGS_MCAST_PROMISCUOUS |
                                BE_IF_FLAGS_PROMISCUOUS |
                                BE_IF_FLAGS_PASS_L3L4_ERRORS;
                en_flags |= BE_IF_FLAGS_PASS_L3L4_ERRORS;

                if (adapter->function_caps & BE_FUNCTION_CAPS_RSS) {
                        cap_flags |= BE_IF_FLAGS_RSS;
                        en_flags |= BE_IF_FLAGS_RSS;
                }
        }

        status = be_cmd_if_create(adapter, cap_flags, en_flags,
                        netdev->dev_addr, false /* pmac_invalid */,
                        &adapter->if_handle, &adapter->pmac_id, 0);
        if (status != 0)
                goto do_none;

        if (be_physfn(adapter)) {
                if (adapter->sriov_enabled) {
                        while (vf < num_vfs) {
                                cap_flags = en_flags = BE_IF_FLAGS_UNTAGGED |
                                                        BE_IF_FLAGS_BROADCAST;
                                status = be_cmd_if_create(adapter, cap_flags,
                                        en_flags, mac, true,
                                        &adapter->vf_cfg[vf].vf_if_handle,
                                        NULL, vf + 1);
                                if (status) {
                                        dev_err(&adapter->pdev->dev,
                                        "Interface Create failed for VF %d\n",
                                        vf);
                                        goto if_destroy;
                                }
                                adapter->vf_cfg[vf].vf_pmac_id =
                                                BE_INVALID_PMAC_ID;
                                vf++;
                        }
                }
        } else {
                status = be_cmd_mac_addr_query(adapter, mac,
                        MAC_ADDRESS_TYPE_NETWORK, false, adapter->if_handle);
                if (!status) {
                        memcpy(adapter->netdev->dev_addr, mac, ETH_ALEN);
                        memcpy(adapter->netdev->perm_addr, mac, ETH_ALEN);
                }
        }

        status = be_tx_queues_create(adapter);
        if (status != 0)
                goto if_destroy;

        status = be_rx_queues_create(adapter);
        if (status != 0)
                goto tx_qs_destroy;

        status = be_mcc_queues_create(adapter);
        if (status != 0)
                goto rx_qs_destroy;

        adapter->link_speed = -1;

        return 0;

rx_qs_destroy:
        be_rx_queues_destroy(adapter);
tx_qs_destroy:
        be_tx_queues_destroy(adapter);
if_destroy:
        if (be_physfn(adapter) && adapter->sriov_enabled)
                for (vf = 0; vf < num_vfs; vf++)
                        if (adapter->vf_cfg[vf].vf_if_handle)
                                be_cmd_if_destroy(adapter,
                                        adapter->vf_cfg[vf].vf_if_handle,
                                        vf + 1);
        be_cmd_if_destroy(adapter, adapter->if_handle, 0);
do_none:
        return status;
}

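/* Inverse of be_setup(): release the VF MACs, queues and interfaces, then
 * tell the FW that no more commands will be issued.
 */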
static int be_clear(struct be_adapter *adapter)
{
        int vf;

        if (be_physfn(adapter) && adapter->sriov_enabled)
                be_vf_eth_addr_rem(adapter);

        be_mcc_queues_destroy(adapter);
        be_rx_queues_destroy(adapter);
        be_tx_queues_destroy(adapter);
        adapter->eq_next_idx = 0;

        if (be_physfn(adapter) && adapter->sriov_enabled)
                for (vf = 0; vf < num_vfs; vf++)
                        if (adapter->vf_cfg[vf].vf_if_handle)
                                be_cmd_if_destroy(adapter,
                                        adapter->vf_cfg[vf].vf_if_handle,
                                        vf + 1);

        be_cmd_if_destroy(adapter, adapter->if_handle, 0);

        /* tell fw we're done with firing cmds */
        be_cmd_fw_clean(adapter);
        return 0;
}

#define FW_FILE_HDR_SIGN        "ServerEngines Corp. "
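/* Compare the CRC stored at the end of the redboot region in flash with the
 * CRC of the redboot image in the UFI file; flashing is skipped when they
 * already match.
 */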
static bool be_flash_redboot(struct be_adapter *adapter,
                        const u8 *p, u32 img_start, int image_size,
                        int hdr_size)
{
        u32 crc_offset;
        u8 flashed_crc[4];
        int status;

        crc_offset = hdr_size + img_start + image_size - 4;

        p += crc_offset;

        status = be_cmd_get_flash_crc(adapter, flashed_crc,
                        (image_size - 4));
        if (status) {
                dev_err(&adapter->pdev->dev,
                        "could not get crc from flash, not flashing redboot\n");
                return false;
        }

        /* Update redboot only if the CRC does not match */
        if (!memcmp(flashed_crc, p, 4))
                return false;
        else
                return true;
}

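/* Walk the per-generation component table and write each image section of
 * the UFI file to its flash region in 32KB chunks; the final chunk of a
 * section is sent with FLASHROM_OPER_FLASH to commit it.
 */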
static int be_flash_data(struct be_adapter *adapter,
                        const struct firmware *fw,
                        struct be_dma_mem *flash_cmd, int num_of_images)
{
        int status = 0, i, filehdr_size = 0;
        u32 total_bytes = 0, flash_op;
        int num_bytes;
        const u8 *p = fw->data;
        struct be_cmd_write_flashrom *req = flash_cmd->va;
        const struct flash_comp *pflashcomp;
        int num_comp;

        static const struct flash_comp gen3_flash_types[9] = {
                { FLASH_iSCSI_PRIMARY_IMAGE_START_g3, IMG_TYPE_ISCSI_ACTIVE,
                        FLASH_IMAGE_MAX_SIZE_g3},
                { FLASH_REDBOOT_START_g3, IMG_TYPE_REDBOOT,
                        FLASH_REDBOOT_IMAGE_MAX_SIZE_g3},
                { FLASH_iSCSI_BIOS_START_g3, IMG_TYPE_BIOS,
                        FLASH_BIOS_IMAGE_MAX_SIZE_g3},
                { FLASH_PXE_BIOS_START_g3, IMG_TYPE_PXE_BIOS,
                        FLASH_BIOS_IMAGE_MAX_SIZE_g3},
                { FLASH_FCoE_BIOS_START_g3, IMG_TYPE_FCOE_BIOS,
                        FLASH_BIOS_IMAGE_MAX_SIZE_g3},
                { FLASH_iSCSI_BACKUP_IMAGE_START_g3, IMG_TYPE_ISCSI_BACKUP,
                        FLASH_IMAGE_MAX_SIZE_g3},
                { FLASH_FCoE_PRIMARY_IMAGE_START_g3, IMG_TYPE_FCOE_FW_ACTIVE,
                        FLASH_IMAGE_MAX_SIZE_g3},
                { FLASH_FCoE_BACKUP_IMAGE_START_g3, IMG_TYPE_FCOE_FW_BACKUP,
                        FLASH_IMAGE_MAX_SIZE_g3},
                { FLASH_NCSI_START_g3, IMG_TYPE_NCSI_FW,
                        FLASH_NCSI_IMAGE_MAX_SIZE_g3}
        };
        static const struct flash_comp gen2_flash_types[8] = {
                { FLASH_iSCSI_PRIMARY_IMAGE_START_g2, IMG_TYPE_ISCSI_ACTIVE,
                        FLASH_IMAGE_MAX_SIZE_g2},
                { FLASH_REDBOOT_START_g2, IMG_TYPE_REDBOOT,
                        FLASH_REDBOOT_IMAGE_MAX_SIZE_g2},
                { FLASH_iSCSI_BIOS_START_g2, IMG_TYPE_BIOS,
                        FLASH_BIOS_IMAGE_MAX_SIZE_g2},
                { FLASH_PXE_BIOS_START_g2, IMG_TYPE_PXE_BIOS,
                        FLASH_BIOS_IMAGE_MAX_SIZE_g2},
                { FLASH_FCoE_BIOS_START_g2, IMG_TYPE_FCOE_BIOS,
                        FLASH_BIOS_IMAGE_MAX_SIZE_g2},
                { FLASH_iSCSI_BACKUP_IMAGE_START_g2, IMG_TYPE_ISCSI_BACKUP,
                        FLASH_IMAGE_MAX_SIZE_g2},
                { FLASH_FCoE_PRIMARY_IMAGE_START_g2, IMG_TYPE_FCOE_FW_ACTIVE,
                        FLASH_IMAGE_MAX_SIZE_g2},
                { FLASH_FCoE_BACKUP_IMAGE_START_g2, IMG_TYPE_FCOE_FW_BACKUP,
                        FLASH_IMAGE_MAX_SIZE_g2}
        };

        if (adapter->generation == BE_GEN3) {
                pflashcomp = gen3_flash_types;
                filehdr_size = sizeof(struct flash_file_hdr_g3);
                num_comp = ARRAY_SIZE(gen3_flash_types);
        } else {
                pflashcomp = gen2_flash_types;
                filehdr_size = sizeof(struct flash_file_hdr_g2);
                num_comp = ARRAY_SIZE(gen2_flash_types);
        }
        for (i = 0; i < num_comp; i++) {
                if ((pflashcomp[i].optype == IMG_TYPE_NCSI_FW) &&
                        memcmp(adapter->fw_ver, "3.102.148.0", 11) < 0)
                        continue;
                if ((pflashcomp[i].optype == IMG_TYPE_REDBOOT) &&
                        (!be_flash_redboot(adapter, fw->data,
                        pflashcomp[i].offset, pflashcomp[i].size, filehdr_size +
                        (num_of_images * sizeof(struct image_hdr)))))
                        continue;
                p = fw->data;
                p += filehdr_size + pflashcomp[i].offset
                        + (num_of_images * sizeof(struct image_hdr));
                if (p + pflashcomp[i].size > fw->data + fw->size)
                        return -1;
                total_bytes = pflashcomp[i].size;
                while (total_bytes) {
                        if (total_bytes > 32*1024)
                                num_bytes = 32*1024;
                        else
                                num_bytes = total_bytes;
                        total_bytes -= num_bytes;

                        if (!total_bytes)
                                flash_op = FLASHROM_OPER_FLASH;
                        else
                                flash_op = FLASHROM_OPER_SAVE;
                        memcpy(req->params.data_buf, p, num_bytes);
                        p += num_bytes;
                        status = be_cmd_write_flashrom(adapter, flash_cmd,
                                pflashcomp[i].optype, flash_op, num_bytes);
                        if (status) {
                                dev_err(&adapter->pdev->dev,
                                        "cmd to write to flash rom failed.\n");
                                return -1;
                        }
                }
        }
        return 0;
}

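/* Derive the UFI generation from the first character of the build string
 * in the file header ('2' -> BE_GEN2, '3' -> BE_GEN3).
 */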
static int get_ufigen_type(struct flash_file_hdr_g2 *fhdr)
{
        if (fhdr == NULL)
                return 0;
        if (fhdr->build[0] == '3')
                return BE_GEN3;
        else if (fhdr->build[0] == '2')
                return BE_GEN2;
        else
                return 0;
}

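/* Lancer uses a write_object command stream instead of the BE2/BE3
 * flashrom path: the image is DMAed to "/prg" in 32KB chunks and then
 * committed with a zero-length write.
 */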
static int lancer_fw_download(struct be_adapter *adapter,
                                const struct firmware *fw)
{
#define LANCER_FW_DOWNLOAD_CHUNK        (32 * 1024)
#define LANCER_FW_DOWNLOAD_LOCATION     "/prg"
        struct be_dma_mem flash_cmd;
        const u8 *data_ptr = NULL;
        u8 *dest_image_ptr = NULL;
        size_t image_size = 0;
        u32 chunk_size = 0;
        u32 data_written = 0;
        u32 offset = 0;
        int status = 0;
        u8 add_status = 0;

        if (!IS_ALIGNED(fw->size, sizeof(u32))) {
                dev_err(&adapter->pdev->dev,
                        "FW Image not properly aligned. "
                        "Length must be 4 byte aligned.\n");
                status = -EINVAL;
                goto lancer_fw_exit;
        }

        flash_cmd.size = sizeof(struct lancer_cmd_req_write_object)
                                + LANCER_FW_DOWNLOAD_CHUNK;
        flash_cmd.va = dma_alloc_coherent(&adapter->pdev->dev, flash_cmd.size,
                                          &flash_cmd.dma, GFP_KERNEL);
        if (!flash_cmd.va) {
                status = -ENOMEM;
                dev_err(&adapter->pdev->dev,
                        "Memory allocation failure while flashing\n");
                goto lancer_fw_exit;
        }

        dest_image_ptr = flash_cmd.va +
                                sizeof(struct lancer_cmd_req_write_object);
        image_size = fw->size;
        data_ptr = fw->data;

        while (image_size) {
                chunk_size = min_t(u32, image_size, LANCER_FW_DOWNLOAD_CHUNK);

                /* Copy the image chunk content. */
                memcpy(dest_image_ptr, data_ptr, chunk_size);

                status = lancer_cmd_write_object(adapter, &flash_cmd,
                                chunk_size, offset,
                                LANCER_FW_DOWNLOAD_LOCATION,
                                &data_written, &add_status);

                if (status)
                        break;

                offset += data_written;
                data_ptr += data_written;
                image_size -= data_written;
        }

        if (!status) {
                /* Commit the FW written */
                status = lancer_cmd_write_object(adapter, &flash_cmd,
                                0, offset, LANCER_FW_DOWNLOAD_LOCATION,
                                &data_written, &add_status);
        }

        dma_free_coherent(&adapter->pdev->dev, flash_cmd.size, flash_cmd.va,
                          flash_cmd.dma);
        if (status) {
                dev_err(&adapter->pdev->dev,
                        "Firmware load error. "
                        "Status code: 0x%x Additional Status: 0x%x\n",
                        status, add_status);
                goto lancer_fw_exit;
        }

        dev_info(&adapter->pdev->dev, "Firmware flashed successfully\n");
lancer_fw_exit:
        return status;
}

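/* BE2/BE3 download path: validate that the UFI generation matches the
 * adapter generation, then flash each image (Gen3 files may carry several
 * image headers).
 */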
static int be_fw_download(struct be_adapter *adapter, const struct firmware *fw)
{
        struct flash_file_hdr_g2 *fhdr;
        struct flash_file_hdr_g3 *fhdr3;
        struct image_hdr *img_hdr_ptr = NULL;
        struct be_dma_mem flash_cmd;
        const u8 *p;
        int status = 0, i = 0, num_imgs = 0;

        p = fw->data;
        fhdr = (struct flash_file_hdr_g2 *) p;

        flash_cmd.size = sizeof(struct be_cmd_write_flashrom) + 32*1024;
        flash_cmd.va = dma_alloc_coherent(&adapter->pdev->dev, flash_cmd.size,
                                          &flash_cmd.dma, GFP_KERNEL);
        if (!flash_cmd.va) {
                status = -ENOMEM;
                dev_err(&adapter->pdev->dev,
                        "Memory allocation failure while flashing\n");
                goto be_fw_exit;
        }

        if ((adapter->generation == BE_GEN3) &&
                        (get_ufigen_type(fhdr) == BE_GEN3)) {
                fhdr3 = (struct flash_file_hdr_g3 *) fw->data;
                num_imgs = le32_to_cpu(fhdr3->num_imgs);
                for (i = 0; i < num_imgs; i++) {
                        img_hdr_ptr = (struct image_hdr *) (fw->data +
                                        (sizeof(struct flash_file_hdr_g3) +
                                         i * sizeof(struct image_hdr)));
                        if (le32_to_cpu(img_hdr_ptr->imageid) == 1)
                                status = be_flash_data(adapter, fw, &flash_cmd,
                                                        num_imgs);
                }
        } else if ((adapter->generation == BE_GEN2) &&
                        (get_ufigen_type(fhdr) == BE_GEN2)) {
                status = be_flash_data(adapter, fw, &flash_cmd, 0);
        } else {
                dev_err(&adapter->pdev->dev,
                        "UFI and Interface are not compatible for flashing\n");
                status = -1;
        }

        dma_free_coherent(&adapter->pdev->dev, flash_cmd.size, flash_cmd.va,
                          flash_cmd.dma);
        if (status) {
                dev_err(&adapter->pdev->dev, "Firmware load error\n");
                goto be_fw_exit;
        }

        dev_info(&adapter->pdev->dev, "Firmware flashed successfully\n");

be_fw_exit:
        return status;
}

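/* Entry point for firmware flashing; reached from ethtool
 * (e.g. "ethtool -f <iface> <fw_file>"). The interface must be up so that
 * command completions can be processed while flashing.
 */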
int be_load_fw(struct be_adapter *adapter, u8 *fw_file)
{
        const struct firmware *fw;
        int status;

        if (!netif_running(adapter->netdev)) {
                dev_err(&adapter->pdev->dev,
                        "Firmware load not allowed (interface is down)\n");
                return -1;
        }

        status = request_firmware(&fw, fw_file, &adapter->pdev->dev);
        if (status)
                goto fw_exit;

        dev_info(&adapter->pdev->dev, "Flashing firmware file %s\n", fw_file);

        if (lancer_chip(adapter))
                status = lancer_fw_download(adapter, fw);
        else
                status = be_fw_download(adapter, fw);

fw_exit:
        release_firmware(fw);
        return status;
}

static struct net_device_ops be_netdev_ops = {
        .ndo_open               = be_open,
        .ndo_stop               = be_close,
        .ndo_start_xmit         = be_xmit,
        .ndo_set_rx_mode        = be_set_multicast_list,
        .ndo_set_mac_address    = be_mac_addr_set,
        .ndo_change_mtu         = be_change_mtu,
        .ndo_validate_addr      = eth_validate_addr,
        .ndo_vlan_rx_register   = be_vlan_register,
        .ndo_vlan_rx_add_vid    = be_vlan_add_vid,
        .ndo_vlan_rx_kill_vid   = be_vlan_rem_vid,
        .ndo_set_vf_mac         = be_set_vf_mac,
        .ndo_set_vf_vlan        = be_set_vf_vlan,
        .ndo_set_vf_tx_rate     = be_set_vf_tx_rate,
        .ndo_get_vf_config      = be_get_vf_config
};

static void be_netdev_init(struct net_device *netdev)
{
        struct be_adapter *adapter = netdev_priv(netdev);
        struct be_rx_obj *rxo;
        int i;

        netdev->hw_features |= NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6 |
                NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_RXCSUM |
                NETIF_F_HW_VLAN_TX;
        if (be_multi_rxq(adapter))
                netdev->hw_features |= NETIF_F_RXHASH;

        netdev->features |= netdev->hw_features |
                NETIF_F_HW_VLAN_RX | NETIF_F_HW_VLAN_FILTER;

        netdev->vlan_features |= NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6 |
                NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;

        netdev->flags |= IFF_MULTICAST;

        /* Default settings for Rx and Tx flow control */
        adapter->rx_fc = true;
        adapter->tx_fc = true;

        netif_set_gso_max_size(netdev, 65535);

        BE_SET_NETDEV_OPS(netdev, &be_netdev_ops);

        SET_ETHTOOL_OPS(netdev, &be_ethtool_ops);

        for_all_rx_queues(adapter, rxo, i)
                netif_napi_add(netdev, &rxo->rx_eq.napi, be_poll_rx,
                        BE_NAPI_WEIGHT);

        netif_napi_add(netdev, &adapter->tx_eq.napi, be_poll_tx_mcc,
                BE_NAPI_WEIGHT);
}

static void be_unmap_pci_bars(struct be_adapter *adapter)
{
        if (adapter->csr)
                iounmap(adapter->csr);
        if (adapter->db)
                iounmap(adapter->db);
        if (adapter->pcicfg && be_physfn(adapter))
                iounmap(adapter->pcicfg);
}

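/* Map the BARs this function needs: CSR (PF only) and doorbell, plus the
 * pcicfg region. BAR numbers differ by generation, Lancer exposes only a
 * single doorbell BAR, and VFs reach pcicfg at a fixed offset from the
 * doorbell mapping.
 */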
static int be_map_pci_bars(struct be_adapter *adapter)
{
        u8 __iomem *addr;
        int pcicfg_reg, db_reg;

        if (lancer_chip(adapter)) {
                addr = ioremap_nocache(pci_resource_start(adapter->pdev, 0),
                        pci_resource_len(adapter->pdev, 0));
                if (addr == NULL)
                        return -ENOMEM;
                adapter->db = addr;
                return 0;
        }

        if (be_physfn(adapter)) {
                addr = ioremap_nocache(pci_resource_start(adapter->pdev, 2),
                                pci_resource_len(adapter->pdev, 2));
                if (addr == NULL)
                        return -ENOMEM;
                adapter->csr = addr;
        }

        if (adapter->generation == BE_GEN2) {
                pcicfg_reg = 1;
                db_reg = 4;
        } else {
                pcicfg_reg = 0;
                if (be_physfn(adapter))
                        db_reg = 4;
                else
                        db_reg = 0;
        }
        addr = ioremap_nocache(pci_resource_start(adapter->pdev, db_reg),
                                pci_resource_len(adapter->pdev, db_reg));
        if (addr == NULL)
                goto pci_map_err;
        adapter->db = addr;

        if (be_physfn(adapter)) {
                addr = ioremap_nocache(
                                pci_resource_start(adapter->pdev, pcicfg_reg),
                                pci_resource_len(adapter->pdev, pcicfg_reg));
                if (addr == NULL)
                        goto pci_map_err;
                adapter->pcicfg = addr;
        } else
                adapter->pcicfg = adapter->db + SRIOV_VF_PCICFG_OFFSET;

        return 0;
pci_map_err:
        be_unmap_pci_bars(adapter);
        return -ENOMEM;
}

static void be_ctrl_cleanup(struct be_adapter *adapter)
{
        struct be_dma_mem *mem = &adapter->mbox_mem_alloced;

        be_unmap_pci_bars(adapter);

        if (mem->va)
                dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
                                  mem->dma);

        mem = &adapter->mc_cmd_mem;
        if (mem->va)
                dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
                                  mem->dma);
}

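/* Map the BARs and allocate the DMA memory used for the mailbox (16-byte
 * aligned within a slightly larger allocation) and the multicast command
 * buffer, then initialize the mbox/MCC locks.
 */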
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003032static int be_ctrl_init(struct be_adapter *adapter)
3033{
Sathya Perla8788fdc2009-07-27 22:52:03 +00003034 struct be_dma_mem *mbox_mem_alloc = &adapter->mbox_mem_alloced;
3035 struct be_dma_mem *mbox_mem_align = &adapter->mbox_mem;
Sathya Perlae7b909a2009-11-22 22:01:10 +00003036 struct be_dma_mem *mc_cmd_mem = &adapter->mc_cmd_mem;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003037 int status;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003038
3039 status = be_map_pci_bars(adapter);
3040 if (status)
Sathya Perlae7b909a2009-11-22 22:01:10 +00003041 goto done;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003042
3043 mbox_mem_alloc->size = sizeof(struct be_mcc_mailbox) + 16;
Ivan Vecera2b7bceb2011-02-02 08:05:12 +00003044 mbox_mem_alloc->va = dma_alloc_coherent(&adapter->pdev->dev,
3045 mbox_mem_alloc->size,
3046 &mbox_mem_alloc->dma,
3047 GFP_KERNEL);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003048 if (!mbox_mem_alloc->va) {
Sathya Perlae7b909a2009-11-22 22:01:10 +00003049 status = -ENOMEM;
3050 goto unmap_pci_bars;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003051 }
Sathya Perlae7b909a2009-11-22 22:01:10 +00003052
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003053 mbox_mem_align->size = sizeof(struct be_mcc_mailbox);
3054 mbox_mem_align->va = PTR_ALIGN(mbox_mem_alloc->va, 16);
3055 mbox_mem_align->dma = PTR_ALIGN(mbox_mem_alloc->dma, 16);
3056 memset(mbox_mem_align->va, 0, sizeof(struct be_mcc_mailbox));
Sathya Perlae7b909a2009-11-22 22:01:10 +00003057
3058 mc_cmd_mem->size = sizeof(struct be_cmd_req_mcast_mac_config);
Ivan Vecera2b7bceb2011-02-02 08:05:12 +00003059 mc_cmd_mem->va = dma_alloc_coherent(&adapter->pdev->dev,
3060 mc_cmd_mem->size, &mc_cmd_mem->dma,
3061 GFP_KERNEL);
Sathya Perlae7b909a2009-11-22 22:01:10 +00003062 if (mc_cmd_mem->va == NULL) {
3063 status = -ENOMEM;
3064 goto free_mbox;
3065 }
3066 memset(mc_cmd_mem->va, 0, mc_cmd_mem->size);
3067
Ivan Vecera29849612010-12-14 05:43:19 +00003068 mutex_init(&adapter->mbox_lock);
Sathya Perla8788fdc2009-07-27 22:52:03 +00003069 spin_lock_init(&adapter->mcc_lock);
3070 spin_lock_init(&adapter->mcc_cq_lock);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003071
Sarveshwar Bandidd131e72010-05-25 16:16:32 -07003072 init_completion(&adapter->flash_compl);
Sathya Perlacf588472010-02-14 21:22:01 +00003073 pci_save_state(adapter->pdev);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003074 return 0;
Sathya Perlae7b909a2009-11-22 22:01:10 +00003075
3076free_mbox:
Ivan Vecera2b7bceb2011-02-02 08:05:12 +00003077 dma_free_coherent(&adapter->pdev->dev, mbox_mem_alloc->size,
3078 mbox_mem_alloc->va, mbox_mem_alloc->dma);
Sathya Perlae7b909a2009-11-22 22:01:10 +00003079
3080unmap_pci_bars:
3081 be_unmap_pci_bars(adapter);
3082
3083done:
3084 return status;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003085}
3086
3087static void be_stats_cleanup(struct be_adapter *adapter)
3088{
Sathya Perla3abcded2010-10-03 22:12:27 -07003089 struct be_dma_mem *cmd = &adapter->stats_cmd;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003090
3091 if (cmd->va)
Ivan Vecera2b7bceb2011-02-02 08:05:12 +00003092 dma_free_coherent(&adapter->pdev->dev, cmd->size,
3093 cmd->va, cmd->dma);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003094}
3095
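/* Allocate the DMA buffer for stats queries; its size depends on the stats
 * command version the chip uses (v0 for Gen2, pport stats for Lancer,
 * v1 otherwise).
 */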
3096static int be_stats_init(struct be_adapter *adapter)
3097{
Sathya Perla3abcded2010-10-03 22:12:27 -07003098 struct be_dma_mem *cmd = &adapter->stats_cmd;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003099
Selvin Xavier005d5692011-05-16 07:36:35 +00003100 if (adapter->generation == BE_GEN2) {
Ajit Khaparde89a88ab2011-05-16 07:36:18 +00003101 cmd->size = sizeof(struct be_cmd_req_get_stats_v0);
Selvin Xavier005d5692011-05-16 07:36:35 +00003102 } else {
3103 if (lancer_chip(adapter))
3104 cmd->size = sizeof(struct lancer_cmd_req_pport_stats);
3105 else
3106 cmd->size = sizeof(struct be_cmd_req_get_stats_v1);
3107 }
Ivan Vecera2b7bceb2011-02-02 08:05:12 +00003108 cmd->va = dma_alloc_coherent(&adapter->pdev->dev, cmd->size, &cmd->dma,
3109 GFP_KERNEL);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003110 if (cmd->va == NULL)
3111 return -1;
David S. Millerd291b9a2010-01-28 21:36:21 -08003112 memset(cmd->va, 0, cmd->size);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003113 return 0;
3114}
3115
3116static void __devexit be_remove(struct pci_dev *pdev)
3117{
3118 struct be_adapter *adapter = pci_get_drvdata(pdev);
Sathya Perla8d56ff12009-11-22 22:02:26 +00003119
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003120 if (!adapter)
3121 return;
3122
Somnath Koturf203af72010-10-25 23:01:03 +00003123 cancel_delayed_work_sync(&adapter->work);
3124
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003125 unregister_netdev(adapter->netdev);
3126
Sathya Perla5fb379e2009-06-18 00:02:59 +00003127 be_clear(adapter);
3128
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003129 be_stats_cleanup(adapter);
3130
3131 be_ctrl_cleanup(adapter);
3132
Ajit Khaparde48f5a192011-04-06 18:08:30 +00003133 kfree(adapter->vf_cfg);
Sarveshwar Bandiba343c72010-03-31 02:56:12 +00003134 be_sriov_disable(adapter);
3135
Sathya Perla8d56ff12009-11-22 22:02:26 +00003136 be_msix_disable(adapter);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003137
3138 pci_set_drvdata(pdev, NULL);
3139 pci_release_regions(pdev);
3140 pci_disable_device(pdev);
3141
3142 free_netdev(adapter->netdev);
3143}
3144
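/* Query the FW version, function mode/capabilities and (for PFs, and for
 * VFs on Lancer) the permanent MAC address, and size the VLAN table
 * according to the function mode.
 */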
static int be_get_config(struct be_adapter *adapter)
{
        int status;
        u8 mac[ETH_ALEN];

        status = be_cmd_get_fw_ver(adapter, adapter->fw_ver);
        if (status)
                return status;

        status = be_cmd_query_fw_cfg(adapter, &adapter->port_num,
                        &adapter->function_mode, &adapter->function_caps);
        if (status)
                return status;

        memset(mac, 0, ETH_ALEN);

        /* A default permanent address is given to each VF for Lancer */
        if (be_physfn(adapter) || lancer_chip(adapter)) {
                status = be_cmd_mac_addr_query(adapter, mac,
                        MAC_ADDRESS_TYPE_NETWORK, true /* permanent */, 0);

                if (status)
                        return status;

                if (!is_valid_ether_addr(mac))
                        return -EADDRNOTAVAIL;

                memcpy(adapter->netdev->dev_addr, mac, ETH_ALEN);
                memcpy(adapter->netdev->perm_addr, mac, ETH_ALEN);
        }

        if (adapter->function_mode & 0x400)
                adapter->max_vlans = BE_NUM_VLANS_SUPPORTED/4;
        else
                adapter->max_vlans = BE_NUM_VLANS_SUPPORTED;

        status = be_cmd_get_cntl_attributes(adapter);
        if (status)
                return status;

        be_cmd_check_native_mode(adapter);
        return 0;
}

static int be_dev_family_check(struct be_adapter *adapter)
{
        struct pci_dev *pdev = adapter->pdev;
        u32 sli_intf = 0, if_type;

        switch (pdev->device) {
        case BE_DEVICE_ID1:
        case OC_DEVICE_ID1:
                adapter->generation = BE_GEN2;
                break;
        case BE_DEVICE_ID2:
        case OC_DEVICE_ID2:
                adapter->generation = BE_GEN3;
                break;
        case OC_DEVICE_ID3:
        case OC_DEVICE_ID4:
                pci_read_config_dword(pdev, SLI_INTF_REG_OFFSET, &sli_intf);
                if_type = (sli_intf & SLI_INTF_IF_TYPE_MASK) >>
                                                SLI_INTF_IF_TYPE_SHIFT;

                if (((sli_intf & SLI_INTF_VALID_MASK) != SLI_INTF_VALID) ||
                        if_type != 0x02) {
                        dev_err(&pdev->dev, "SLI_INTF reg val is not valid\n");
                        return -EINVAL;
                }
                adapter->sli_family = ((sli_intf & SLI_INTF_FAMILY_MASK) >>
                                         SLI_INTF_FAMILY_SHIFT);
                adapter->generation = BE_GEN3;
                break;
        default:
                adapter->generation = 0;
        }
        return 0;
}

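/* Poll the SLIPORT status register until the FW reports ready, giving up
 * after roughly 10 seconds (500 iterations x 20ms).
 */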
static int lancer_wait_ready(struct be_adapter *adapter)
{
#define SLIPORT_READY_TIMEOUT 500
        u32 sliport_status;
        int status = 0, i;

        for (i = 0; i < SLIPORT_READY_TIMEOUT; i++) {
                sliport_status = ioread32(adapter->db + SLIPORT_STATUS_OFFSET);
                if (sliport_status & SLIPORT_STATUS_RDY_MASK)
                        break;

                msleep(20);
        }

        if (i == SLIPORT_READY_TIMEOUT)
                status = -1;

        return status;
}

static int lancer_test_and_set_rdy_state(struct be_adapter *adapter)
{
        int status;
        u32 sliport_status, err, reset_needed;

        status = lancer_wait_ready(adapter);
        if (!status) {
                sliport_status = ioread32(adapter->db + SLIPORT_STATUS_OFFSET);
                err = sliport_status & SLIPORT_STATUS_ERR_MASK;
                reset_needed = sliport_status & SLIPORT_STATUS_RN_MASK;
                if (err && reset_needed) {
                        iowrite32(SLI_PORT_CONTROL_IP_MASK,
                                  adapter->db + SLIPORT_CONTROL_OFFSET);

                        /* check if the adapter has corrected the error */
                        status = lancer_wait_ready(adapter);
                        sliport_status = ioread32(adapter->db +
                                                  SLIPORT_STATUS_OFFSET);
                        sliport_status &= (SLIPORT_STATUS_ERR_MASK |
                                                SLIPORT_STATUS_RN_MASK);
                        if (status || sliport_status)
                                status = -1;
                } else if (err || reset_needed) {
                        status = -1;
                }
        }
        return status;
}

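/* PCI probe: bring up control structures, sync with the FW, create the
 * queues and register the netdev. The error labels unwind in exactly the
 * reverse order of setup.
 */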
static int __devinit be_probe(struct pci_dev *pdev,
                        const struct pci_device_id *pdev_id)
{
        int status = 0;
        struct be_adapter *adapter;
        struct net_device *netdev;

        status = pci_enable_device(pdev);
        if (status)
                goto do_none;

        status = pci_request_regions(pdev, DRV_NAME);
        if (status)
                goto disable_dev;
        pci_set_master(pdev);

        netdev = alloc_etherdev(sizeof(struct be_adapter));
        if (netdev == NULL) {
                status = -ENOMEM;
                goto rel_reg;
        }
        adapter = netdev_priv(netdev);
        adapter->pdev = pdev;
        pci_set_drvdata(pdev, adapter);

        status = be_dev_family_check(adapter);
        if (status)
                goto free_netdev;

        adapter->netdev = netdev;
        SET_NETDEV_DEV(netdev, &pdev->dev);

        status = dma_set_mask(&pdev->dev, DMA_BIT_MASK(64));
        if (!status) {
                netdev->features |= NETIF_F_HIGHDMA;
        } else {
                status = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
                if (status) {
                        dev_err(&pdev->dev, "Could not set PCI DMA Mask\n");
                        goto free_netdev;
                }
        }

        be_sriov_enable(adapter);
        if (adapter->sriov_enabled) {
                adapter->vf_cfg = kcalloc(num_vfs,
                        sizeof(struct be_vf_cfg), GFP_KERNEL);

                if (!adapter->vf_cfg)
                        goto free_netdev;
        }

        status = be_ctrl_init(adapter);
        if (status)
                goto free_vf_cfg;

        if (lancer_chip(adapter)) {
                status = lancer_test_and_set_rdy_state(adapter);
                if (status) {
                        dev_err(&pdev->dev,
                                "Adapter in non-recoverable error\n");
                        goto ctrl_clean;
                }
        }

        /* sync up with fw's ready state */
        if (be_physfn(adapter)) {
                status = be_cmd_POST(adapter);
                if (status)
                        goto ctrl_clean;
        }

        /* tell fw we're ready to fire cmds */
        status = be_cmd_fw_init(adapter);
        if (status)
                goto ctrl_clean;

        status = be_cmd_reset_function(adapter);
        if (status)
                goto ctrl_clean;

        status = be_stats_init(adapter);
        if (status)
                goto ctrl_clean;

        status = be_get_config(adapter);
        if (status)
                goto stats_clean;

        be_msix_enable(adapter);

        INIT_DELAYED_WORK(&adapter->work, be_worker);

        status = be_setup(adapter);
        if (status)
                goto msix_disable;

        be_netdev_init(netdev);
        status = register_netdev(netdev);
        if (status != 0)
                goto unsetup;
        netif_carrier_off(netdev);

        if (be_physfn(adapter) && adapter->sriov_enabled) {
                u8 mac_speed;
                bool link_up;
                u16 vf, lnk_speed;

                if (!lancer_chip(adapter)) {
                        status = be_vf_eth_addr_config(adapter);
                        if (status)
                                goto unreg_netdev;
                }

                for (vf = 0; vf < num_vfs; vf++) {
                        status = be_cmd_link_status_query(adapter, &link_up,
                                        &mac_speed, &lnk_speed, vf + 1);
                        if (!status)
                                adapter->vf_cfg[vf].vf_tx_rate =
                                                lnk_speed * 10;
                        else
                                goto unreg_netdev;
                }
        }

        dev_info(&pdev->dev, "%s port %d\n", nic_name(pdev),
                 adapter->port_num);
        /* By default all priorities are enabled.
         * Needed in case of no GRP5 evt support
         */
        adapter->vlan_prio_bmap = 0xff;

        schedule_delayed_work(&adapter->work, msecs_to_jiffies(100));
        return 0;

unreg_netdev:
        unregister_netdev(netdev);
unsetup:
        be_clear(adapter);
msix_disable:
        be_msix_disable(adapter);
stats_clean:
        be_stats_cleanup(adapter);
ctrl_clean:
        be_ctrl_cleanup(adapter);
free_vf_cfg:
        kfree(adapter->vf_cfg);
free_netdev:
        be_sriov_disable(adapter);
        free_netdev(netdev);
        pci_set_drvdata(pdev, NULL);
rel_reg:
        pci_release_regions(pdev);
disable_dev:
        pci_disable_device(pdev);
do_none:
        dev_err(&pdev->dev, "%s initialization failed\n", nic_name(pdev));
        return status;
}

static int be_suspend(struct pci_dev *pdev, pm_message_t state)
{
        struct be_adapter *adapter = pci_get_drvdata(pdev);
        struct net_device *netdev = adapter->netdev;

        cancel_delayed_work_sync(&adapter->work);
        if (adapter->wol)
                be_setup_wol(adapter, true);

        netif_device_detach(netdev);
        if (netif_running(netdev)) {
                rtnl_lock();
                be_close(netdev);
                rtnl_unlock();
        }
        be_cmd_get_flow_control(adapter, &adapter->tx_fc, &adapter->rx_fc);
        be_clear(adapter);

        be_msix_disable(adapter);
        pci_save_state(pdev);
        pci_disable_device(pdev);
        pci_set_power_state(pdev, pci_choose_state(pdev, state));
        return 0;
}

static int be_resume(struct pci_dev *pdev)
{
        int status = 0;
        struct be_adapter *adapter = pci_get_drvdata(pdev);
        struct net_device *netdev = adapter->netdev;

        netif_device_detach(netdev);

        status = pci_enable_device(pdev);
        if (status)
                return status;

        pci_set_power_state(pdev, 0);
        pci_restore_state(pdev);

        be_msix_enable(adapter);
        /* tell fw we're ready to fire cmds */
        status = be_cmd_fw_init(adapter);
        if (status)
                return status;

        be_setup(adapter);
        if (netif_running(netdev)) {
                rtnl_lock();
                be_open(netdev);
                rtnl_unlock();
        }
        netif_device_attach(netdev);

        if (adapter->wol)
                be_setup_wol(adapter, false);

        schedule_delayed_work(&adapter->work, msecs_to_jiffies(100));
        return 0;
}

/*
 * An FLR will stop BE from DMAing any data.
 */
static void be_shutdown(struct pci_dev *pdev)
{
        struct be_adapter *adapter = pci_get_drvdata(pdev);

        if (!adapter)
                return;

        cancel_delayed_work_sync(&adapter->work);

        netif_device_detach(adapter->netdev);

        if (adapter->wol)
                be_setup_wol(adapter, true);

        be_cmd_reset_function(adapter);

        pci_disable_device(pdev);
}

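/* EEH (PCI error recovery) callbacks: detach and tear down on error,
 * re-POST the card on slot reset, and rebuild the setup on resume.
 */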
static pci_ers_result_t be_eeh_err_detected(struct pci_dev *pdev,
                                pci_channel_state_t state)
{
        struct be_adapter *adapter = pci_get_drvdata(pdev);
        struct net_device *netdev = adapter->netdev;

        dev_err(&adapter->pdev->dev, "EEH error detected\n");

        adapter->eeh_err = true;

        netif_device_detach(netdev);

        if (netif_running(netdev)) {
                rtnl_lock();
                be_close(netdev);
                rtnl_unlock();
        }
        be_clear(adapter);

        if (state == pci_channel_io_perm_failure)
                return PCI_ERS_RESULT_DISCONNECT;

        pci_disable_device(pdev);

        return PCI_ERS_RESULT_NEED_RESET;
}

static pci_ers_result_t be_eeh_reset(struct pci_dev *pdev)
{
        struct be_adapter *adapter = pci_get_drvdata(pdev);
        int status;

        dev_info(&adapter->pdev->dev, "EEH reset\n");
        adapter->eeh_err = false;

        status = pci_enable_device(pdev);
        if (status)
                return PCI_ERS_RESULT_DISCONNECT;

        pci_set_master(pdev);
        pci_set_power_state(pdev, 0);
        pci_restore_state(pdev);

        /* Check if card is ok and fw is ready */
        status = be_cmd_POST(adapter);
        if (status)
                return PCI_ERS_RESULT_DISCONNECT;

        return PCI_ERS_RESULT_RECOVERED;
}

static void be_eeh_resume(struct pci_dev *pdev)
{
        int status = 0;
        struct be_adapter *adapter = pci_get_drvdata(pdev);
        struct net_device *netdev = adapter->netdev;

        dev_info(&adapter->pdev->dev, "EEH resume\n");

        pci_save_state(pdev);

        /* tell fw we're ready to fire cmds */
        status = be_cmd_fw_init(adapter);
        if (status)
                goto err;

        status = be_setup(adapter);
        if (status)
                goto err;

        if (netif_running(netdev)) {
                status = be_open(netdev);
                if (status)
                        goto err;
        }
        netif_device_attach(netdev);
        return;
err:
        dev_err(&adapter->pdev->dev, "EEH resume failed\n");
}

static struct pci_error_handlers be_eeh_handlers = {
        .error_detected = be_eeh_err_detected,
        .slot_reset = be_eeh_reset,
        .resume = be_eeh_resume,
};

static struct pci_driver be_driver = {
        .name = DRV_NAME,
        .id_table = be_dev_ids,
        .probe = be_probe,
        .remove = be_remove,
        .suspend = be_suspend,
        .resume = be_resume,
        .shutdown = be_shutdown,
        .err_handler = &be_eeh_handlers
};

static int __init be_init_module(void)
{
        if (rx_frag_size != 8192 && rx_frag_size != 4096 &&
            rx_frag_size != 2048) {
                printk(KERN_WARNING DRV_NAME
                        " : Module param rx_frag_size must be 2048/4096/8192."
                        " Using 2048\n");
                rx_frag_size = 2048;
        }

        return pci_register_driver(&be_driver);
}
module_init(be_init_module);
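
/* Example usage (hypothetical values, assuming the module is installed
 * under its DRV_NAME, be2net): load with larger RX fragments and four VFs:
 *
 *      modprobe be2net rx_frag_size=4096 num_vfs=4
 *
 * An rx_frag_size other than 2048/4096/8192 is rejected above and falls
 * back to 2048.
 */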

static void __exit be_exit_module(void)
{
        pci_unregister_driver(&be_driver);
}
module_exit(be_exit_module);