/*
 * Copyright (C) 2005 - 2011 Emulex
 * All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version 2
 * as published by the Free Software Foundation. The full GNU General
 * Public License is included in this distribution in the file called COPYING.
 *
 * Contact Information:
 * linux-drivers@emulex.com
 *
 * Emulex
 * 3333 Susan Street
 * Costa Mesa, CA 92626
 */

#include <linux/prefetch.h>
#include "be.h"
#include "be_cmds.h"
#include <asm/div64.h>

MODULE_VERSION(DRV_VER);
MODULE_DESCRIPTION(DRV_DESC " " DRV_VER);
MODULE_AUTHOR("ServerEngines Corporation");
MODULE_LICENSE("GPL");

static ushort rx_frag_size = 2048;
static unsigned int num_vfs;
module_param(rx_frag_size, ushort, S_IRUGO);
module_param(num_vfs, uint, S_IRUGO);
MODULE_PARM_DESC(rx_frag_size, "Size of a fragment that holds rcvd data.");
MODULE_PARM_DESC(num_vfs, "Number of PCI VFs to initialize");

static bool multi_rxq = true;
module_param(multi_rxq, bool, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(multi_rxq, "Multi Rx Queue support. Enabled by default");

static DEFINE_PCI_DEVICE_TABLE(be_dev_ids) = {
        { PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID1) },
        { PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID2) },
        { PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID1) },
        { PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID2) },
        { PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID3) },
        { PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID4) },
        { 0 }
};
MODULE_DEVICE_TABLE(pci, be_dev_ids);
/* UE Status Low CSR */
static char *ue_status_low_desc[] = {
        "CEV",
        "CTX",
        "DBUF",
        "ERX",
        "Host",
        "MPU",
        "NDMA",
        "PTC",
        "RDMA",
        "RXF",
        "RXIPS",
        "RXULP0",
        "RXULP1",
        "RXULP2",
        "TIM",
        "TPOST",
        "TPRE",
        "TXIPS",
        "TXULP0",
        "TXULP1",
        "UC",
        "WDMA",
        "TXULP2",
        "HOST1",
        "P0_OB_LINK",
        "P1_OB_LINK",
        "HOST_GPIO",
        "MBOX",
        "AXGMAC0",
        "AXGMAC1",
        "JTAG",
        "MPU_INTPEND"
};
/* UE Status High CSR */
static char *ue_status_hi_desc[] = {
        "LPCMEMHOST",
        "MGMT_MAC",
        "PCS0ONLINE",
        "MPU_IRAM",
        "PCS1ONLINE",
        "PCTL0",
        "PCTL1",
        "PMEM",
        "RR",
        "TXPB",
        "RXPP",
        "XAUI",
        "TXP",
        "ARM",
        "IPC",
        "HOST2",
        "HOST3",
        "HOST4",
        "HOST5",
        "HOST6",
        "HOST7",
        "HOST8",
        "HOST9",
        "NETC",
        "Unknown",
        "Unknown",
        "Unknown",
        "Unknown",
        "Unknown",
        "Unknown",
        "Unknown",
        "Unknown"
};

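/*
 * Free the DMA-coherent memory backing a queue, if any was allocated.
 * Paired with be_queue_alloc() below.
 */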
static void be_queue_free(struct be_adapter *adapter, struct be_queue_info *q)
{
        struct be_dma_mem *mem = &q->dma_mem;

        if (mem->va)
                dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
                                  mem->dma);
}

static int be_queue_alloc(struct be_adapter *adapter, struct be_queue_info *q,
                u16 len, u16 entry_size)
{
        struct be_dma_mem *mem = &q->dma_mem;

        memset(q, 0, sizeof(*q));
        q->len = len;
        q->entry_size = entry_size;
        mem->size = len * entry_size;
        mem->va = dma_alloc_coherent(&adapter->pdev->dev, mem->size, &mem->dma,
                                     GFP_KERNEL);
        if (!mem->va)
                return -1;
        memset(mem->va, 0, mem->size);
        return 0;
}

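/*
 * Toggle host interrupt delivery via the HOSTINTR bit of the membar
 * control register; the current value is read back first so the write
 * is skipped when the bit already matches the request.
 */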
static void be_intr_set(struct be_adapter *adapter, bool enable)
{
        u8 __iomem *addr = adapter->pcicfg + PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET;
        u32 reg = ioread32(addr);
        u32 enabled = reg & MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;

        if (adapter->eeh_err)
                return;

        if (!enabled && enable)
                reg |= MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
        else if (enabled && !enable)
                reg &= ~MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
        else
                return;

        iowrite32(reg, addr);
}

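/*
 * RQ/TXULP doorbells: tell the hardware how many new entries were posted
 * to a ring.  The wmb() ensures the ring-memory writes are visible before
 * the doorbell is rung.
 */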
static void be_rxq_notify(struct be_adapter *adapter, u16 qid, u16 posted)
{
        u32 val = 0;
        val |= qid & DB_RQ_RING_ID_MASK;
        val |= posted << DB_RQ_NUM_POSTED_SHIFT;

        wmb();
        iowrite32(val, adapter->db + DB_RQ_OFFSET);
}

static void be_txq_notify(struct be_adapter *adapter, u16 qid, u16 posted)
{
        u32 val = 0;
        val |= qid & DB_TXULP_RING_ID_MASK;
        val |= (posted & DB_TXULP_NUM_POSTED_MASK) << DB_TXULP_NUM_POSTED_SHIFT;

        wmb();
        iowrite32(val, adapter->db + DB_TXULP1_OFFSET);
}

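/*
 * EQ/CQ doorbells: acknowledge 'num_popped' processed entries and
 * optionally re-arm the queue so it keeps raising events/completions.
 */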
static void be_eq_notify(struct be_adapter *adapter, u16 qid,
                bool arm, bool clear_int, u16 num_popped)
{
        u32 val = 0;
        val |= qid & DB_EQ_RING_ID_MASK;
        val |= ((qid & DB_EQ_RING_ID_EXT_MASK) <<
                        DB_EQ_RING_ID_EXT_MASK_SHIFT);

        if (adapter->eeh_err)
                return;

        if (arm)
                val |= 1 << DB_EQ_REARM_SHIFT;
        if (clear_int)
                val |= 1 << DB_EQ_CLR_SHIFT;
        val |= 1 << DB_EQ_EVNT_SHIFT;
        val |= num_popped << DB_EQ_NUM_POPPED_SHIFT;
        iowrite32(val, adapter->db + DB_EQ_OFFSET);
}

void be_cq_notify(struct be_adapter *adapter, u16 qid, bool arm, u16 num_popped)
{
        u32 val = 0;
        val |= qid & DB_CQ_RING_ID_MASK;
        val |= ((qid & DB_CQ_RING_ID_EXT_MASK) <<
                        DB_CQ_RING_ID_EXT_MASK_SHIFT);

        if (adapter->eeh_err)
                return;

        if (arm)
                val |= 1 << DB_CQ_REARM_SHIFT;
        val |= num_popped << DB_CQ_NUM_POPPED_SHIFT;
        iowrite32(val, adapter->db + DB_CQ_OFFSET);
}

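/*
 * ndo_set_mac_address handler.  On a PF the old unicast MAC is deleted
 * and the new one programmed via mailbox commands before netdev is
 * updated.
 */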
static int be_mac_addr_set(struct net_device *netdev, void *p)
{
        struct be_adapter *adapter = netdev_priv(netdev);
        struct sockaddr *addr = p;
        int status = 0;

        if (!is_valid_ether_addr(addr->sa_data))
                return -EADDRNOTAVAIL;

        /* MAC addr configuration will be done in hardware for VFs
         * by their corresponding PFs. Just copy to netdev addr here
         */
        if (!be_physfn(adapter))
                goto netdev_addr;

        status = be_cmd_pmac_del(adapter, adapter->if_handle,
                                 adapter->pmac_id, 0);
        if (status)
                return status;

        status = be_cmd_pmac_add(adapter, (u8 *)addr->sa_data,
                                 adapter->if_handle, &adapter->pmac_id, 0);
netdev_addr:
        if (!status)
                memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);

        return status;
}

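/*
 * The firmware reports stats in generation-specific layouts: v0 (BE2),
 * v1 (BE3) and the Lancer pport format.  The populate_*_stats() helpers
 * below copy whichever layout was returned into the common
 * adapter->drv_stats, so the rest of the driver stays layout-agnostic.
 */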
static void populate_be2_stats(struct be_adapter *adapter)
{
        struct be_drv_stats *drvs = &adapter->drv_stats;
        struct be_pmem_stats *pmem_sts = be_pmem_stats_from_cmd(adapter);
        struct be_port_rxf_stats_v0 *port_stats =
                be_port_rxf_stats_from_cmd(adapter);
        struct be_rxf_stats_v0 *rxf_stats = be_rxf_stats_from_cmd(adapter);

        drvs->rx_pause_frames = port_stats->rx_pause_frames;
        drvs->rx_crc_errors = port_stats->rx_crc_errors;
        drvs->rx_control_frames = port_stats->rx_control_frames;
        drvs->rx_in_range_errors = port_stats->rx_in_range_errors;
        drvs->rx_frame_too_long = port_stats->rx_frame_too_long;
        drvs->rx_dropped_runt = port_stats->rx_dropped_runt;
        drvs->rx_ip_checksum_errs = port_stats->rx_ip_checksum_errs;
        drvs->rx_tcp_checksum_errs = port_stats->rx_tcp_checksum_errs;
        drvs->rx_udp_checksum_errs = port_stats->rx_udp_checksum_errs;
        drvs->rxpp_fifo_overflow_drop = port_stats->rx_fifo_overflow;
        drvs->rx_dropped_tcp_length = port_stats->rx_dropped_tcp_length;
        drvs->rx_dropped_too_small = port_stats->rx_dropped_too_small;
        drvs->rx_dropped_too_short = port_stats->rx_dropped_too_short;
        drvs->rx_out_range_errors = port_stats->rx_out_range_errors;
        drvs->rx_input_fifo_overflow_drop = port_stats->rx_input_fifo_overflow;
        drvs->rx_dropped_header_too_small =
                port_stats->rx_dropped_header_too_small;
        drvs->rx_address_match_errors = port_stats->rx_address_match_errors;
        drvs->rx_alignment_symbol_errors =
                port_stats->rx_alignment_symbol_errors;

        drvs->tx_pauseframes = port_stats->tx_pauseframes;
        drvs->tx_controlframes = port_stats->tx_controlframes;

        if (adapter->port_num)
                drvs->jabber_events = rxf_stats->port1_jabber_events;
        else
                drvs->jabber_events = rxf_stats->port0_jabber_events;
        drvs->rx_drops_no_pbuf = rxf_stats->rx_drops_no_pbuf;
        drvs->rx_drops_no_txpb = rxf_stats->rx_drops_no_txpb;
        drvs->rx_drops_no_erx_descr = rxf_stats->rx_drops_no_erx_descr;
        drvs->rx_drops_invalid_ring = rxf_stats->rx_drops_invalid_ring;
        drvs->forwarded_packets = rxf_stats->forwarded_packets;
        drvs->rx_drops_mtu = rxf_stats->rx_drops_mtu;
        drvs->rx_drops_no_tpre_descr = rxf_stats->rx_drops_no_tpre_descr;
        drvs->rx_drops_too_many_frags = rxf_stats->rx_drops_too_many_frags;
        drvs->eth_red_drops = pmem_sts->eth_red_drops;
}

static void populate_be3_stats(struct be_adapter *adapter)
{
        struct be_drv_stats *drvs = &adapter->drv_stats;
        struct be_pmem_stats *pmem_sts = be_pmem_stats_from_cmd(adapter);
        struct be_rxf_stats_v1 *rxf_stats = be_rxf_stats_from_cmd(adapter);
        struct be_port_rxf_stats_v1 *port_stats =
                be_port_rxf_stats_from_cmd(adapter);

        drvs->rx_priority_pause_frames = 0;
        drvs->pmem_fifo_overflow_drop = 0;
        drvs->rx_pause_frames = port_stats->rx_pause_frames;
        drvs->rx_crc_errors = port_stats->rx_crc_errors;
        drvs->rx_control_frames = port_stats->rx_control_frames;
        drvs->rx_in_range_errors = port_stats->rx_in_range_errors;
        drvs->rx_frame_too_long = port_stats->rx_frame_too_long;
        drvs->rx_dropped_runt = port_stats->rx_dropped_runt;
        drvs->rx_ip_checksum_errs = port_stats->rx_ip_checksum_errs;
        drvs->rx_tcp_checksum_errs = port_stats->rx_tcp_checksum_errs;
        drvs->rx_udp_checksum_errs = port_stats->rx_udp_checksum_errs;
        drvs->rx_dropped_tcp_length = port_stats->rx_dropped_tcp_length;
        drvs->rx_dropped_too_small = port_stats->rx_dropped_too_small;
        drvs->rx_dropped_too_short = port_stats->rx_dropped_too_short;
        drvs->rx_out_range_errors = port_stats->rx_out_range_errors;
        drvs->rx_dropped_header_too_small =
                port_stats->rx_dropped_header_too_small;
        drvs->rx_input_fifo_overflow_drop =
                port_stats->rx_input_fifo_overflow_drop;
        drvs->rx_address_match_errors = port_stats->rx_address_match_errors;
        drvs->rx_alignment_symbol_errors =
                port_stats->rx_alignment_symbol_errors;
        drvs->rxpp_fifo_overflow_drop = port_stats->rxpp_fifo_overflow_drop;
        drvs->tx_pauseframes = port_stats->tx_pauseframes;
        drvs->tx_controlframes = port_stats->tx_controlframes;
        drvs->jabber_events = port_stats->jabber_events;
        drvs->rx_drops_no_pbuf = rxf_stats->rx_drops_no_pbuf;
        drvs->rx_drops_no_txpb = rxf_stats->rx_drops_no_txpb;
        drvs->rx_drops_no_erx_descr = rxf_stats->rx_drops_no_erx_descr;
        drvs->rx_drops_invalid_ring = rxf_stats->rx_drops_invalid_ring;
        drvs->forwarded_packets = rxf_stats->forwarded_packets;
        drvs->rx_drops_mtu = rxf_stats->rx_drops_mtu;
        drvs->rx_drops_no_tpre_descr = rxf_stats->rx_drops_no_tpre_descr;
        drvs->rx_drops_too_many_frags = rxf_stats->rx_drops_too_many_frags;
        drvs->eth_red_drops = pmem_sts->eth_red_drops;
}

static void populate_lancer_stats(struct be_adapter *adapter)
{
        struct be_drv_stats *drvs = &adapter->drv_stats;
        struct lancer_cmd_pport_stats *pport_stats =
                pport_stats_from_cmd(adapter);

        drvs->rx_priority_pause_frames = 0;
        drvs->pmem_fifo_overflow_drop = 0;
        drvs->rx_pause_frames =
                make_64bit_val(pport_stats->rx_pause_frames_hi,
                               pport_stats->rx_pause_frames_lo);
        drvs->rx_crc_errors = make_64bit_val(pport_stats->rx_crc_errors_hi,
                                             pport_stats->rx_crc_errors_lo);
        drvs->rx_control_frames =
                make_64bit_val(pport_stats->rx_control_frames_hi,
                               pport_stats->rx_control_frames_lo);
        drvs->rx_in_range_errors = pport_stats->rx_in_range_errors;
        drvs->rx_frame_too_long =
                make_64bit_val(pport_stats->rx_frames_too_long_hi,
                               pport_stats->rx_frames_too_long_lo);
        drvs->rx_dropped_runt = pport_stats->rx_dropped_runt;
        drvs->rx_ip_checksum_errs = pport_stats->rx_ip_checksum_errors;
        drvs->rx_tcp_checksum_errs = pport_stats->rx_tcp_checksum_errors;
        drvs->rx_udp_checksum_errs = pport_stats->rx_udp_checksum_errors;
        drvs->rx_dropped_tcp_length =
                pport_stats->rx_dropped_invalid_tcp_length;
        drvs->rx_dropped_too_small = pport_stats->rx_dropped_too_small;
        drvs->rx_dropped_too_short = pport_stats->rx_dropped_too_short;
        drvs->rx_out_range_errors = pport_stats->rx_out_of_range_errors;
        drvs->rx_dropped_header_too_small =
                pport_stats->rx_dropped_header_too_small;
        drvs->rx_input_fifo_overflow_drop = pport_stats->rx_fifo_overflow;
        drvs->rx_address_match_errors = pport_stats->rx_address_match_errors;
        drvs->rx_alignment_symbol_errors =
                make_64bit_val(pport_stats->rx_symbol_errors_hi,
                               pport_stats->rx_symbol_errors_lo);
        drvs->rxpp_fifo_overflow_drop = pport_stats->rx_fifo_overflow;
        drvs->tx_pauseframes = make_64bit_val(pport_stats->tx_pause_frames_hi,
                                              pport_stats->tx_pause_frames_lo);
        drvs->tx_controlframes =
                make_64bit_val(pport_stats->tx_control_frames_hi,
                               pport_stats->tx_control_frames_lo);
        drvs->jabber_events = pport_stats->rx_jabbers;
        drvs->rx_drops_no_pbuf = 0;
        drvs->rx_drops_no_txpb = 0;
        drvs->rx_drops_no_erx_descr = 0;
        drvs->rx_drops_invalid_ring = pport_stats->rx_drops_invalid_queue;
        drvs->forwarded_packets = make_64bit_val(pport_stats->num_forwards_hi,
                                                 pport_stats->num_forwards_lo);
        drvs->rx_drops_mtu = make_64bit_val(pport_stats->rx_drops_mtu_hi,
                                            pport_stats->rx_drops_mtu_lo);
        drvs->rx_drops_no_tpre_descr = 0;
        drvs->rx_drops_too_many_frags =
                make_64bit_val(pport_stats->rx_drops_too_many_frags_hi,
                               pport_stats->rx_drops_too_many_frags_lo);
}

void be_parse_stats(struct be_adapter *adapter)
{
        if (adapter->generation == BE_GEN3) {
                if (lancer_chip(adapter))
                        populate_lancer_stats(adapter);
                else
                        populate_be3_stats(adapter);
        } else {
                populate_be2_stats(adapter);
        }
}

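/*
 * Fold the per-queue software counters and the parsed firmware stats
 * into netdev->stats for ifconfig/ip -s style reporting.
 */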
void netdev_stats_update(struct be_adapter *adapter)
{
        struct be_drv_stats *drvs = &adapter->drv_stats;
        struct net_device_stats *dev_stats = &adapter->netdev->stats;
        struct be_rx_obj *rxo;
        struct be_tx_obj *txo;
        unsigned long pkts = 0, bytes = 0, mcast = 0, drops = 0;
        int i;

        for_all_rx_queues(adapter, rxo, i) {
                pkts += rx_stats(rxo)->rx_pkts;
                bytes += rx_stats(rxo)->rx_bytes;
                mcast += rx_stats(rxo)->rx_mcast_pkts;
                /* no space in linux buffers: best possible approximation */
                if (adapter->generation == BE_GEN3) {
                        if (!lancer_chip(adapter)) {
                                struct be_erx_stats_v1 *erx =
                                        be_erx_stats_from_cmd(adapter);
                                drops += erx->rx_drops_no_fragments[rxo->q.id];
                        }
                } else {
                        struct be_erx_stats_v0 *erx =
                                be_erx_stats_from_cmd(adapter);
                        drops += erx->rx_drops_no_fragments[rxo->q.id];
                }
        }
        dev_stats->rx_packets = pkts;
        dev_stats->rx_bytes = bytes;
        dev_stats->multicast = mcast;
        dev_stats->rx_dropped = drops;

        pkts = bytes = 0;
        for_all_tx_queues(adapter, txo, i) {
                pkts += tx_stats(txo)->be_tx_pkts;
                bytes += tx_stats(txo)->be_tx_bytes;
        }
        dev_stats->tx_packets = pkts;
        dev_stats->tx_bytes = bytes;

        /* bad pkts received */
        dev_stats->rx_errors = drvs->rx_crc_errors +
                drvs->rx_alignment_symbol_errors +
                drvs->rx_in_range_errors +
                drvs->rx_out_range_errors +
                drvs->rx_frame_too_long +
                drvs->rx_dropped_too_small +
                drvs->rx_dropped_too_short +
                drvs->rx_dropped_header_too_small +
                drvs->rx_dropped_tcp_length +
                drvs->rx_dropped_runt +
                drvs->rx_tcp_checksum_errs +
                drvs->rx_ip_checksum_errs +
                drvs->rx_udp_checksum_errs;

        /* detailed rx errors */
        dev_stats->rx_length_errors = drvs->rx_in_range_errors +
                drvs->rx_out_range_errors +
                drvs->rx_frame_too_long;

        dev_stats->rx_crc_errors = drvs->rx_crc_errors;

        /* frame alignment errors */
        dev_stats->rx_frame_errors = drvs->rx_alignment_symbol_errors;

        /* receiver fifo overrun */
        /* drops_no_pbuf is not per i/f, it's per BE card */
        dev_stats->rx_fifo_errors = drvs->rxpp_fifo_overflow_drop +
                drvs->rx_input_fifo_overflow_drop +
                drvs->rx_drops_no_pbuf;
}

void be_link_status_update(struct be_adapter *adapter, bool link_up)
{
        struct net_device *netdev = adapter->netdev;

        /* If link came up or went down */
        if (adapter->link_up != link_up) {
                adapter->link_speed = -1;
                if (link_up) {
                        netif_carrier_on(netdev);
                        printk(KERN_INFO "%s: Link up\n", netdev->name);
                } else {
                        netif_carrier_off(netdev);
                        printk(KERN_INFO "%s: Link down\n", netdev->name);
                }
                adapter->link_up = link_up;
        }
}

/* Update the EQ delay in BE based on the RX frags consumed per second */
static void be_rx_eqd_update(struct be_adapter *adapter, struct be_rx_obj *rxo)
{
        struct be_eq_obj *rx_eq = &rxo->rx_eq;
        struct be_rx_stats *stats = &rxo->stats;
        ulong now = jiffies;
        u32 eqd;

        if (!rx_eq->enable_aic)
                return;

        /* Wrapped around */
        if (time_before(now, stats->rx_fps_jiffies)) {
                stats->rx_fps_jiffies = now;
                return;
        }

        /* Update once a second */
        if ((now - stats->rx_fps_jiffies) < HZ)
                return;

        stats->rx_fps = (stats->rx_frags - stats->prev_rx_frags) /
                        ((now - stats->rx_fps_jiffies) / HZ);

        stats->rx_fps_jiffies = now;
        stats->prev_rx_frags = stats->rx_frags;
        eqd = stats->rx_fps / 110000;
        eqd = eqd << 3;
        if (eqd > rx_eq->max_eqd)
                eqd = rx_eq->max_eqd;
        if (eqd < rx_eq->min_eqd)
                eqd = rx_eq->min_eqd;
        if (eqd < 10)
                eqd = 0;
        if (eqd != rx_eq->cur_eqd)
                be_cmd_modify_eqd(adapter, rx_eq->q.id, eqd);

        rx_eq->cur_eqd = eqd;
}

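/*
 * Convert a byte count accumulated over 'ticks' jiffies to Mbits/sec;
 * e.g. 250e6 bytes over 2 seconds: 250e6 / 2 * 8 / 1e6 = 1000 Mbits/sec.
 */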
static u32 be_calc_rate(u64 bytes, unsigned long ticks)
{
        u64 rate = bytes;

        do_div(rate, ticks / HZ);
        rate <<= 3;                     /* bytes/sec -> bits/sec */
        do_div(rate, 1000000ul);        /* bits/sec -> Mbits/sec */

        return rate;
}

static void be_tx_rate_update(struct be_tx_obj *txo)
{
        struct be_tx_stats *stats = tx_stats(txo);
        ulong now = jiffies;

        /* Wrapped around? */
        if (time_before(now, stats->be_tx_jiffies)) {
                stats->be_tx_jiffies = now;
                return;
        }

        /* Update tx rate once in two seconds */
        if ((now - stats->be_tx_jiffies) > 2 * HZ) {
                stats->be_tx_rate = be_calc_rate(stats->be_tx_bytes
                                                 - stats->be_tx_bytes_prev,
                                                 now - stats->be_tx_jiffies);
                stats->be_tx_jiffies = now;
                stats->be_tx_bytes_prev = stats->be_tx_bytes;
        }
}

static void be_tx_stats_update(struct be_tx_obj *txo,
                u32 wrb_cnt, u32 copied, u32 gso_segs, bool stopped)
{
        struct be_tx_stats *stats = tx_stats(txo);

        stats->be_tx_reqs++;
        stats->be_tx_wrbs += wrb_cnt;
        stats->be_tx_bytes += copied;
        stats->be_tx_pkts += (gso_segs ? gso_segs : 1);
        if (stopped)
                stats->be_tx_stops++;
}

/* Determine number of WRB entries needed to xmit data in an skb */
static u32 wrb_cnt_for_skb(struct be_adapter *adapter, struct sk_buff *skb,
                bool *dummy)
{
        int cnt = (skb->len > skb->data_len);

        cnt += skb_shinfo(skb)->nr_frags;

        /* to account for hdr wrb */
        cnt++;
        if (lancer_chip(adapter) || !(cnt & 1)) {
                *dummy = false;
        } else {
                /* add a dummy to make it an even num */
                cnt++;
                *dummy = true;
        }
        BUG_ON(cnt > BE_MAX_TX_FRAG_COUNT);
        return cnt;
}

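/* Fill one tx WRB with the DMA address and length of a fragment. */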
static inline void wrb_fill(struct be_eth_wrb *wrb, u64 addr, int len)
{
        wrb->frag_pa_hi = upper_32_bits(addr);
        wrb->frag_pa_lo = addr & 0xFFFFFFFF;
        wrb->frag_len = len & ETH_WRB_FRAG_LEN_MASK;
}

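/*
 * Build the header WRB that leads every tx request: LSO/checksum-offload
 * flags, optional VLAN insertion, total frame length and WRB count.
 */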
static void wrb_fill_hdr(struct be_adapter *adapter, struct be_eth_hdr_wrb *hdr,
                struct sk_buff *skb, u32 wrb_cnt, u32 len)
{
        u8 vlan_prio = 0;
        u16 vlan_tag = 0;

        memset(hdr, 0, sizeof(*hdr));

        AMAP_SET_BITS(struct amap_eth_hdr_wrb, crc, hdr, 1);

        if (skb_is_gso(skb)) {
                AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso, hdr, 1);
                AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso_mss,
                              hdr, skb_shinfo(skb)->gso_size);
                if (skb_is_gso_v6(skb) && !lancer_chip(adapter))
                        AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso6, hdr, 1);
                if (lancer_chip(adapter) && adapter->sli_family ==
                                LANCER_A0_SLI_FAMILY) {
                        AMAP_SET_BITS(struct amap_eth_hdr_wrb, ipcs, hdr, 1);
                        if (is_tcp_pkt(skb))
                                AMAP_SET_BITS(struct amap_eth_hdr_wrb,
                                              tcpcs, hdr, 1);
                        else if (is_udp_pkt(skb))
                                AMAP_SET_BITS(struct amap_eth_hdr_wrb,
                                              udpcs, hdr, 1);
                }
        } else if (skb->ip_summed == CHECKSUM_PARTIAL) {
                if (is_tcp_pkt(skb))
                        AMAP_SET_BITS(struct amap_eth_hdr_wrb, tcpcs, hdr, 1);
                else if (is_udp_pkt(skb))
                        AMAP_SET_BITS(struct amap_eth_hdr_wrb, udpcs, hdr, 1);
        }

        if (adapter->vlan_grp && vlan_tx_tag_present(skb)) {
                AMAP_SET_BITS(struct amap_eth_hdr_wrb, vlan, hdr, 1);
                vlan_tag = vlan_tx_tag_get(skb);
                vlan_prio = (vlan_tag & VLAN_PRIO_MASK) >> VLAN_PRIO_SHIFT;
                /* If vlan priority provided by OS is NOT in available bmap */
                if (!(adapter->vlan_prio_bmap & (1 << vlan_prio)))
                        vlan_tag = (vlan_tag & ~VLAN_PRIO_MASK) |
                                        adapter->recommended_prio;
                AMAP_SET_BITS(struct amap_eth_hdr_wrb, vlan_tag, hdr, vlan_tag);
        }

        AMAP_SET_BITS(struct amap_eth_hdr_wrb, event, hdr, 1);
        AMAP_SET_BITS(struct amap_eth_hdr_wrb, complete, hdr, 1);
        AMAP_SET_BITS(struct amap_eth_hdr_wrb, num_wrb, hdr, wrb_cnt);
        AMAP_SET_BITS(struct amap_eth_hdr_wrb, len, hdr, len);
}

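/*
 * Undo the DMA mapping recorded in one tx WRB; 'unmap_single' must mirror
 * whether the fragment was mapped with dma_map_single() or dma_map_page().
 */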
static void unmap_tx_frag(struct device *dev, struct be_eth_wrb *wrb,
                bool unmap_single)
{
        dma_addr_t dma;

        be_dws_le_to_cpu(wrb, sizeof(*wrb));

        dma = (u64)wrb->frag_pa_hi << 32 | (u64)wrb->frag_pa_lo;
        if (wrb->frag_len) {
                if (unmap_single)
                        dma_unmap_single(dev, dma, wrb->frag_len,
                                         DMA_TO_DEVICE);
                else
                        dma_unmap_page(dev, dma, wrb->frag_len, DMA_TO_DEVICE);
        }
}

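/*
 * DMA-map the skb head and all its fragments and post one WRB per piece
 * after the header WRB.  On a mapping error, all WRBs written so far are
 * unwound and 0 is returned so the caller can drop the skb.
 */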
static int make_tx_wrbs(struct be_adapter *adapter, struct be_queue_info *txq,
                struct sk_buff *skb, u32 wrb_cnt, bool dummy_wrb)
{
        dma_addr_t busaddr;
        int i, copied = 0;
        struct device *dev = &adapter->pdev->dev;
        struct sk_buff *first_skb = skb;
        struct be_eth_wrb *wrb;
        struct be_eth_hdr_wrb *hdr;
        bool map_single = false;
        u16 map_head;

        hdr = queue_head_node(txq);
        queue_head_inc(txq);
        map_head = txq->head;

        if (skb->len > skb->data_len) {
                int len = skb_headlen(skb);
                busaddr = dma_map_single(dev, skb->data, len, DMA_TO_DEVICE);
                if (dma_mapping_error(dev, busaddr))
                        goto dma_err;
                map_single = true;
                wrb = queue_head_node(txq);
                wrb_fill(wrb, busaddr, len);
                be_dws_cpu_to_le(wrb, sizeof(*wrb));
                queue_head_inc(txq);
                copied += len;
        }

        for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
                struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[i];
                busaddr = dma_map_page(dev, frag->page, frag->page_offset,
                                       frag->size, DMA_TO_DEVICE);
                if (dma_mapping_error(dev, busaddr))
                        goto dma_err;
                wrb = queue_head_node(txq);
                wrb_fill(wrb, busaddr, frag->size);
                be_dws_cpu_to_le(wrb, sizeof(*wrb));
                queue_head_inc(txq);
                copied += frag->size;
        }

        if (dummy_wrb) {
                wrb = queue_head_node(txq);
                wrb_fill(wrb, 0, 0);
                be_dws_cpu_to_le(wrb, sizeof(*wrb));
                queue_head_inc(txq);
        }

        wrb_fill_hdr(adapter, hdr, first_skb, wrb_cnt, copied);
        be_dws_cpu_to_le(hdr, sizeof(*hdr));

        return copied;
dma_err:
        txq->head = map_head;
        while (copied) {
                wrb = queue_head_node(txq);
                unmap_tx_frag(dev, wrb, map_single);
                map_single = false;
                copied -= wrb->frag_len;
                queue_head_inc(txq);
        }
        return 0;
}

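/*
 * ndo_start_xmit handler.  Note that the queue is stopped *before* the
 * doorbell is rung when the ring may not fit another max-fragmented skb,
 * so the completion path can safely wake it later.
 */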
static netdev_tx_t be_xmit(struct sk_buff *skb, struct net_device *netdev)
{
        struct be_adapter *adapter = netdev_priv(netdev);
        struct be_tx_obj *txo = &adapter->tx_obj[skb_get_queue_mapping(skb)];
        struct be_queue_info *txq = &txo->q;
        u32 wrb_cnt = 0, copied = 0;
        u32 start = txq->head;
        bool dummy_wrb, stopped = false;

        wrb_cnt = wrb_cnt_for_skb(adapter, skb, &dummy_wrb);

        copied = make_tx_wrbs(adapter, txq, skb, wrb_cnt, dummy_wrb);
        if (copied) {
                /* record the sent skb in the sent_skb table */
                BUG_ON(txo->sent_skb_list[start]);
                txo->sent_skb_list[start] = skb;

                /* Ensure txq has space for the next skb; Else stop the queue
                 * *BEFORE* ringing the tx doorbell, so that we serialize the
                 * tx compls of the current transmit which'll wake up the queue
                 */
                atomic_add(wrb_cnt, &txq->used);
                if ((BE_MAX_TX_FRAG_COUNT + atomic_read(&txq->used)) >=
                                txq->len) {
                        netif_stop_subqueue(netdev, skb_get_queue_mapping(skb));
                        stopped = true;
                }

                be_txq_notify(adapter, txq->id, wrb_cnt);

                be_tx_stats_update(txo, wrb_cnt, copied,
                                   skb_shinfo(skb)->gso_segs, stopped);
        } else {
                txq->head = start;
                dev_kfree_skb_any(skb);
        }
        return NETDEV_TX_OK;
}

static int be_change_mtu(struct net_device *netdev, int new_mtu)
{
        struct be_adapter *adapter = netdev_priv(netdev);

        if (new_mtu < BE_MIN_MTU ||
            new_mtu > (BE_MAX_JUMBO_FRAME_SIZE - (ETH_HLEN + ETH_FCS_LEN))) {
                dev_info(&adapter->pdev->dev,
                         "MTU must be between %d and %d bytes\n",
                         BE_MIN_MTU,
                         (BE_MAX_JUMBO_FRAME_SIZE - (ETH_HLEN + ETH_FCS_LEN)));
                return -EINVAL;
        }
        dev_info(&adapter->pdev->dev, "MTU changed from %d to %d bytes\n",
                 netdev->mtu, new_mtu);
        netdev->mtu = new_mtu;
        return 0;
}

/*
 * A max of 64 (BE_NUM_VLANS_SUPPORTED) vlans can be configured in BE.
 * If the user configures more, place BE in vlan promiscuous mode.
 */
static int be_vid_config(struct be_adapter *adapter, bool vf, u32 vf_num)
{
        u16 vtag[BE_NUM_VLANS_SUPPORTED];
        u16 ntags = 0, i;
        int status = 0;
        u32 if_handle;

        if (vf) {
                if_handle = adapter->vf_cfg[vf_num].vf_if_handle;
                vtag[0] = cpu_to_le16(adapter->vf_cfg[vf_num].vf_vlan_tag);
                status = be_cmd_vlan_config(adapter, if_handle, vtag, 1, 1, 0);
        }

        if (adapter->vlans_added <= adapter->max_vlans) {
                /* Construct VLAN Table to give to HW */
                for (i = 0; i < VLAN_N_VID; i++) {
                        if (adapter->vlan_tag[i]) {
                                vtag[ntags] = cpu_to_le16(i);
                                ntags++;
                        }
                }
                status = be_cmd_vlan_config(adapter, adapter->if_handle,
                                            vtag, ntags, 1, 0);
        } else {
                status = be_cmd_vlan_config(adapter, adapter->if_handle,
                                            NULL, 0, 1, 1);
        }

        return status;
}

static void be_vlan_register(struct net_device *netdev, struct vlan_group *grp)
{
        struct be_adapter *adapter = netdev_priv(netdev);

        adapter->vlan_grp = grp;
}

static void be_vlan_add_vid(struct net_device *netdev, u16 vid)
{
        struct be_adapter *adapter = netdev_priv(netdev);

        adapter->vlans_added++;
        if (!be_physfn(adapter))
                return;

        adapter->vlan_tag[vid] = 1;
        if (adapter->vlans_added <= (adapter->max_vlans + 1))
                be_vid_config(adapter, false, 0);
}

static void be_vlan_rem_vid(struct net_device *netdev, u16 vid)
{
        struct be_adapter *adapter = netdev_priv(netdev);

        adapter->vlans_added--;
        vlan_group_set_device(adapter->vlan_grp, vid, NULL);

        if (!be_physfn(adapter))
                return;

        adapter->vlan_tag[vid] = 0;
        if (adapter->vlans_added <= adapter->max_vlans)
                be_vid_config(adapter, false, 0);
}

static void be_set_multicast_list(struct net_device *netdev)
{
        struct be_adapter *adapter = netdev_priv(netdev);

        if (netdev->flags & IFF_PROMISC) {
                be_cmd_promiscuous_config(adapter, true);
                adapter->promiscuous = true;
                goto done;
        }

        /* BE was previously in promiscuous mode; disable it */
        if (adapter->promiscuous) {
                adapter->promiscuous = false;
                be_cmd_promiscuous_config(adapter, false);
        }

        /* Enable multicast promisc if num configured exceeds what we support */
        if (netdev->flags & IFF_ALLMULTI ||
            netdev_mc_count(netdev) > BE_MAX_MC) {
                be_cmd_multicast_set(adapter, adapter->if_handle, NULL,
                                     &adapter->mc_cmd_mem);
                goto done;
        }

        be_cmd_multicast_set(adapter, adapter->if_handle, netdev,
                             &adapter->mc_cmd_mem);
done:
        return;
}

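/*
 * SR-IOV ndo handlers: the PF programs MAC, VLAN and tx-rate settings on
 * behalf of a VF through mailbox commands, tracking them in
 * adapter->vf_cfg[].
 */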
static int be_set_vf_mac(struct net_device *netdev, int vf, u8 *mac)
{
        struct be_adapter *adapter = netdev_priv(netdev);
        int status;

        if (!adapter->sriov_enabled)
                return -EPERM;

        if (!is_valid_ether_addr(mac) || (vf >= num_vfs))
                return -EINVAL;

        if (adapter->vf_cfg[vf].vf_pmac_id != BE_INVALID_PMAC_ID)
                status = be_cmd_pmac_del(adapter,
                                adapter->vf_cfg[vf].vf_if_handle,
                                adapter->vf_cfg[vf].vf_pmac_id, vf + 1);

        status = be_cmd_pmac_add(adapter, mac,
                        adapter->vf_cfg[vf].vf_if_handle,
                        &adapter->vf_cfg[vf].vf_pmac_id, vf + 1);

        if (status)
                dev_err(&adapter->pdev->dev, "MAC %pM set on VF %d Failed\n",
                        mac, vf);
        else
                memcpy(adapter->vf_cfg[vf].vf_mac_addr, mac, ETH_ALEN);

        return status;
}

static int be_get_vf_config(struct net_device *netdev, int vf,
                struct ifla_vf_info *vi)
{
        struct be_adapter *adapter = netdev_priv(netdev);

        if (!adapter->sriov_enabled)
                return -EPERM;

        if (vf >= num_vfs)
                return -EINVAL;

        vi->vf = vf;
        vi->tx_rate = adapter->vf_cfg[vf].vf_tx_rate;
        vi->vlan = adapter->vf_cfg[vf].vf_vlan_tag;
        vi->qos = 0;
        memcpy(&vi->mac, adapter->vf_cfg[vf].vf_mac_addr, ETH_ALEN);

        return 0;
}

static int be_set_vf_vlan(struct net_device *netdev,
                int vf, u16 vlan, u8 qos)
{
        struct be_adapter *adapter = netdev_priv(netdev);
        int status = 0;

        if (!adapter->sriov_enabled)
                return -EPERM;

        if ((vf >= num_vfs) || (vlan > 4095))
                return -EINVAL;

        if (vlan) {
                adapter->vf_cfg[vf].vf_vlan_tag = vlan;
                adapter->vlans_added++;
        } else {
                adapter->vf_cfg[vf].vf_vlan_tag = 0;
                adapter->vlans_added--;
        }

        status = be_vid_config(adapter, true, vf);

        if (status)
                dev_info(&adapter->pdev->dev,
                         "VLAN %d config on VF %d failed\n", vlan, vf);
        return status;
}

static int be_set_vf_tx_rate(struct net_device *netdev,
                int vf, int rate)
{
        struct be_adapter *adapter = netdev_priv(netdev);
        int status = 0;

        if (!adapter->sriov_enabled)
                return -EPERM;

        if ((vf >= num_vfs) || (rate < 0))
                return -EINVAL;

        if (rate > 10000)
                rate = 10000;

        adapter->vf_cfg[vf].vf_tx_rate = rate;
        status = be_cmd_set_qos(adapter, rate / 10, vf + 1);

        if (status)
                dev_info(&adapter->pdev->dev,
                         "tx rate %d on VF %d failed\n", rate, vf);
        return status;
}

static void be_rx_rate_update(struct be_rx_obj *rxo)
{
        struct be_rx_stats *stats = &rxo->stats;
        ulong now = jiffies;

        /* Wrapped around */
        if (time_before(now, stats->rx_jiffies)) {
                stats->rx_jiffies = now;
                return;
        }

        /* Update the rate once in two seconds */
        if ((now - stats->rx_jiffies) < 2 * HZ)
                return;

        stats->rx_rate = be_calc_rate(stats->rx_bytes - stats->rx_bytes_prev,
                                      now - stats->rx_jiffies);
        stats->rx_jiffies = now;
        stats->rx_bytes_prev = stats->rx_bytes;
}

static void be_rx_stats_update(struct be_rx_obj *rxo,
                struct be_rx_compl_info *rxcp)
{
        struct be_rx_stats *stats = &rxo->stats;

        stats->rx_compl++;
        stats->rx_frags += rxcp->num_rcvd;
        stats->rx_bytes += rxcp->pkt_size;
        stats->rx_pkts++;
        if (rxcp->pkt_type == BE_MULTICAST_PACKET)
                stats->rx_mcast_pkts++;
        if (rxcp->err)
                stats->rxcp_err++;
}

static inline bool csum_passed(struct be_rx_compl_info *rxcp)
{
        /* L4 checksum is not reliable for non TCP/UDP packets.
         * Also ignore ipcksm for ipv6 pkts */
        return (rxcp->tcpf || rxcp->udpf) && rxcp->l4_csum &&
                (rxcp->ip_csum || rxcp->ipv6);
}

static struct be_rx_page_info *
get_rx_page_info(struct be_adapter *adapter,
                struct be_rx_obj *rxo,
                u16 frag_idx)
{
        struct be_rx_page_info *rx_page_info;
        struct be_queue_info *rxq = &rxo->q;

        rx_page_info = &rxo->page_info_tbl[frag_idx];
        BUG_ON(!rx_page_info->page);

        if (rx_page_info->last_page_user) {
                dma_unmap_page(&adapter->pdev->dev,
                               dma_unmap_addr(rx_page_info, bus),
                               adapter->big_page_size, DMA_FROM_DEVICE);
                rx_page_info->last_page_user = false;
        }

        atomic_dec(&rxq->used);
        return rx_page_info;
}

/* Throw away the data in the Rx completion */
static void be_rx_compl_discard(struct be_adapter *adapter,
                struct be_rx_obj *rxo,
                struct be_rx_compl_info *rxcp)
{
        struct be_queue_info *rxq = &rxo->q;
        struct be_rx_page_info *page_info;
        u16 i, num_rcvd = rxcp->num_rcvd;

        for (i = 0; i < num_rcvd; i++) {
                page_info = get_rx_page_info(adapter, rxo, rxcp->rxq_idx);
                put_page(page_info->page);
                memset(page_info, 0, sizeof(*page_info));
                index_inc(&rxcp->rxq_idx, rxq->len);
        }
}

/*
 * skb_fill_rx_data forms a complete skb for an ether frame
 * indicated by rxcp.
 */
static void skb_fill_rx_data(struct be_adapter *adapter, struct be_rx_obj *rxo,
                struct sk_buff *skb, struct be_rx_compl_info *rxcp)
{
        struct be_queue_info *rxq = &rxo->q;
        struct be_rx_page_info *page_info;
        u16 i, j;
        u16 hdr_len, curr_frag_len, remaining;
        u8 *start;

        page_info = get_rx_page_info(adapter, rxo, rxcp->rxq_idx);
        start = page_address(page_info->page) + page_info->page_offset;
        prefetch(start);

        /* Copy data in the first descriptor of this completion */
        curr_frag_len = min(rxcp->pkt_size, rx_frag_size);

        /* Copy the header portion into skb_data */
        hdr_len = min(BE_HDR_LEN, curr_frag_len);
        memcpy(skb->data, start, hdr_len);
        skb->len = curr_frag_len;
        if (curr_frag_len <= BE_HDR_LEN) { /* tiny packet */
                /* Complete packet has now been moved to data */
                put_page(page_info->page);
                skb->data_len = 0;
                skb->tail += curr_frag_len;
        } else {
                skb_shinfo(skb)->nr_frags = 1;
                skb_shinfo(skb)->frags[0].page = page_info->page;
                skb_shinfo(skb)->frags[0].page_offset =
                        page_info->page_offset + hdr_len;
                skb_shinfo(skb)->frags[0].size = curr_frag_len - hdr_len;
                skb->data_len = curr_frag_len - hdr_len;
                skb->tail += hdr_len;
        }
        page_info->page = NULL;

        if (rxcp->pkt_size <= rx_frag_size) {
                BUG_ON(rxcp->num_rcvd != 1);
                return;
        }

        /* More frags present for this completion */
        index_inc(&rxcp->rxq_idx, rxq->len);
        remaining = rxcp->pkt_size - curr_frag_len;
        for (i = 1, j = 0; i < rxcp->num_rcvd; i++) {
                page_info = get_rx_page_info(adapter, rxo, rxcp->rxq_idx);
                curr_frag_len = min(remaining, rx_frag_size);

                /* Coalesce all frags from the same physical page in one slot */
                if (page_info->page_offset == 0) {
                        /* Fresh page */
                        j++;
                        skb_shinfo(skb)->frags[j].page = page_info->page;
                        skb_shinfo(skb)->frags[j].page_offset =
                                page_info->page_offset;
                        skb_shinfo(skb)->frags[j].size = 0;
                        skb_shinfo(skb)->nr_frags++;
                } else {
                        put_page(page_info->page);
                }

                skb_shinfo(skb)->frags[j].size += curr_frag_len;
                skb->len += curr_frag_len;
                skb->data_len += curr_frag_len;

                remaining -= curr_frag_len;
                index_inc(&rxcp->rxq_idx, rxq->len);
                page_info->page = NULL;
        }
        BUG_ON(j > MAX_SKB_FRAGS);
}

/* Process the RX completion indicated by rxcp when GRO is disabled */
static void be_rx_compl_process(struct be_adapter *adapter,
                struct be_rx_obj *rxo,
                struct be_rx_compl_info *rxcp)
{
        struct net_device *netdev = adapter->netdev;
        struct sk_buff *skb;

        skb = netdev_alloc_skb_ip_align(netdev, BE_HDR_LEN);
        if (unlikely(!skb)) {
                if (net_ratelimit())
                        dev_warn(&adapter->pdev->dev, "skb alloc failed\n");
                be_rx_compl_discard(adapter, rxo, rxcp);
                return;
        }

        skb_fill_rx_data(adapter, rxo, skb, rxcp);

        if (likely((netdev->features & NETIF_F_RXCSUM) && csum_passed(rxcp)))
                skb->ip_summed = CHECKSUM_UNNECESSARY;
        else
                skb_checksum_none_assert(skb);

        skb->truesize = skb->len + sizeof(struct sk_buff);
        skb->protocol = eth_type_trans(skb, netdev);
        if (adapter->netdev->features & NETIF_F_RXHASH)
                skb->rxhash = rxcp->rss_hash;

        if (unlikely(rxcp->vlanf)) {
                if (!adapter->vlan_grp || adapter->vlans_added == 0) {
                        kfree_skb(skb);
                        return;
                }
                vlan_hwaccel_receive_skb(skb, adapter->vlan_grp,
                                         rxcp->vlan_tag);
        } else {
                netif_receive_skb(skb);
        }
}

Ajit Khaparde5be93b92009-07-21 12:36:19 -07001219/* Process the RX completion indicated by rxcp when GRO is enabled */
1220static void be_rx_compl_process_gro(struct be_adapter *adapter,
Sathya Perla3abcded2010-10-03 22:12:27 -07001221 struct be_rx_obj *rxo,
Sathya Perla2e588f82011-03-11 02:49:26 +00001222 struct be_rx_compl_info *rxcp)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001223{
1224 struct be_rx_page_info *page_info;
Ajit Khaparde5be93b92009-07-21 12:36:19 -07001225 struct sk_buff *skb = NULL;
Sathya Perla3abcded2010-10-03 22:12:27 -07001226 struct be_queue_info *rxq = &rxo->q;
1227 struct be_eq_obj *eq_obj = &rxo->rx_eq;
Sathya Perla2e588f82011-03-11 02:49:26 +00001228 u16 remaining, curr_frag_len;
1229 u16 i, j;
Ajit Khaparde3968fa12011-02-20 11:41:53 +00001230
Ajit Khaparde5be93b92009-07-21 12:36:19 -07001231 skb = napi_get_frags(&eq_obj->napi);
1232 if (!skb) {
Sathya Perla3abcded2010-10-03 22:12:27 -07001233 be_rx_compl_discard(adapter, rxo, rxcp);
Ajit Khaparde5be93b92009-07-21 12:36:19 -07001234 return;
1235 }
1236
Sathya Perla2e588f82011-03-11 02:49:26 +00001237 remaining = rxcp->pkt_size;
1238	for (i = 0, j = -1; i < rxcp->num_rcvd; i++) {	/* j is u16; -1 wraps, j++ below yields 0 */
1239 page_info = get_rx_page_info(adapter, rxo, rxcp->rxq_idx);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001240
1241 curr_frag_len = min(remaining, rx_frag_size);
1242
Ajit Khapardebd46cb62009-06-26 02:51:07 +00001243 /* Coalesce all frags from the same physical page in one slot */
1244 if (i == 0 || page_info->page_offset == 0) {
1245 /* First frag or Fresh page */
1246 j++;
Ajit Khaparde5be93b92009-07-21 12:36:19 -07001247 skb_shinfo(skb)->frags[j].page = page_info->page;
1248 skb_shinfo(skb)->frags[j].page_offset =
1249 page_info->page_offset;
1250 skb_shinfo(skb)->frags[j].size = 0;
Ajit Khapardebd46cb62009-06-26 02:51:07 +00001251 } else {
1252 put_page(page_info->page);
1253 }
Ajit Khaparde5be93b92009-07-21 12:36:19 -07001254 skb_shinfo(skb)->frags[j].size += curr_frag_len;
Ajit Khapardebd46cb62009-06-26 02:51:07 +00001255
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001256 remaining -= curr_frag_len;
Sathya Perla2e588f82011-03-11 02:49:26 +00001257 index_inc(&rxcp->rxq_idx, rxq->len);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001258 memset(page_info, 0, sizeof(*page_info));
1259 }
Ajit Khapardebd46cb62009-06-26 02:51:07 +00001260 BUG_ON(j > MAX_SKB_FRAGS);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001261
Ajit Khaparde5be93b92009-07-21 12:36:19 -07001262 skb_shinfo(skb)->nr_frags = j + 1;
Sathya Perla2e588f82011-03-11 02:49:26 +00001263 skb->len = rxcp->pkt_size;
1264 skb->data_len = rxcp->pkt_size;
1265 skb->truesize += rxcp->pkt_size;
Ajit Khaparde5be93b92009-07-21 12:36:19 -07001266 skb->ip_summed = CHECKSUM_UNNECESSARY;
Ajit Khaparde4b972912011-04-06 18:07:43 +00001267 if (adapter->netdev->features & NETIF_F_RXHASH)
1268 skb->rxhash = rxcp->rss_hash;
Ajit Khaparde5be93b92009-07-21 12:36:19 -07001269
Sathya Perla2e588f82011-03-11 02:49:26 +00001270 if (likely(!rxcp->vlanf))
Ajit Khaparde5be93b92009-07-21 12:36:19 -07001271 napi_gro_frags(&eq_obj->napi);
Sathya Perla2e588f82011-03-11 02:49:26 +00001272 else
Somnath Kotur6709d952011-05-04 22:40:46 +00001273 vlan_gro_frags(&eq_obj->napi, adapter->vlan_grp,
1274 rxcp->vlan_tag);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001275}
1276
Sathya Perla2e588f82011-03-11 02:49:26 +00001277static void be_parse_rx_compl_v1(struct be_adapter *adapter,
1278 struct be_eth_rx_compl *compl,
1279 struct be_rx_compl_info *rxcp)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001280{
Sathya Perla2e588f82011-03-11 02:49:26 +00001281 rxcp->pkt_size =
1282 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, pktsize, compl);
1283 rxcp->vlanf = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, vtp, compl);
1284 rxcp->err = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, err, compl);
1285 rxcp->tcpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, tcpf, compl);
Padmanabh Ratnakar9ecb42f2011-03-15 14:57:09 -07001286 rxcp->udpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, udpf, compl);
Sathya Perla2e588f82011-03-11 02:49:26 +00001287 rxcp->ip_csum =
1288 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, ipcksm, compl);
1289 rxcp->l4_csum =
1290 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, l4_cksm, compl);
1291 rxcp->ipv6 =
1292 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, ip_version, compl);
1293 rxcp->rxq_idx =
1294 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, fragndx, compl);
1295 rxcp->num_rcvd =
1296 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, numfrags, compl);
1297 rxcp->pkt_type =
1298 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, cast_enc, compl);
Ajit Khaparde4b972912011-04-06 18:07:43 +00001299	rxcp->rss_hash =
1300		AMAP_GET_BITS(struct amap_eth_rx_compl_v1, rsshash, compl);
Sathya Perla15d72182011-03-21 20:49:26 +00001301 if (rxcp->vlanf) {
1302 rxcp->vtm = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, vtm,
David S. Miller3c709f82011-05-11 14:26:15 -04001303 compl);
1304 rxcp->vlan_tag = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, vlan_tag,
1305 compl);
Sathya Perla15d72182011-03-21 20:49:26 +00001306 }
Sathya Perla2e588f82011-03-11 02:49:26 +00001307}
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001308
Sathya Perla2e588f82011-03-11 02:49:26 +00001309static void be_parse_rx_compl_v0(struct be_adapter *adapter,
1310 struct be_eth_rx_compl *compl,
1311 struct be_rx_compl_info *rxcp)
1312{
1313 rxcp->pkt_size =
1314 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, pktsize, compl);
1315 rxcp->vlanf = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, vtp, compl);
1316 rxcp->err = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, err, compl);
1317 rxcp->tcpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, tcpf, compl);
Padmanabh Ratnakar9ecb42f2011-03-15 14:57:09 -07001318 rxcp->udpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, udpf, compl);
Sathya Perla2e588f82011-03-11 02:49:26 +00001319 rxcp->ip_csum =
1320 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, ipcksm, compl);
1321 rxcp->l4_csum =
1322 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, l4_cksm, compl);
1323 rxcp->ipv6 =
1324 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, ip_version, compl);
1325 rxcp->rxq_idx =
1326 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, fragndx, compl);
1327 rxcp->num_rcvd =
1328 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, numfrags, compl);
1329 rxcp->pkt_type =
1330 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, cast_enc, compl);
Ajit Khaparde4b972912011-04-06 18:07:43 +00001331	rxcp->rss_hash =
1332		AMAP_GET_BITS(struct amap_eth_rx_compl_v0, rsshash, compl);
Sathya Perla15d72182011-03-21 20:49:26 +00001333 if (rxcp->vlanf) {
1334 rxcp->vtm = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, vtm,
David S. Miller3c709f82011-05-11 14:26:15 -04001335 compl);
1336 rxcp->vlan_tag = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, vlan_tag,
1337 compl);
Sathya Perla15d72182011-03-21 20:49:26 +00001338 }
Sathya Perla2e588f82011-03-11 02:49:26 +00001339}
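/*
 * Editor's note: AMAP_GET_BITS() (defined in be_hw.h) extracts a named
 * bitfield from the completion descriptor after it has been byte-swapped
 * to CPU order. A hand-rolled single-field equivalent would look roughly
 * like this sketch; the offset/width arguments are hypothetical and the
 * field is assumed not to straddle a dword boundary.
 */
static inline u32 get_field(const u32 *dwords, u32 bit_off, u32 width)
{
	u32 dw = dwords[bit_off / 32];	/* dword holding the field */

	/* assumes width < 32 */
	return (dw >> (bit_off % 32)) & ((1u << width) - 1);
}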
1340
1341static struct be_rx_compl_info *be_rx_compl_get(struct be_rx_obj *rxo)
1342{
1343 struct be_eth_rx_compl *compl = queue_tail_node(&rxo->cq);
1344 struct be_rx_compl_info *rxcp = &rxo->rxcp;
1345 struct be_adapter *adapter = rxo->adapter;
1346
1347	/* For checking the valid bit it is OK to use either definition, as the
1348	 * valid bit is at the same position in both v0 and v1 Rx compls */
1349 if (compl->dw[offsetof(struct amap_eth_rx_compl_v1, valid) / 32] == 0)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001350 return NULL;
1351
Sathya Perlaf3eb62d2010-06-29 00:11:17 +00001352 rmb();
Sathya Perla2e588f82011-03-11 02:49:26 +00001353 be_dws_le_to_cpu(compl, sizeof(*compl));
1354
1355 if (adapter->be3_native)
1356 be_parse_rx_compl_v1(adapter, compl, rxcp);
1357 else
1358 be_parse_rx_compl_v0(adapter, compl, rxcp);
1359
Sathya Perla15d72182011-03-21 20:49:26 +00001360 if (rxcp->vlanf) {
1361		/* vlanf could be wrongly set in some cards.
1362		 * Ignore it if vtm is not set. */
1363		if ((adapter->function_mode & 0x400) && !rxcp->vtm)	/* 0x400: FLEX10 mode */
1364 rxcp->vlanf = 0;
Sathya Perla2e588f82011-03-11 02:49:26 +00001365
Sathya Perla15d72182011-03-21 20:49:26 +00001366 if (!lancer_chip(adapter))
David S. Miller3c709f82011-05-11 14:26:15 -04001367 rxcp->vlan_tag = swab16(rxcp->vlan_tag);
Sathya Perla2e588f82011-03-11 02:49:26 +00001368
David S. Miller3c709f82011-05-11 14:26:15 -04001369 if (((adapter->pvid & VLAN_VID_MASK) ==
1370 (rxcp->vlan_tag & VLAN_VID_MASK)) &&
1371 !adapter->vlan_tag[rxcp->vlan_tag])
Sathya Perla15d72182011-03-21 20:49:26 +00001372 rxcp->vlanf = 0;
1373 }
Sathya Perla2e588f82011-03-11 02:49:26 +00001374
1375	/* As the compl has been parsed, reset it; we won't touch it again */
1376 compl->dw[offsetof(struct amap_eth_rx_compl_v1, valid) / 32] = 0;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001377
Sathya Perla3abcded2010-10-03 22:12:27 -07001378 queue_tail_inc(&rxo->cq);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001379 return rxcp;
1380}
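/*
 * Editor's note: be_rx_compl_get() follows the usual valid-bit protocol
 * for DMA'd completion rings: peek at the valid bit, issue rmb() so the
 * payload is not read before the flag, parse, clear the valid bit so a
 * wrapped producer entry is not mistaken for new work, then advance the
 * tail. A compact restatement with hypothetical field names:
 */
struct compl_sketch { u32 flags; u32 payload[3]; };
#define SKETCH_COMPL_VALID 0x1

static struct compl_sketch *ring_consume(struct compl_sketch *ring,
					 u16 *tail, u16 len)
{
	struct compl_sketch *c = &ring[*tail];

	if (!(c->flags & SKETCH_COMPL_VALID))
		return NULL;			/* nothing new from hw */
	rmb();					/* flag read before payload */
	c->flags &= ~SKETCH_COMPL_VALID;	/* re-arm slot for reuse */
	*tail = (*tail + 1) % len;
	return c;
}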
1381
Eric Dumazet1829b082011-03-01 05:48:12 +00001382static inline struct page *be_alloc_pages(u32 size, gfp_t gfp)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001383{
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001384 u32 order = get_order(size);
Eric Dumazet1829b082011-03-01 05:48:12 +00001385
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001386 if (order > 0)
Eric Dumazet1829b082011-03-01 05:48:12 +00001387 gfp |= __GFP_COMP;
1388 return alloc_pages(gfp, order);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001389}
1390
1391/*
1392 * Allocate a page, split it into fragments of size rx_frag_size and post
1393 * them as receive buffers to BE
1394 */
Eric Dumazet1829b082011-03-01 05:48:12 +00001395static void be_post_rx_frags(struct be_rx_obj *rxo, gfp_t gfp)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001396{
Sathya Perla3abcded2010-10-03 22:12:27 -07001397 struct be_adapter *adapter = rxo->adapter;
1398 struct be_rx_page_info *page_info_tbl = rxo->page_info_tbl;
Sathya Perla26d92f92010-01-21 22:52:08 -08001399 struct be_rx_page_info *page_info = NULL, *prev_page_info = NULL;
Sathya Perla3abcded2010-10-03 22:12:27 -07001400 struct be_queue_info *rxq = &rxo->q;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001401 struct page *pagep = NULL;
1402 struct be_eth_rx_d *rxd;
1403 u64 page_dmaaddr = 0, frag_dmaaddr;
1404 u32 posted, page_offset = 0;
1405
Sathya Perla3abcded2010-10-03 22:12:27 -07001406 page_info = &rxo->page_info_tbl[rxq->head];
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001407 for (posted = 0; posted < MAX_RX_POST && !page_info->page; posted++) {
1408 if (!pagep) {
Eric Dumazet1829b082011-03-01 05:48:12 +00001409 pagep = be_alloc_pages(adapter->big_page_size, gfp);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001410 if (unlikely(!pagep)) {
Sathya Perla3abcded2010-10-03 22:12:27 -07001411 rxo->stats.rx_post_fail++;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001412 break;
1413 }
Ivan Vecera2b7bceb2011-02-02 08:05:12 +00001414 page_dmaaddr = dma_map_page(&adapter->pdev->dev, pagep,
1415 0, adapter->big_page_size,
1416 DMA_FROM_DEVICE);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001417 page_info->page_offset = 0;
1418 } else {
1419 get_page(pagep);
1420 page_info->page_offset = page_offset + rx_frag_size;
1421 }
1422 page_offset = page_info->page_offset;
1423 page_info->page = pagep;
FUJITA Tomonorifac6da52010-04-01 16:53:22 +00001424 dma_unmap_addr_set(page_info, bus, page_dmaaddr);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001425 frag_dmaaddr = page_dmaaddr + page_info->page_offset;
1426
1427 rxd = queue_head_node(rxq);
1428 rxd->fragpa_lo = cpu_to_le32(frag_dmaaddr & 0xFFFFFFFF);
1429 rxd->fragpa_hi = cpu_to_le32(upper_32_bits(frag_dmaaddr));
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001430
1431 /* Any space left in the current big page for another frag? */
1432 if ((page_offset + rx_frag_size + rx_frag_size) >
1433 adapter->big_page_size) {
1434 pagep = NULL;
1435 page_info->last_page_user = true;
1436 }
Sathya Perla26d92f92010-01-21 22:52:08 -08001437
1438 prev_page_info = page_info;
1439 queue_head_inc(rxq);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001440 page_info = &page_info_tbl[rxq->head];
1441 }
1442 if (pagep)
Sathya Perla26d92f92010-01-21 22:52:08 -08001443 prev_page_info->last_page_user = true;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001444
1445 if (posted) {
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001446 atomic_add(posted, &rxq->used);
Sathya Perla8788fdc2009-07-27 22:52:03 +00001447 be_rxq_notify(adapter, rxq->id, posted);
Sathya Perlaea1dae12009-03-19 23:56:20 -07001448 } else if (atomic_read(&rxq->used) == 0) {
1449 /* Let be_worker replenish when memory is available */
Sathya Perla3abcded2010-10-03 22:12:27 -07001450 rxo->rx_post_starved = true;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001451 }
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001452}
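/*
 * Editor's note: a standalone userspace sketch (compile separately) of the
 * splitting arithmetic above. With 4K pages and the default rx_frag_size
 * of 2048, get_order(2048) is 0, so each "big page" is a single 4K page
 * holding two fragments; every fragment after the first takes an extra
 * page reference (get_page), and the final one is flagged last_page_user
 * so the DMA mapping is torn down exactly once.
 */
#include <stdio.h>

int main(void)
{
	unsigned int big_page = 4096, rx_frag_size = 2048, off;

	for (off = 0; off + rx_frag_size <= big_page; off += rx_frag_size)
		printf("frag at offset %u, last_page_user=%d\n",
		       off, off + 2 * rx_frag_size > big_page);
	return 0;	/* offsets 0 and 2048; only the second is "last" */
}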
1453
Sathya Perla5fb379e2009-06-18 00:02:59 +00001454static struct be_eth_tx_compl *be_tx_compl_get(struct be_queue_info *tx_cq)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001455{
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001456 struct be_eth_tx_compl *txcp = queue_tail_node(tx_cq);
1457
1458 if (txcp->dw[offsetof(struct amap_eth_tx_compl, valid) / 32] == 0)
1459 return NULL;
1460
Sathya Perlaf3eb62d2010-06-29 00:11:17 +00001461 rmb();
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001462 be_dws_le_to_cpu(txcp, sizeof(*txcp));
1463
1464 txcp->dw[offsetof(struct amap_eth_tx_compl, valid) / 32] = 0;
1465
1466 queue_tail_inc(tx_cq);
1467 return txcp;
1468}
1469
Sathya Perla3c8def92011-06-12 20:01:58 +00001470static u16 be_tx_compl_process(struct be_adapter *adapter,
1471 struct be_tx_obj *txo, u16 last_index)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001472{
Sathya Perla3c8def92011-06-12 20:01:58 +00001473 struct be_queue_info *txq = &txo->q;
Alexander Duycka73b7962009-12-02 16:48:18 +00001474 struct be_eth_wrb *wrb;
Sathya Perla3c8def92011-06-12 20:01:58 +00001475 struct sk_buff **sent_skbs = txo->sent_skb_list;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001476 struct sk_buff *sent_skb;
Sathya Perlaec43b1a2010-03-22 20:41:34 +00001477 u16 cur_index, num_wrbs = 1; /* account for hdr wrb */
1478 bool unmap_skb_hdr = true;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001479
Sathya Perlaec43b1a2010-03-22 20:41:34 +00001480 sent_skb = sent_skbs[txq->tail];
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001481 BUG_ON(!sent_skb);
Sathya Perlaec43b1a2010-03-22 20:41:34 +00001482 sent_skbs[txq->tail] = NULL;
1483
1484 /* skip header wrb */
Alexander Duycka73b7962009-12-02 16:48:18 +00001485 queue_tail_inc(txq);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001486
Sathya Perlaec43b1a2010-03-22 20:41:34 +00001487 do {
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001488 cur_index = txq->tail;
Alexander Duycka73b7962009-12-02 16:48:18 +00001489 wrb = queue_tail_node(txq);
Ivan Vecera2b7bceb2011-02-02 08:05:12 +00001490 unmap_tx_frag(&adapter->pdev->dev, wrb,
1491 (unmap_skb_hdr && skb_headlen(sent_skb)));
Sathya Perlaec43b1a2010-03-22 20:41:34 +00001492 unmap_skb_hdr = false;
1493
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001494 num_wrbs++;
1495 queue_tail_inc(txq);
Sathya Perlaec43b1a2010-03-22 20:41:34 +00001496 } while (cur_index != last_index);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001497
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001498 kfree_skb(sent_skb);
Padmanabh Ratnakar4d586b82011-05-10 05:13:57 +00001499 return num_wrbs;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001500}
1501
Sathya Perla859b1e42009-08-10 03:43:51 +00001502static inline struct be_eq_entry *event_get(struct be_eq_obj *eq_obj)
1503{
1504 struct be_eq_entry *eqe = queue_tail_node(&eq_obj->q);
1505
1506 if (!eqe->evt)
1507 return NULL;
1508
Sathya Perlaf3eb62d2010-06-29 00:11:17 +00001509 rmb();
Sathya Perla859b1e42009-08-10 03:43:51 +00001510 eqe->evt = le32_to_cpu(eqe->evt);
1511 queue_tail_inc(&eq_obj->q);
1512 return eqe;
1513}
1514
1515static int event_handle(struct be_adapter *adapter,
Sathya Perla3c8def92011-06-12 20:01:58 +00001516 struct be_eq_obj *eq_obj,
1517 bool rearm)
Sathya Perla859b1e42009-08-10 03:43:51 +00001518{
1519 struct be_eq_entry *eqe;
1520 u16 num = 0;
1521
1522 while ((eqe = event_get(eq_obj)) != NULL) {
1523 eqe->evt = 0;
1524 num++;
1525 }
1526
1527 /* Deal with any spurious interrupts that come
1528 * without events
1529 */
Sathya Perla3c8def92011-06-12 20:01:58 +00001530 if (!num)
1531 rearm = true;
1532
1533 be_eq_notify(adapter, eq_obj->q.id, rearm, true, num);
Sathya Perla859b1e42009-08-10 03:43:51 +00001534 if (num)
1535 napi_schedule(&eq_obj->napi);
1536
1537 return num;
1538}
1539
1540/* Just read and notify events without processing them.
1541 * Used at the time of destroying event queues */
1542static void be_eq_clean(struct be_adapter *adapter,
1543 struct be_eq_obj *eq_obj)
1544{
1545 struct be_eq_entry *eqe;
1546 u16 num = 0;
1547
1548 while ((eqe = event_get(eq_obj)) != NULL) {
1549 eqe->evt = 0;
1550 num++;
1551 }
1552
1553 if (num)
1554 be_eq_notify(adapter, eq_obj->q.id, false, true, num);
1555}
1556
Sathya Perla3abcded2010-10-03 22:12:27 -07001557static void be_rx_q_clean(struct be_adapter *adapter, struct be_rx_obj *rxo)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001558{
1559 struct be_rx_page_info *page_info;
Sathya Perla3abcded2010-10-03 22:12:27 -07001560 struct be_queue_info *rxq = &rxo->q;
1561 struct be_queue_info *rx_cq = &rxo->cq;
Sathya Perla2e588f82011-03-11 02:49:26 +00001562 struct be_rx_compl_info *rxcp;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001563 u16 tail;
1564
1565 /* First cleanup pending rx completions */
Sathya Perla3abcded2010-10-03 22:12:27 -07001566 while ((rxcp = be_rx_compl_get(rxo)) != NULL) {
1567 be_rx_compl_discard(adapter, rxo, rxcp);
Sathya Perla64642812010-12-01 01:04:17 +00001568 be_cq_notify(adapter, rx_cq->id, false, 1);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001569 }
1570
1571	/* Then free the posted rx buffers that were not used */
1572 tail = (rxq->head + rxq->len - atomic_read(&rxq->used)) % rxq->len;
Sathya Perlacdab23b2009-08-10 03:43:23 +00001573 for (; atomic_read(&rxq->used) > 0; index_inc(&tail, rxq->len)) {
Sathya Perla3abcded2010-10-03 22:12:27 -07001574 page_info = get_rx_page_info(adapter, rxo, tail);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001575 put_page(page_info->page);
1576 memset(page_info, 0, sizeof(*page_info));
1577 }
1578 BUG_ON(atomic_read(&rxq->used));
1579}
1580
Sathya Perla3c8def92011-06-12 20:01:58 +00001581static void be_tx_compl_clean(struct be_adapter *adapter,
1582 struct be_tx_obj *txo)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001583{
Sathya Perla3c8def92011-06-12 20:01:58 +00001584 struct be_queue_info *tx_cq = &txo->cq;
1585 struct be_queue_info *txq = &txo->q;
Sathya Perlaa8e91792009-08-10 03:42:43 +00001586 struct be_eth_tx_compl *txcp;
Padmanabh Ratnakar4d586b82011-05-10 05:13:57 +00001587 u16 end_idx, cmpl = 0, timeo = 0, num_wrbs = 0;
Sathya Perla3c8def92011-06-12 20:01:58 +00001588 struct sk_buff **sent_skbs = txo->sent_skb_list;
Sathya Perlab03388d2010-02-18 00:37:17 +00001589 struct sk_buff *sent_skb;
1590 bool dummy_wrb;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001591
Sathya Perlaa8e91792009-08-10 03:42:43 +00001592 /* Wait for a max of 200ms for all the tx-completions to arrive. */
1593 do {
1594 while ((txcp = be_tx_compl_get(tx_cq))) {
1595 end_idx = AMAP_GET_BITS(struct amap_eth_tx_compl,
1596 wrb_index, txcp);
Sathya Perla3c8def92011-06-12 20:01:58 +00001597 num_wrbs += be_tx_compl_process(adapter, txo, end_idx);
Sathya Perlaa8e91792009-08-10 03:42:43 +00001598 cmpl++;
1599 }
1600 if (cmpl) {
1601 be_cq_notify(adapter, tx_cq->id, false, cmpl);
Padmanabh Ratnakar4d586b82011-05-10 05:13:57 +00001602 atomic_sub(num_wrbs, &txq->used);
Sathya Perlaa8e91792009-08-10 03:42:43 +00001603 cmpl = 0;
Padmanabh Ratnakar4d586b82011-05-10 05:13:57 +00001604 num_wrbs = 0;
Sathya Perlaa8e91792009-08-10 03:42:43 +00001605 }
1606
1607 if (atomic_read(&txq->used) == 0 || ++timeo > 200)
1608 break;
1609
1610 mdelay(1);
1611 } while (true);
1612
1613 if (atomic_read(&txq->used))
1614 dev_err(&adapter->pdev->dev, "%d pending tx-completions\n",
1615 atomic_read(&txq->used));
Sathya Perlab03388d2010-02-18 00:37:17 +00001616
1617	/* Free posted tx skbs for which compls will never arrive */
1618 while (atomic_read(&txq->used)) {
1619 sent_skb = sent_skbs[txq->tail];
1620 end_idx = txq->tail;
1621 index_adv(&end_idx,
Sathya Perlafe6d2a32010-11-21 23:25:50 +00001622 wrb_cnt_for_skb(adapter, sent_skb, &dummy_wrb) - 1,
1623 txq->len);
Sathya Perla3c8def92011-06-12 20:01:58 +00001624 num_wrbs = be_tx_compl_process(adapter, txo, end_idx);
Padmanabh Ratnakar4d586b82011-05-10 05:13:57 +00001625 atomic_sub(num_wrbs, &txq->used);
Sathya Perlab03388d2010-02-18 00:37:17 +00001626 }
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001627}
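/*
 * Editor's note: the drain above is a bounded busy-wait: reap whatever
 * completions have arrived, back off 1ms, and give up after ~200ms;
 * anything still posted is then reclaimed by walking the ring directly.
 * The loop shape in isolation, with a hypothetical reap helper:
 */
extern int reap_pending(struct be_adapter *adapter);	/* hypothetical */

static int drain_with_timeout(struct be_adapter *adapter)
{
	int timeo = 0;

	do {
		if (reap_pending(adapter) == 0)
			return 0;		/* fully drained */
		mdelay(1);
	} while (++timeo <= 200);

	return -1;	/* caller force-frees whatever is left */
}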
1628
Sathya Perla5fb379e2009-06-18 00:02:59 +00001629static void be_mcc_queues_destroy(struct be_adapter *adapter)
1630{
1631 struct be_queue_info *q;
Sathya Perla5fb379e2009-06-18 00:02:59 +00001632
Sathya Perla8788fdc2009-07-27 22:52:03 +00001633 q = &adapter->mcc_obj.q;
Sathya Perla5fb379e2009-06-18 00:02:59 +00001634 if (q->created)
Sathya Perla8788fdc2009-07-27 22:52:03 +00001635 be_cmd_q_destroy(adapter, q, QTYPE_MCCQ);
Sathya Perla5fb379e2009-06-18 00:02:59 +00001636 be_queue_free(adapter, q);
1637
Sathya Perla8788fdc2009-07-27 22:52:03 +00001638 q = &adapter->mcc_obj.cq;
Sathya Perla5fb379e2009-06-18 00:02:59 +00001639 if (q->created)
Sathya Perla8788fdc2009-07-27 22:52:03 +00001640 be_cmd_q_destroy(adapter, q, QTYPE_CQ);
Sathya Perla5fb379e2009-06-18 00:02:59 +00001641 be_queue_free(adapter, q);
1642}
1643
1644/* Must be called only after TX qs are created as MCC shares TX EQ */
1645static int be_mcc_queues_create(struct be_adapter *adapter)
1646{
1647 struct be_queue_info *q, *cq;
Sathya Perla5fb379e2009-06-18 00:02:59 +00001648
1649 /* Alloc MCC compl queue */
Sathya Perla8788fdc2009-07-27 22:52:03 +00001650 cq = &adapter->mcc_obj.cq;
Sathya Perla5fb379e2009-06-18 00:02:59 +00001651 if (be_queue_alloc(adapter, cq, MCC_CQ_LEN,
Sathya Perlaefd2e402009-07-27 22:53:10 +00001652 sizeof(struct be_mcc_compl)))
Sathya Perla5fb379e2009-06-18 00:02:59 +00001653 goto err;
1654
1655 /* Ask BE to create MCC compl queue; share TX's eq */
Sathya Perla8788fdc2009-07-27 22:52:03 +00001656 if (be_cmd_cq_create(adapter, cq, &adapter->tx_eq.q, false, true, 0))
Sathya Perla5fb379e2009-06-18 00:02:59 +00001657 goto mcc_cq_free;
1658
1659 /* Alloc MCC queue */
Sathya Perla8788fdc2009-07-27 22:52:03 +00001660 q = &adapter->mcc_obj.q;
Sathya Perla5fb379e2009-06-18 00:02:59 +00001661 if (be_queue_alloc(adapter, q, MCC_Q_LEN, sizeof(struct be_mcc_wrb)))
1662 goto mcc_cq_destroy;
1663
1664 /* Ask BE to create MCC queue */
Sathya Perla8788fdc2009-07-27 22:52:03 +00001665 if (be_cmd_mccq_create(adapter, q, cq))
Sathya Perla5fb379e2009-06-18 00:02:59 +00001666 goto mcc_q_free;
1667
1668 return 0;
1669
1670mcc_q_free:
1671 be_queue_free(adapter, q);
1672mcc_cq_destroy:
Sathya Perla8788fdc2009-07-27 22:52:03 +00001673 be_cmd_q_destroy(adapter, cq, QTYPE_CQ);
Sathya Perla5fb379e2009-06-18 00:02:59 +00001674mcc_cq_free:
1675 be_queue_free(adapter, cq);
1676err:
1677 return -1;
1678}
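/*
 * Editor's note: be_mcc_queues_create() uses the standard kernel
 * goto-unwind idiom: each failing step jumps to a label that releases
 * exactly what was acquired before it, in reverse order. Skeleton form
 * with hypothetical helpers:
 */
extern int alloc_a(void), alloc_b(void);
extern void undo_a(void);

static int create_pair(void)
{
	if (alloc_a())
		goto err;
	if (alloc_b())
		goto free_a;
	return 0;

free_a:
	undo_a();	/* unwind only what succeeded */
err:
	return -1;
}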
1679
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001680static void be_tx_queues_destroy(struct be_adapter *adapter)
1681{
1682 struct be_queue_info *q;
Sathya Perla3c8def92011-06-12 20:01:58 +00001683 struct be_tx_obj *txo;
1684 u8 i;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001685
Sathya Perla3c8def92011-06-12 20:01:58 +00001686 for_all_tx_queues(adapter, txo, i) {
1687 q = &txo->q;
1688 if (q->created)
1689 be_cmd_q_destroy(adapter, q, QTYPE_TXQ);
1690 be_queue_free(adapter, q);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001691
Sathya Perla3c8def92011-06-12 20:01:58 +00001692 q = &txo->cq;
1693 if (q->created)
1694 be_cmd_q_destroy(adapter, q, QTYPE_CQ);
1695 be_queue_free(adapter, q);
1696 }
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001697
Sathya Perla859b1e42009-08-10 03:43:51 +00001698 /* Clear any residual events */
1699 be_eq_clean(adapter, &adapter->tx_eq);
1700
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001701 q = &adapter->tx_eq.q;
1702 if (q->created)
Sathya Perla8788fdc2009-07-27 22:52:03 +00001703 be_cmd_q_destroy(adapter, q, QTYPE_EQ);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001704 be_queue_free(adapter, q);
1705}
1706
Sathya Perla3c8def92011-06-12 20:01:58 +00001707/* One TX event queue is shared by all TX compl qs */
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001708static int be_tx_queues_create(struct be_adapter *adapter)
1709{
1710 struct be_queue_info *eq, *q, *cq;
Sathya Perla3c8def92011-06-12 20:01:58 +00001711 struct be_tx_obj *txo;
1712 u8 i;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001713
1714 adapter->tx_eq.max_eqd = 0;
1715 adapter->tx_eq.min_eqd = 0;
1716 adapter->tx_eq.cur_eqd = 96;
1717 adapter->tx_eq.enable_aic = false;
Sathya Perla3c8def92011-06-12 20:01:58 +00001718
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001719 eq = &adapter->tx_eq.q;
Sathya Perla3c8def92011-06-12 20:01:58 +00001720 if (be_queue_alloc(adapter, eq, EVNT_Q_LEN,
1721 sizeof(struct be_eq_entry)))
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001722 return -1;
1723
Sathya Perla8788fdc2009-07-27 22:52:03 +00001724 if (be_cmd_eq_create(adapter, eq, adapter->tx_eq.cur_eqd))
Sathya Perla3c8def92011-06-12 20:01:58 +00001725 goto err;
Padmanabh Ratnakarecd62102011-04-03 01:54:11 +00001726 adapter->tx_eq.eq_idx = adapter->eq_next_idx++;
Sathya Perlafe6d2a32010-11-21 23:25:50 +00001727
Sathya Perla3c8def92011-06-12 20:01:58 +00001728 for_all_tx_queues(adapter, txo, i) {
1729 cq = &txo->cq;
1730 if (be_queue_alloc(adapter, cq, TX_CQ_LEN,
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001731 sizeof(struct be_eth_tx_compl)))
Sathya Perla3c8def92011-06-12 20:01:58 +00001732 goto err;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001733
Sathya Perla3c8def92011-06-12 20:01:58 +00001734 if (be_cmd_cq_create(adapter, cq, eq, false, false, 3))
1735 goto err;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001736
Sathya Perla3c8def92011-06-12 20:01:58 +00001737 q = &txo->q;
1738 if (be_queue_alloc(adapter, q, TX_Q_LEN,
1739 sizeof(struct be_eth_wrb)))
1740 goto err;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001741
Sathya Perla3c8def92011-06-12 20:01:58 +00001742 if (be_cmd_txq_create(adapter, q, cq))
1743 goto err;
1744 }
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001745 return 0;
1746
Sathya Perla3c8def92011-06-12 20:01:58 +00001747err:
1748 be_tx_queues_destroy(adapter);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001749 return -1;
1750}
1751
1752static void be_rx_queues_destroy(struct be_adapter *adapter)
1753{
1754 struct be_queue_info *q;
Sathya Perla3abcded2010-10-03 22:12:27 -07001755 struct be_rx_obj *rxo;
1756 int i;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001757
Sathya Perla3abcded2010-10-03 22:12:27 -07001758 for_all_rx_queues(adapter, rxo, i) {
1759 q = &rxo->q;
1760 if (q->created) {
1761 be_cmd_q_destroy(adapter, q, QTYPE_RXQ);
1762 /* After the rxq is invalidated, wait for a grace time
1763 * of 1ms for all dma to end and the flush compl to
1764 * arrive
1765 */
1766 mdelay(1);
1767 be_rx_q_clean(adapter, rxo);
1768 }
1769 be_queue_free(adapter, q);
Sathya Perla89420422010-02-17 01:35:26 +00001770
Sathya Perla3abcded2010-10-03 22:12:27 -07001771 q = &rxo->cq;
1772 if (q->created)
1773 be_cmd_q_destroy(adapter, q, QTYPE_CQ);
1774 be_queue_free(adapter, q);
1775
1776 /* Clear any residual events */
1777 q = &rxo->rx_eq.q;
1778 if (q->created) {
1779 be_eq_clean(adapter, &rxo->rx_eq);
1780 be_cmd_q_destroy(adapter, q, QTYPE_EQ);
1781 }
1782 be_queue_free(adapter, q);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001783 }
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001784}
1785
Sathya Perlaac6a0c42011-03-21 20:49:25 +00001786static u32 be_num_rxqs_want(struct be_adapter *adapter)
1787{
1788 if (multi_rxq && (adapter->function_caps & BE_FUNCTION_CAPS_RSS) &&
1789 !adapter->sriov_enabled && !(adapter->function_mode & 0x400)) {
1790 return 1 + MAX_RSS_QS; /* one default non-RSS queue */
1791 } else {
1792 dev_warn(&adapter->pdev->dev,
1793 "No support for multiple RX queues\n");
1794 return 1;
1795 }
1796}
1797
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001798static int be_rx_queues_create(struct be_adapter *adapter)
1799{
1800 struct be_queue_info *eq, *q, *cq;
Sathya Perla3abcded2010-10-03 22:12:27 -07001801 struct be_rx_obj *rxo;
1802 int rc, i;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001803
Sathya Perlaac6a0c42011-03-21 20:49:25 +00001804 adapter->num_rx_qs = min(be_num_rxqs_want(adapter),
1805 msix_enabled(adapter) ?
1806 adapter->num_msix_vec - 1 : 1);
1807 if (adapter->num_rx_qs != MAX_RX_QS)
1808 dev_warn(&adapter->pdev->dev,
1809 "Can create only %d RX queues", adapter->num_rx_qs);
1810
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001811 adapter->big_page_size = (1 << get_order(rx_frag_size)) * PAGE_SIZE;
Sathya Perla3abcded2010-10-03 22:12:27 -07001812 for_all_rx_queues(adapter, rxo, i) {
1813 rxo->adapter = adapter;
1814 rxo->rx_eq.max_eqd = BE_MAX_EQD;
1815 rxo->rx_eq.enable_aic = true;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001816
Sathya Perla3abcded2010-10-03 22:12:27 -07001817 /* EQ */
1818 eq = &rxo->rx_eq.q;
1819 rc = be_queue_alloc(adapter, eq, EVNT_Q_LEN,
1820 sizeof(struct be_eq_entry));
1821 if (rc)
1822 goto err;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001823
Sathya Perla3abcded2010-10-03 22:12:27 -07001824 rc = be_cmd_eq_create(adapter, eq, rxo->rx_eq.cur_eqd);
1825 if (rc)
1826 goto err;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001827
Padmanabh Ratnakarecd62102011-04-03 01:54:11 +00001828 rxo->rx_eq.eq_idx = adapter->eq_next_idx++;
Sathya Perlafe6d2a32010-11-21 23:25:50 +00001829
Sathya Perla3abcded2010-10-03 22:12:27 -07001830 /* CQ */
1831 cq = &rxo->cq;
1832 rc = be_queue_alloc(adapter, cq, RX_CQ_LEN,
1833 sizeof(struct be_eth_rx_compl));
1834 if (rc)
1835 goto err;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001836
Sathya Perla3abcded2010-10-03 22:12:27 -07001837 rc = be_cmd_cq_create(adapter, cq, eq, false, false, 3);
1838 if (rc)
1839 goto err;
Sathya Perla3abcded2010-10-03 22:12:27 -07001840 /* Rx Q */
1841 q = &rxo->q;
1842 rc = be_queue_alloc(adapter, q, RX_Q_LEN,
1843 sizeof(struct be_eth_rx_d));
1844 if (rc)
1845 goto err;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001846
Sathya Perla3abcded2010-10-03 22:12:27 -07001847 rc = be_cmd_rxq_create(adapter, q, cq->id, rx_frag_size,
1848 BE_MAX_JUMBO_FRAME_SIZE, adapter->if_handle,
1849			(i > 0) ? 1 : 0 /* rss enable */, &rxo->rss_id);
1850 if (rc)
1851 goto err;
1852 }
1853
1854 if (be_multi_rxq(adapter)) {
1855 u8 rsstable[MAX_RSS_QS];
1856
1857 for_all_rss_queues(adapter, rxo, i)
1858 rsstable[i] = rxo->rss_id;
1859
1860 rc = be_cmd_rss_config(adapter, rsstable,
1861 adapter->num_rx_qs - 1);
1862 if (rc)
1863 goto err;
1864 }
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001865
1866 return 0;
Sathya Perla3abcded2010-10-03 22:12:27 -07001867err:
1868 be_rx_queues_destroy(adapter);
1869 return -1;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001870}
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001871
Sathya Perlafe6d2a32010-11-21 23:25:50 +00001872static bool event_peek(struct be_eq_obj *eq_obj)
Sathya Perlab628bde2009-08-17 00:58:26 +00001873{
Sathya Perlafe6d2a32010-11-21 23:25:50 +00001874 struct be_eq_entry *eqe = queue_tail_node(&eq_obj->q);
1875	return eqe->evt != 0;
Sathya Perlab628bde2009-08-17 00:58:26 +00001879}
1880
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001881static irqreturn_t be_intx(int irq, void *dev)
1882{
1883 struct be_adapter *adapter = dev;
Sathya Perla3abcded2010-10-03 22:12:27 -07001884 struct be_rx_obj *rxo;
Sathya Perlafe6d2a32010-11-21 23:25:50 +00001885	int isr, i, tx = 0, rx = 0;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001886
Sathya Perlafe6d2a32010-11-21 23:25:50 +00001887 if (lancer_chip(adapter)) {
1888 if (event_peek(&adapter->tx_eq))
Sathya Perla3c8def92011-06-12 20:01:58 +00001889 tx = event_handle(adapter, &adapter->tx_eq, false);
Sathya Perlafe6d2a32010-11-21 23:25:50 +00001890 for_all_rx_queues(adapter, rxo, i) {
1891 if (event_peek(&rxo->rx_eq))
Sathya Perla3c8def92011-06-12 20:01:58 +00001892 rx |= event_handle(adapter, &rxo->rx_eq, true);
Sathya Perlafe6d2a32010-11-21 23:25:50 +00001893 }
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001894
Sathya Perlafe6d2a32010-11-21 23:25:50 +00001895 if (!(tx || rx))
1896 return IRQ_NONE;
Sathya Perla3abcded2010-10-03 22:12:27 -07001897
Sathya Perlafe6d2a32010-11-21 23:25:50 +00001898 } else {
1899 isr = ioread32(adapter->csr + CEV_ISR0_OFFSET +
1900 (adapter->tx_eq.q.id / 8) * CEV_ISR_SIZE);
1901 if (!isr)
1902 return IRQ_NONE;
1903
Padmanabh Ratnakarecd62102011-04-03 01:54:11 +00001904 if ((1 << adapter->tx_eq.eq_idx & isr))
Sathya Perla3c8def92011-06-12 20:01:58 +00001905 event_handle(adapter, &adapter->tx_eq, false);
Sathya Perlafe6d2a32010-11-21 23:25:50 +00001906
1907 for_all_rx_queues(adapter, rxo, i) {
Padmanabh Ratnakarecd62102011-04-03 01:54:11 +00001908 if ((1 << rxo->rx_eq.eq_idx & isr))
Sathya Perla3c8def92011-06-12 20:01:58 +00001909 event_handle(adapter, &rxo->rx_eq, true);
Sathya Perlafe6d2a32010-11-21 23:25:50 +00001910 }
Sathya Perla3abcded2010-10-03 22:12:27 -07001911 }
Sathya Perlac001c212009-07-01 01:06:07 +00001912
Sathya Perla8788fdc2009-07-27 22:52:03 +00001913 return IRQ_HANDLED;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001914}
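/*
 * Editor's note: for legacy INTx the chip exposes one summary register
 * (CEV_ISR0) with a bit per event queue, and the handler above claims
 * the possibly-shared interrupt line only if one of its bits is set.
 * The dispatch pattern in isolation:
 */
static int dispatch_isr(u32 isr, void (*handler[])(void), int nqueues)
{
	int i, handled = 0;

	for (i = 0; i < nqueues; i++) {
		if (isr & (1u << i)) {	/* bit i belongs to event queue i */
			handler[i]();
			handled = 1;
		}
	}
	return handled;		/* 0 means IRQ_NONE on a shared line */
}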
1915
1916static irqreturn_t be_msix_rx(int irq, void *dev)
1917{
Sathya Perla3abcded2010-10-03 22:12:27 -07001918 struct be_rx_obj *rxo = dev;
1919 struct be_adapter *adapter = rxo->adapter;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001920
Sathya Perla3c8def92011-06-12 20:01:58 +00001921 event_handle(adapter, &rxo->rx_eq, true);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001922
1923 return IRQ_HANDLED;
1924}
1925
Sathya Perla5fb379e2009-06-18 00:02:59 +00001926static irqreturn_t be_msix_tx_mcc(int irq, void *dev)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001927{
1928 struct be_adapter *adapter = dev;
1929
Sathya Perla3c8def92011-06-12 20:01:58 +00001930 event_handle(adapter, &adapter->tx_eq, false);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001931
1932 return IRQ_HANDLED;
1933}
1934
Sathya Perla2e588f82011-03-11 02:49:26 +00001935static inline bool do_gro(struct be_rx_compl_info *rxcp)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001936{
Sathya Perla2e588f82011-03-11 02:49:26 +00001937	return rxcp->tcpf && !rxcp->err;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001938}
1939
stephen hemminger49b05222010-10-21 07:50:48 +00001940static int be_poll_rx(struct napi_struct *napi, int budget)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001941{
1942 struct be_eq_obj *rx_eq = container_of(napi, struct be_eq_obj, napi);
Sathya Perla3abcded2010-10-03 22:12:27 -07001943 struct be_rx_obj *rxo = container_of(rx_eq, struct be_rx_obj, rx_eq);
1944 struct be_adapter *adapter = rxo->adapter;
1945 struct be_queue_info *rx_cq = &rxo->cq;
Sathya Perla2e588f82011-03-11 02:49:26 +00001946 struct be_rx_compl_info *rxcp;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001947 u32 work_done;
1948
Sathya Perla3abcded2010-10-03 22:12:27 -07001949 rxo->stats.rx_polls++;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001950 for (work_done = 0; work_done < budget; work_done++) {
Sathya Perla3abcded2010-10-03 22:12:27 -07001951 rxcp = be_rx_compl_get(rxo);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001952 if (!rxcp)
1953 break;
1954
Padmanabh Ratnakare80d9da2011-03-07 03:07:58 +00001955 /* Ignore flush completions */
Padmanabh Ratnakar009dd872011-05-10 05:12:17 +00001956 if (rxcp->num_rcvd && rxcp->pkt_size) {
Sathya Perla2e588f82011-03-11 02:49:26 +00001957 if (do_gro(rxcp))
Sathya Perla64642812010-12-01 01:04:17 +00001958 be_rx_compl_process_gro(adapter, rxo, rxcp);
1959 else
1960 be_rx_compl_process(adapter, rxo, rxcp);
Padmanabh Ratnakar009dd872011-05-10 05:12:17 +00001961 } else if (rxcp->pkt_size == 0) {
1962 be_rx_compl_discard(adapter, rxo, rxcp);
Sathya Perla64642812010-12-01 01:04:17 +00001963 }
Padmanabh Ratnakar009dd872011-05-10 05:12:17 +00001964
Sathya Perla2e588f82011-03-11 02:49:26 +00001965 be_rx_stats_update(rxo, rxcp);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001966 }
1967
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001968 /* Refill the queue */
Sathya Perla3abcded2010-10-03 22:12:27 -07001969 if (atomic_read(&rxo->q.used) < RX_FRAGS_REFILL_WM)
Eric Dumazet1829b082011-03-01 05:48:12 +00001970 be_post_rx_frags(rxo, GFP_ATOMIC);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001971
1972 /* All consumed */
1973 if (work_done < budget) {
1974 napi_complete(napi);
Sathya Perla8788fdc2009-07-27 22:52:03 +00001975 be_cq_notify(adapter, rx_cq->id, true, work_done);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001976 } else {
1977 /* More to be consumed; continue with interrupts disabled */
Sathya Perla8788fdc2009-07-27 22:52:03 +00001978 be_cq_notify(adapter, rx_cq->id, false, work_done);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001979 }
1980 return work_done;
1981}
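/*
 * Editor's note: be_poll_rx() follows the canonical NAPI contract:
 * consume at most 'budget' completions with interrupts off; if fewer
 * were available, complete NAPI and re-arm the CQ interrupt, otherwise
 * stay in polling mode. Skeleton with hypothetical helpers:
 */
extern int reap_one_completion(void);		/* hypothetical */
extern void rearm_cq_interrupt(void);		/* hypothetical */

static int napi_poll_sketch(struct napi_struct *napi, int budget)
{
	int work = 0;

	while (work < budget && reap_one_completion())
		work++;

	if (work < budget) {		/* ring is empty: go idle */
		napi_complete(napi);
		rearm_cq_interrupt();
	}
	return work;
}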
1982
Sathya Perlaf31e50a2010-03-02 03:56:39 -08001983/* As TX and MCC share the same EQ check for both TX and MCC completions.
1984 * For TX/MCC we don't honour budget; consume everything
1985 */
1986static int be_poll_tx_mcc(struct napi_struct *napi, int budget)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001987{
Sathya Perlaf31e50a2010-03-02 03:56:39 -08001988 struct be_eq_obj *tx_eq = container_of(napi, struct be_eq_obj, napi);
1989 struct be_adapter *adapter =
1990 container_of(tx_eq, struct be_adapter, tx_eq);
Sathya Perla3c8def92011-06-12 20:01:58 +00001991 struct be_tx_obj *txo;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001992 struct be_eth_tx_compl *txcp;
Sathya Perla3c8def92011-06-12 20:01:58 +00001993 int tx_compl, mcc_compl, status = 0;
1994 u8 i;
1995 u16 num_wrbs;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001996
Sathya Perla3c8def92011-06-12 20:01:58 +00001997 for_all_tx_queues(adapter, txo, i) {
1998 tx_compl = 0;
1999 num_wrbs = 0;
2000 while ((txcp = be_tx_compl_get(&txo->cq))) {
2001 num_wrbs += be_tx_compl_process(adapter, txo,
2002 AMAP_GET_BITS(struct amap_eth_tx_compl,
2003 wrb_index, txcp));
2004 tx_compl++;
2005 }
2006 if (tx_compl) {
2007 be_cq_notify(adapter, txo->cq.id, true, tx_compl);
2008
2009 atomic_sub(num_wrbs, &txo->q.used);
2010
2011 /* As Tx wrbs have been freed up, wake up netdev queue
2012 * if it was stopped due to lack of tx wrbs. */
2013 if (__netif_subqueue_stopped(adapter->netdev, i) &&
2014 atomic_read(&txo->q.used) < txo->q.len / 2) {
2015 netif_wake_subqueue(adapter->netdev, i);
2016 }
2017
2018 adapter->drv_stats.be_tx_events++;
2019 txo->stats.be_tx_compl += tx_compl;
2020 }
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002021 }
2022
Sathya Perlaf31e50a2010-03-02 03:56:39 -08002023 mcc_compl = be_process_mcc(adapter, &status);
2024
Sathya Perlaf31e50a2010-03-02 03:56:39 -08002025 if (mcc_compl) {
2026 struct be_mcc_obj *mcc_obj = &adapter->mcc_obj;
2027 be_cq_notify(adapter, mcc_obj->cq.id, true, mcc_compl);
2028 }
2029
Sathya Perla3c8def92011-06-12 20:01:58 +00002030 napi_complete(napi);
Sathya Perla5fb379e2009-06-18 00:02:59 +00002031
Sathya Perla3c8def92011-06-12 20:01:58 +00002032 be_eq_notify(adapter, tx_eq->q.id, true, false, 0);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002033 return 1;
2034}
2035
Ajit Khaparded053de92010-09-03 06:23:30 +00002036void be_detect_dump_ue(struct be_adapter *adapter)
Ajit Khaparde7c185272010-07-29 06:16:33 +00002037{
2038 u32 ue_status_lo, ue_status_hi, ue_status_lo_mask, ue_status_hi_mask;
2039 u32 i;
2040
2041 pci_read_config_dword(adapter->pdev,
2042 PCICFG_UE_STATUS_LOW, &ue_status_lo);
2043 pci_read_config_dword(adapter->pdev,
2044 PCICFG_UE_STATUS_HIGH, &ue_status_hi);
2045 pci_read_config_dword(adapter->pdev,
2046 PCICFG_UE_STATUS_LOW_MASK, &ue_status_lo_mask);
2047 pci_read_config_dword(adapter->pdev,
2048 PCICFG_UE_STATUS_HI_MASK, &ue_status_hi_mask);
2049
2050 ue_status_lo = (ue_status_lo & (~ue_status_lo_mask));
2051 ue_status_hi = (ue_status_hi & (~ue_status_hi_mask));
2052
Ajit Khaparded053de92010-09-03 06:23:30 +00002053 if (ue_status_lo || ue_status_hi) {
2054 adapter->ue_detected = true;
Ajit Khaparde7acc2082011-02-11 13:38:17 +00002055 adapter->eeh_err = true;
Ajit Khaparded053de92010-09-03 06:23:30 +00002056 dev_err(&adapter->pdev->dev, "UE Detected!!\n");
2057 }
2058
Ajit Khaparde7c185272010-07-29 06:16:33 +00002059 if (ue_status_lo) {
2060 for (i = 0; ue_status_lo; ue_status_lo >>= 1, i++) {
2061 if (ue_status_lo & 1)
2062 dev_err(&adapter->pdev->dev,
2063 "UE: %s bit set\n", ue_status_low_desc[i]);
2064 }
2065 }
2066 if (ue_status_hi) {
2067 for (i = 0; ue_status_hi; ue_status_hi >>= 1, i++) {
2068 if (ue_status_hi & 1)
2069 dev_err(&adapter->pdev->dev,
2070 "UE: %s bit set\n", ue_status_hi_desc[i]);
2071 }
2072 }
2073
2074}
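/*
 * Editor's note: the dump loops above walk each status word bit by bit,
 * shifting right and indexing the matching description table. A
 * standalone userspace illustration (compile separately) with a made-up
 * status value:
 */
#include <stdio.h>

int main(void)
{
	const char *desc[] = { "CEV", "CTX", "DBUF", "ERX" };
	unsigned int status = 0x5;	/* bits 0 and 2 set */
	unsigned int i;

	for (i = 0; status; status >>= 1, i++)
		if (status & 1)
			printf("UE: %s bit set\n", desc[i]);
	return 0;	/* prints CEV and DBUF */
}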
2075
Sathya Perlaea1dae12009-03-19 23:56:20 -07002076static void be_worker(struct work_struct *work)
2077{
2078 struct be_adapter *adapter =
2079 container_of(work, struct be_adapter, work.work);
Sathya Perla3abcded2010-10-03 22:12:27 -07002080 struct be_rx_obj *rxo;
Sathya Perla3c8def92011-06-12 20:01:58 +00002081 struct be_tx_obj *txo;
Sathya Perla3abcded2010-10-03 22:12:27 -07002082 int i;
Sathya Perlaea1dae12009-03-19 23:56:20 -07002083
Sathya Perla16da8252011-03-21 20:49:27 +00002084 if (!adapter->ue_detected && !lancer_chip(adapter))
2085 be_detect_dump_ue(adapter);
2086
Somnath Koturf203af72010-10-25 23:01:03 +00002087 /* when interrupts are not yet enabled, just reap any pending
2088 * mcc completions */
2089 if (!netif_running(adapter->netdev)) {
2090 int mcc_compl, status = 0;
2091
2092 mcc_compl = be_process_mcc(adapter, &status);
2093
2094 if (mcc_compl) {
2095 struct be_mcc_obj *mcc_obj = &adapter->mcc_obj;
2096 be_cq_notify(adapter, mcc_obj->cq.id, false, mcc_compl);
2097 }
Ajit Khaparde9b037f32011-02-11 13:38:29 +00002098
Somnath Koturf203af72010-10-25 23:01:03 +00002099 goto reschedule;
2100 }
2101
Selvin Xavier005d5692011-05-16 07:36:35 +00002102 if (!adapter->stats_cmd_sent) {
2103 if (lancer_chip(adapter))
2104 lancer_cmd_get_pport_stats(adapter,
2105 &adapter->stats_cmd);
2106 else
2107 be_cmd_get_stats(adapter, &adapter->stats_cmd);
2108 }
Sathya Perla3c8def92011-06-12 20:01:58 +00002109
2110 for_all_tx_queues(adapter, txo, i)
2111 be_tx_rate_update(txo);
Sathya Perla4097f662009-03-24 16:40:13 -07002112
Sathya Perla3abcded2010-10-03 22:12:27 -07002113 for_all_rx_queues(adapter, rxo, i) {
2114 be_rx_rate_update(rxo);
2115 be_rx_eqd_update(adapter, rxo);
2116
2117 if (rxo->rx_post_starved) {
2118 rxo->rx_post_starved = false;
Eric Dumazet1829b082011-03-01 05:48:12 +00002119 be_post_rx_frags(rxo, GFP_KERNEL);
Sathya Perla3abcded2010-10-03 22:12:27 -07002120 }
Sathya Perlaea1dae12009-03-19 23:56:20 -07002121 }
2122
Somnath Koturf203af72010-10-25 23:01:03 +00002123reschedule:
Ivan Vecerae74fbd032011-04-21 00:20:04 +00002124 adapter->work_counter++;
Sathya Perlaea1dae12009-03-19 23:56:20 -07002125 schedule_delayed_work(&adapter->work, msecs_to_jiffies(1000));
2126}
2127
Sathya Perla8d56ff12009-11-22 22:02:26 +00002128static void be_msix_disable(struct be_adapter *adapter)
2129{
Sathya Perlaac6a0c42011-03-21 20:49:25 +00002130 if (msix_enabled(adapter)) {
Sathya Perla8d56ff12009-11-22 22:02:26 +00002131 pci_disable_msix(adapter->pdev);
Sathya Perlaac6a0c42011-03-21 20:49:25 +00002132 adapter->num_msix_vec = 0;
Sathya Perla3abcded2010-10-03 22:12:27 -07002133 }
2134}
2135
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002136static void be_msix_enable(struct be_adapter *adapter)
2137{
Sathya Perla3abcded2010-10-03 22:12:27 -07002138#define BE_MIN_MSIX_VECTORS (1 + 1) /* Rx + Tx */
Sathya Perlaac6a0c42011-03-21 20:49:25 +00002139 int i, status, num_vec;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002140
Sathya Perlaac6a0c42011-03-21 20:49:25 +00002141 num_vec = be_num_rxqs_want(adapter) + 1;
Sathya Perla3abcded2010-10-03 22:12:27 -07002142
Sathya Perlaac6a0c42011-03-21 20:49:25 +00002143 for (i = 0; i < num_vec; i++)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002144 adapter->msix_entries[i].entry = i;
2145
Sathya Perlaac6a0c42011-03-21 20:49:25 +00002146 status = pci_enable_msix(adapter->pdev, adapter->msix_entries, num_vec);
Sathya Perla3abcded2010-10-03 22:12:27 -07002147 if (status == 0) {
2148 goto done;
2149 } else if (status >= BE_MIN_MSIX_VECTORS) {
Sathya Perlaac6a0c42011-03-21 20:49:25 +00002150 num_vec = status;
Sathya Perla3abcded2010-10-03 22:12:27 -07002151 if (pci_enable_msix(adapter->pdev, adapter->msix_entries,
Sathya Perlaac6a0c42011-03-21 20:49:25 +00002152 num_vec) == 0)
Sathya Perla3abcded2010-10-03 22:12:27 -07002153 goto done;
Sathya Perla3abcded2010-10-03 22:12:27 -07002154 }
2155 return;
2156done:
Sathya Perlaac6a0c42011-03-21 20:49:25 +00002157 adapter->num_msix_vec = num_vec;
2158 return;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002159}
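/*
 * Editor's note: in this kernel generation pci_enable_msix() returns 0
 * on success, a negative errno on hard failure, or a positive count of
 * vectors the platform can actually grant. The path above retries once
 * with the granted count when it still meets the driver's minimum; the
 * fallback in outline:
 */
static int msix_enable_fallback(struct pci_dev *pdev,
				struct msix_entry *entries, int want, int min)
{
	int rc = pci_enable_msix(pdev, entries, want);

	if (rc > 0 && rc >= min)	/* fewer vectors on offer: retry */
		rc = pci_enable_msix(pdev, entries, rc);
	return rc;			/* 0 on success */
}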
2160
Sarveshwar Bandiba343c72010-03-31 02:56:12 +00002161static void be_sriov_enable(struct be_adapter *adapter)
2162{
Sarveshwar Bandi344dbf12010-07-09 01:43:55 +00002163 be_check_sriov_fn_type(adapter);
Ajit Khaparde6dedec82010-07-29 06:15:32 +00002164#ifdef CONFIG_PCI_IOV
Sarveshwar Bandiba343c72010-03-31 02:56:12 +00002165 if (be_physfn(adapter) && num_vfs) {
Ajit Khaparde81be8f02011-04-06 18:08:17 +00002166 int status, pos;
2167 u16 nvfs;
2168
2169 pos = pci_find_ext_capability(adapter->pdev,
2170 PCI_EXT_CAP_ID_SRIOV);
2171 pci_read_config_word(adapter->pdev,
2172 pos + PCI_SRIOV_TOTAL_VF, &nvfs);
2173
2174 if (num_vfs > nvfs) {
2175 dev_info(&adapter->pdev->dev,
2176 "Device supports %d VFs and not %d\n",
2177 nvfs, num_vfs);
2178 num_vfs = nvfs;
2179 }
Ajit Khaparde6dedec82010-07-29 06:15:32 +00002180
Sarveshwar Bandiba343c72010-03-31 02:56:12 +00002181 status = pci_enable_sriov(adapter->pdev, num_vfs);
2182		adapter->sriov_enabled = !status;
2183 }
2184#endif
Sarveshwar Bandiba343c72010-03-31 02:56:12 +00002185}
2186
2187static void be_sriov_disable(struct be_adapter *adapter)
2188{
2189#ifdef CONFIG_PCI_IOV
2190 if (adapter->sriov_enabled) {
2191 pci_disable_sriov(adapter->pdev);
2192 adapter->sriov_enabled = false;
2193 }
2194#endif
2195}
2196
Sathya Perlafe6d2a32010-11-21 23:25:50 +00002197static inline int be_msix_vec_get(struct be_adapter *adapter,
2198 struct be_eq_obj *eq_obj)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002199{
Padmanabh Ratnakarecd62102011-04-03 01:54:11 +00002200 return adapter->msix_entries[eq_obj->eq_idx].vector;
Sathya Perlab628bde2009-08-17 00:58:26 +00002201}
2202
2203static int be_request_irq(struct be_adapter *adapter,
2204 struct be_eq_obj *eq_obj,
Sathya Perla3abcded2010-10-03 22:12:27 -07002205 void *handler, char *desc, void *context)
Sathya Perlab628bde2009-08-17 00:58:26 +00002206{
2207 struct net_device *netdev = adapter->netdev;
2208 int vec;
2209
2210 sprintf(eq_obj->desc, "%s-%s", netdev->name, desc);
Sathya Perlafe6d2a32010-11-21 23:25:50 +00002211 vec = be_msix_vec_get(adapter, eq_obj);
Sathya Perla3abcded2010-10-03 22:12:27 -07002212 return request_irq(vec, handler, 0, eq_obj->desc, context);
Sathya Perlab628bde2009-08-17 00:58:26 +00002213}
2214
Sathya Perla3abcded2010-10-03 22:12:27 -07002215static void be_free_irq(struct be_adapter *adapter, struct be_eq_obj *eq_obj,
2216 void *context)
Sathya Perlab628bde2009-08-17 00:58:26 +00002217{
Sathya Perlafe6d2a32010-11-21 23:25:50 +00002218 int vec = be_msix_vec_get(adapter, eq_obj);
Sathya Perla3abcded2010-10-03 22:12:27 -07002219 free_irq(vec, context);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002220}
2221
2222static int be_msix_register(struct be_adapter *adapter)
2223{
Sathya Perla3abcded2010-10-03 22:12:27 -07002224 struct be_rx_obj *rxo;
2225 int status, i;
2226 char qname[10];
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002227
Sathya Perla3abcded2010-10-03 22:12:27 -07002228 status = be_request_irq(adapter, &adapter->tx_eq, be_msix_tx_mcc, "tx",
2229 adapter);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002230 if (status)
2231 goto err;
2232
Sathya Perla3abcded2010-10-03 22:12:27 -07002233 for_all_rx_queues(adapter, rxo, i) {
2234 sprintf(qname, "rxq%d", i);
2235 status = be_request_irq(adapter, &rxo->rx_eq, be_msix_rx,
2236 qname, rxo);
2237 if (status)
2238 goto err_msix;
2239 }
Sathya Perlab628bde2009-08-17 00:58:26 +00002240
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002241 return 0;
Sathya Perlab628bde2009-08-17 00:58:26 +00002242
Sathya Perla3abcded2010-10-03 22:12:27 -07002243err_msix:
2244 be_free_irq(adapter, &adapter->tx_eq, adapter);
2245
2246 for (i--, rxo = &adapter->rx_obj[i]; i >= 0; i--, rxo--)
2247 be_free_irq(adapter, &rxo->rx_eq, rxo);
2248
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002249err:
2250 dev_warn(&adapter->pdev->dev,
2251 "MSIX Request IRQ failed - err %d\n", status);
Sathya Perlaac6a0c42011-03-21 20:49:25 +00002252 be_msix_disable(adapter);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002253 return status;
2254}
2255
2256static int be_irq_register(struct be_adapter *adapter)
2257{
2258 struct net_device *netdev = adapter->netdev;
2259 int status;
2260
Sathya Perlaac6a0c42011-03-21 20:49:25 +00002261 if (msix_enabled(adapter)) {
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002262 status = be_msix_register(adapter);
2263 if (status == 0)
2264 goto done;
Sarveshwar Bandiba343c72010-03-31 02:56:12 +00002265 /* INTx is not supported for VF */
2266 if (!be_physfn(adapter))
2267 return status;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002268 }
2269
2270 /* INTx */
2271 netdev->irq = adapter->pdev->irq;
2272 status = request_irq(netdev->irq, be_intx, IRQF_SHARED, netdev->name,
2273 adapter);
2274 if (status) {
2275 dev_err(&adapter->pdev->dev,
2276 "INTx request IRQ failed - err %d\n", status);
2277 return status;
2278 }
2279done:
2280 adapter->isr_registered = true;
2281 return 0;
2282}
2283
2284static void be_irq_unregister(struct be_adapter *adapter)
2285{
2286 struct net_device *netdev = adapter->netdev;
Sathya Perla3abcded2010-10-03 22:12:27 -07002287 struct be_rx_obj *rxo;
2288 int i;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002289
2290 if (!adapter->isr_registered)
2291 return;
2292
2293 /* INTx */
Sathya Perlaac6a0c42011-03-21 20:49:25 +00002294 if (!msix_enabled(adapter)) {
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002295 free_irq(netdev->irq, adapter);
2296 goto done;
2297 }
2298
2299 /* MSIx */
Sathya Perla3abcded2010-10-03 22:12:27 -07002300 be_free_irq(adapter, &adapter->tx_eq, adapter);
2301
2302 for_all_rx_queues(adapter, rxo, i)
2303 be_free_irq(adapter, &rxo->rx_eq, rxo);
2304
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002305done:
2306 adapter->isr_registered = false;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002307}
2308
Sathya Perla889cd4b2010-05-30 23:33:45 +00002309static int be_close(struct net_device *netdev)
2310{
2311 struct be_adapter *adapter = netdev_priv(netdev);
Sathya Perla3abcded2010-10-03 22:12:27 -07002312 struct be_rx_obj *rxo;
Sathya Perla3c8def92011-06-12 20:01:58 +00002313 struct be_tx_obj *txo;
Sathya Perla889cd4b2010-05-30 23:33:45 +00002314 struct be_eq_obj *tx_eq = &adapter->tx_eq;
Sathya Perla3abcded2010-10-03 22:12:27 -07002315 int vec, i;
Sathya Perla889cd4b2010-05-30 23:33:45 +00002316
Sathya Perla889cd4b2010-05-30 23:33:45 +00002317 be_async_mcc_disable(adapter);
2318
Sathya Perla889cd4b2010-05-30 23:33:45 +00002319 netif_carrier_off(netdev);
2320 adapter->link_up = false;
2321
Sathya Perlafe6d2a32010-11-21 23:25:50 +00002322 if (!lancer_chip(adapter))
2323 be_intr_set(adapter, false);
Sathya Perla889cd4b2010-05-30 23:33:45 +00002324
Padmanabh Ratnakar63fcb272011-03-07 03:09:17 +00002325 for_all_rx_queues(adapter, rxo, i)
2326 napi_disable(&rxo->rx_eq.napi);
2327
2328 napi_disable(&tx_eq->napi);
2329
2330 if (lancer_chip(adapter)) {
Padmanabh Ratnakar63fcb272011-03-07 03:09:17 +00002331 be_cq_notify(adapter, adapter->mcc_obj.cq.id, false, 0);
2332 for_all_rx_queues(adapter, rxo, i)
2333 be_cq_notify(adapter, rxo->cq.id, false, 0);
Sathya Perla3c8def92011-06-12 20:01:58 +00002334 for_all_tx_queues(adapter, txo, i)
2335 be_cq_notify(adapter, txo->cq.id, false, 0);
Padmanabh Ratnakar63fcb272011-03-07 03:09:17 +00002336 }
2337
Sathya Perlaac6a0c42011-03-21 20:49:25 +00002338 if (msix_enabled(adapter)) {
Sathya Perlafe6d2a32010-11-21 23:25:50 +00002339 vec = be_msix_vec_get(adapter, tx_eq);
Sathya Perla889cd4b2010-05-30 23:33:45 +00002340 synchronize_irq(vec);
Sathya Perla3abcded2010-10-03 22:12:27 -07002341
2342 for_all_rx_queues(adapter, rxo, i) {
Sathya Perlafe6d2a32010-11-21 23:25:50 +00002343 vec = be_msix_vec_get(adapter, &rxo->rx_eq);
Sathya Perla3abcded2010-10-03 22:12:27 -07002344 synchronize_irq(vec);
2345 }
Sathya Perla889cd4b2010-05-30 23:33:45 +00002346 } else {
2347 synchronize_irq(netdev->irq);
2348 }
2349 be_irq_unregister(adapter);
2350
Sathya Perla889cd4b2010-05-30 23:33:45 +00002351 /* Wait for all pending tx completions to arrive so that
2352 * all tx skbs are freed.
2353 */
Sathya Perla3c8def92011-06-12 20:01:58 +00002354 for_all_tx_queues(adapter, txo, i)
2355 be_tx_compl_clean(adapter, txo);
Sathya Perla889cd4b2010-05-30 23:33:45 +00002356
2357 return 0;
2358}
2359
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002360static int be_open(struct net_device *netdev)
2361{
2362 struct be_adapter *adapter = netdev_priv(netdev);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002363 struct be_eq_obj *tx_eq = &adapter->tx_eq;
Sathya Perla3abcded2010-10-03 22:12:27 -07002364 struct be_rx_obj *rxo;
Sathya Perlaa8f447bd2009-06-18 00:10:27 +00002365 bool link_up;
Sathya Perla3abcded2010-10-03 22:12:27 -07002366 int status, i;
Sarveshwar Bandi0388f252009-10-28 04:15:20 -07002367 u8 mac_speed;
2368 u16 link_speed;
Sathya Perla5fb379e2009-06-18 00:02:59 +00002369
Sathya Perla3abcded2010-10-03 22:12:27 -07002370 for_all_rx_queues(adapter, rxo, i) {
Eric Dumazet1829b082011-03-01 05:48:12 +00002371 be_post_rx_frags(rxo, GFP_KERNEL);
Sathya Perla3abcded2010-10-03 22:12:27 -07002372 napi_enable(&rxo->rx_eq.napi);
2373 }
Sathya Perla5fb379e2009-06-18 00:02:59 +00002374 napi_enable(&tx_eq->napi);
2375
2376 be_irq_register(adapter);
2377
Sathya Perlafe6d2a32010-11-21 23:25:50 +00002378 if (!lancer_chip(adapter))
2379 be_intr_set(adapter, true);
Sathya Perla5fb379e2009-06-18 00:02:59 +00002380
2381 /* The evt queues are created in unarmed state; arm them */
Sathya Perla3abcded2010-10-03 22:12:27 -07002382 for_all_rx_queues(adapter, rxo, i) {
2383 be_eq_notify(adapter, rxo->rx_eq.q.id, true, false, 0);
2384 be_cq_notify(adapter, rxo->cq.id, true, 0);
2385 }
Sathya Perla8788fdc2009-07-27 22:52:03 +00002386 be_eq_notify(adapter, tx_eq->q.id, true, false, 0);
Sathya Perla5fb379e2009-06-18 00:02:59 +00002387
Sathya Perla7a1e9b22010-02-17 01:35:11 +00002388 /* Now that interrupts are on we can process async mcc */
2389 be_async_mcc_enable(adapter);
2390
Sarveshwar Bandi0388f252009-10-28 04:15:20 -07002391 status = be_cmd_link_status_query(adapter, &link_up, &mac_speed,
Ajit Khaparde187e8752011-04-19 12:11:46 +00002392 &link_speed, 0);
Sathya Perlaa8f447bd2009-06-18 00:10:27 +00002393 if (status)
Sathya Perla889cd4b2010-05-30 23:33:45 +00002394 goto err;
Sathya Perlaa8f447bd2009-06-18 00:10:27 +00002395 be_link_status_update(adapter, link_up);
Sathya Perla5fb379e2009-06-18 00:02:59 +00002396
Sarveshwar Bandiba343c72010-03-31 02:56:12 +00002397 if (be_physfn(adapter)) {
Ajit Khaparde1da87b72010-07-23 01:51:22 +00002398 status = be_vid_config(adapter, false, 0);
Sathya Perla889cd4b2010-05-30 23:33:45 +00002399 if (status)
2400 goto err;
2401
Sarveshwar Bandiba343c72010-03-31 02:56:12 +00002402 status = be_cmd_set_flow_control(adapter,
2403 adapter->tx_fc, adapter->rx_fc);
2404 if (status)
Sathya Perla889cd4b2010-05-30 23:33:45 +00002405 goto err;
Sarveshwar Bandiba343c72010-03-31 02:56:12 +00002406 }
Ajit Khaparde4f2aa892009-11-06 02:07:32 +00002407
Sathya Perla889cd4b2010-05-30 23:33:45 +00002408 return 0;
2409err:
2410 be_close(adapter->netdev);
2411 return -EIO;
Sathya Perla5fb379e2009-06-18 00:02:59 +00002412}
2413
Ajit Khaparde71d8d1b2009-12-03 06:16:59 +00002414static int be_setup_wol(struct be_adapter *adapter, bool enable)
2415{
2416 struct be_dma_mem cmd;
2417 int status = 0;
2418 u8 mac[ETH_ALEN];
2419
2420 memset(mac, 0, ETH_ALEN);
2421
2422 cmd.size = sizeof(struct be_cmd_req_acpi_wol_magic_config);
Ivan Vecera2b7bceb2011-02-02 08:05:12 +00002423 cmd.va = dma_alloc_coherent(&adapter->pdev->dev, cmd.size, &cmd.dma,
2424 GFP_KERNEL);
Ajit Khaparde71d8d1b2009-12-03 06:16:59 +00002425 if (cmd.va == NULL)
2426 return -1;
2427 memset(cmd.va, 0, cmd.size);
2428
2429 if (enable) {
2430 status = pci_write_config_dword(adapter->pdev,
2431 PCICFG_PM_CONTROL_OFFSET, PCICFG_PM_CONTROL_MASK);
2432 if (status) {
2433 dev_err(&adapter->pdev->dev,
Frans Pop2381a552010-03-24 07:57:36 +00002434 "Could not enable Wake-on-lan\n");
Ivan Vecera2b7bceb2011-02-02 08:05:12 +00002435 dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va,
2436 cmd.dma);
Ajit Khaparde71d8d1b2009-12-03 06:16:59 +00002437 return status;
2438 }
2439 status = be_cmd_enable_magic_wol(adapter,
2440 adapter->netdev->dev_addr, &cmd);
2441 pci_enable_wake(adapter->pdev, PCI_D3hot, 1);
2442 pci_enable_wake(adapter->pdev, PCI_D3cold, 1);
2443 } else {
2444 status = be_cmd_enable_magic_wol(adapter, mac, &cmd);
2445 pci_enable_wake(adapter->pdev, PCI_D3hot, 0);
2446 pci_enable_wake(adapter->pdev, PCI_D3cold, 0);
2447 }
2448
Ivan Vecera2b7bceb2011-02-02 08:05:12 +00002449 dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va, cmd.dma);
Ajit Khaparde71d8d1b2009-12-03 06:16:59 +00002450 return status;
2451}
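
/*
 * Illustrative sketch, not part of the driver: the generic pattern used
 * above for arming and disarming Wake-on-LAN through the PCI PM core.
 * pci_enable_wake() is the standard kernel API; the helper name below is
 * hypothetical.
 */
static void example_toggle_pci_wake(struct pci_dev *pdev, bool enable)
{
	/* Arm (or disarm) wake-up from both the D3hot and D3cold states */
	pci_enable_wake(pdev, PCI_D3hot, enable);
	pci_enable_wake(pdev, PCI_D3cold, enable);
}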
2452
Ajit Khaparde6d87f5c2010-08-25 00:32:33 +00002453/*
2454 * Generate a seed MAC address from the PF MAC address using jhash.
2455 * MAC addresses for VFs are assigned incrementally, starting from the seed.
2456 * These addresses are programmed in the ASIC by the PF, and each VF driver
2457 * queries for its MAC address during probe (see the sketch after this function).
2458 */
2459static inline int be_vf_eth_addr_config(struct be_adapter *adapter)
2460{
2461 u32 vf = 0;
Sathya Perla3abcded2010-10-03 22:12:27 -07002462 int status = 0;
Ajit Khaparde6d87f5c2010-08-25 00:32:33 +00002463 u8 mac[ETH_ALEN];
2464
2465 be_vf_eth_addr_generate(adapter, mac);
2466
2467 for (vf = 0; vf < num_vfs; vf++) {
2468 status = be_cmd_pmac_add(adapter, mac,
2469 adapter->vf_cfg[vf].vf_if_handle,
Ajit Khapardef8617e02011-02-11 13:36:37 +00002470 &adapter->vf_cfg[vf].vf_pmac_id,
2471 vf + 1);
Ajit Khaparde6d87f5c2010-08-25 00:32:33 +00002472 if (status)
2473 dev_err(&adapter->pdev->dev,
2474			"MAC address add failed for VF %d\n", vf);
2475 else
2476 memcpy(adapter->vf_cfg[vf].vf_mac_addr, mac, ETH_ALEN);
2477
2478 mac[5] += 1;
2479 }
2480 return status;
2481}
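
/*
 * Illustrative sketch, not part of the driver: how the loop above derives
 * each VF's MAC address from the jhash-generated seed. The function name
 * is hypothetical; ETH_ALEN is the standard 6-byte Ethernet address length.
 */
static void example_vf_mac_from_seed(const u8 *seed, u32 vf, u8 *mac)
{
	memcpy(mac, seed, ETH_ALEN);
	/* Bump only the last octet, as the driver does; this assumes fewer
	 * than 256 VFs and no carry into the upper octets.
	 */
	mac[5] += (u8)vf;
}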
2482
2483static inline void be_vf_eth_addr_rem(struct be_adapter *adapter)
2484{
2485 u32 vf;
2486
2487 for (vf = 0; vf < num_vfs; vf++) {
2488 if (adapter->vf_cfg[vf].vf_pmac_id != BE_INVALID_PMAC_ID)
2489 be_cmd_pmac_del(adapter,
2490 adapter->vf_cfg[vf].vf_if_handle,
Ajit Khapardef8617e02011-02-11 13:36:37 +00002491 adapter->vf_cfg[vf].vf_pmac_id, vf + 1);
Ajit Khaparde6d87f5c2010-08-25 00:32:33 +00002492 }
2493}
2494
Sathya Perla5fb379e2009-06-18 00:02:59 +00002495static int be_setup(struct be_adapter *adapter)
2496{
Sathya Perla5fb379e2009-06-18 00:02:59 +00002497 struct net_device *netdev = adapter->netdev;
Sarveshwar Bandiba343c72010-03-31 02:56:12 +00002498 u32 cap_flags, en_flags, vf = 0;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002499 int status;
Sarveshwar Bandiba343c72010-03-31 02:56:12 +00002500 u8 mac[ETH_ALEN];
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002501
Padmanabh Ratnakarf21b5382011-03-07 03:09:36 +00002502 cap_flags = en_flags = BE_IF_FLAGS_UNTAGGED |
2503 BE_IF_FLAGS_BROADCAST |
2504 BE_IF_FLAGS_MULTICAST;
Sarveshwar Bandiba343c72010-03-31 02:56:12 +00002505
2506 if (be_physfn(adapter)) {
2507 cap_flags |= BE_IF_FLAGS_MCAST_PROMISCUOUS |
2508 BE_IF_FLAGS_PROMISCUOUS |
2509 BE_IF_FLAGS_PASS_L3L4_ERRORS;
2510 en_flags |= BE_IF_FLAGS_PASS_L3L4_ERRORS;
Sathya Perla3abcded2010-10-03 22:12:27 -07002511
Sathya Perlaac6a0c42011-03-21 20:49:25 +00002512 if (adapter->function_caps & BE_FUNCTION_CAPS_RSS) {
Sathya Perla3abcded2010-10-03 22:12:27 -07002513 cap_flags |= BE_IF_FLAGS_RSS;
2514 en_flags |= BE_IF_FLAGS_RSS;
2515 }
Sarveshwar Bandiba343c72010-03-31 02:56:12 +00002516 }
Sathya Perla73d540f2009-10-14 20:20:42 +00002517
2518 status = be_cmd_if_create(adapter, cap_flags, en_flags,
2519 netdev->dev_addr, false/* pmac_invalid */,
Sarveshwar Bandiba343c72010-03-31 02:56:12 +00002520 &adapter->if_handle, &adapter->pmac_id, 0);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002521 if (status != 0)
2522 goto do_none;
2523
Sarveshwar Bandiba343c72010-03-31 02:56:12 +00002524 if (be_physfn(adapter)) {
Ajit Khapardec99ac3e2011-02-11 13:35:02 +00002525 if (adapter->sriov_enabled) {
2526 while (vf < num_vfs) {
2527 cap_flags = en_flags = BE_IF_FLAGS_UNTAGGED |
2528 BE_IF_FLAGS_BROADCAST;
2529 status = be_cmd_if_create(adapter, cap_flags,
2530 en_flags, mac, true,
Ajit Khaparde64600ea2010-07-23 01:50:34 +00002531 &adapter->vf_cfg[vf].vf_if_handle,
Sarveshwar Bandiba343c72010-03-31 02:56:12 +00002532 NULL, vf+1);
Ajit Khapardec99ac3e2011-02-11 13:35:02 +00002533 if (status) {
2534 dev_err(&adapter->pdev->dev,
2535 "Interface Create failed for VF %d\n",
2536 vf);
2537 goto if_destroy;
2538 }
2539 adapter->vf_cfg[vf].vf_pmac_id =
2540 BE_INVALID_PMAC_ID;
2541 vf++;
Sarveshwar Bandiba343c72010-03-31 02:56:12 +00002542 }
Sarveshwar Bandi84e5b9f2010-05-27 16:28:15 -07002543 }
Ajit Khapardec99ac3e2011-02-11 13:35:02 +00002544 } else {
Sarveshwar Bandiba343c72010-03-31 02:56:12 +00002545 status = be_cmd_mac_addr_query(adapter, mac,
2546 MAC_ADDRESS_TYPE_NETWORK, false, adapter->if_handle);
2547 if (!status) {
2548 memcpy(adapter->netdev->dev_addr, mac, ETH_ALEN);
2549 memcpy(adapter->netdev->perm_addr, mac, ETH_ALEN);
2550 }
2551 }
2552
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002553 status = be_tx_queues_create(adapter);
2554 if (status != 0)
2555 goto if_destroy;
2556
2557 status = be_rx_queues_create(adapter);
2558 if (status != 0)
2559 goto tx_qs_destroy;
2560
Sathya Perla5fb379e2009-06-18 00:02:59 +00002561 status = be_mcc_queues_create(adapter);
2562 if (status != 0)
2563 goto rx_qs_destroy;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002564
Ajit Khaparde0dffc832009-11-29 17:57:46 +00002565 adapter->link_speed = -1;
2566
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002567 return 0;
2568
Sathya Perla5fb379e2009-06-18 00:02:59 +00002569rx_qs_destroy:
2570 be_rx_queues_destroy(adapter);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002571tx_qs_destroy:
2572 be_tx_queues_destroy(adapter);
2573if_destroy:
Ajit Khapardec99ac3e2011-02-11 13:35:02 +00002574 if (be_physfn(adapter) && adapter->sriov_enabled)
2575 for (vf = 0; vf < num_vfs; vf++)
2576 if (adapter->vf_cfg[vf].vf_if_handle)
2577 be_cmd_if_destroy(adapter,
Ajit Khaparde658681f2011-02-11 13:34:46 +00002578 adapter->vf_cfg[vf].vf_if_handle,
2579 vf + 1);
2580 be_cmd_if_destroy(adapter, adapter->if_handle, 0);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002581do_none:
2582 return status;
2583}
2584
Sathya Perla5fb379e2009-06-18 00:02:59 +00002585static int be_clear(struct be_adapter *adapter)
2586{
Ajit Khaparde7ab8b0b2011-02-11 13:35:56 +00002587 int vf;
2588
Ajit Khapardec99ac3e2011-02-11 13:35:02 +00002589 if (be_physfn(adapter) && adapter->sriov_enabled)
Ajit Khaparde6d87f5c2010-08-25 00:32:33 +00002590 be_vf_eth_addr_rem(adapter);
2591
Sathya Perla1a8887d2009-08-17 00:58:41 +00002592 be_mcc_queues_destroy(adapter);
Sathya Perla5fb379e2009-06-18 00:02:59 +00002593 be_rx_queues_destroy(adapter);
2594 be_tx_queues_destroy(adapter);
Padmanabh Ratnakar1f5db832011-04-03 01:54:39 +00002595 adapter->eq_next_idx = 0;
Sathya Perla5fb379e2009-06-18 00:02:59 +00002596
Ajit Khaparde7ab8b0b2011-02-11 13:35:56 +00002597 if (be_physfn(adapter) && adapter->sriov_enabled)
2598 for (vf = 0; vf < num_vfs; vf++)
2599 if (adapter->vf_cfg[vf].vf_if_handle)
2600 be_cmd_if_destroy(adapter,
2601 adapter->vf_cfg[vf].vf_if_handle,
2602 vf + 1);
2603
Ajit Khaparde658681f2011-02-11 13:34:46 +00002604 be_cmd_if_destroy(adapter, adapter->if_handle, 0);
Sathya Perla5fb379e2009-06-18 00:02:59 +00002605
Sathya Perla2243e2e2009-11-22 22:02:03 +00002606 /* tell fw we're done with firing cmds */
2607 be_cmd_fw_clean(adapter);
Sathya Perla5fb379e2009-06-18 00:02:59 +00002608 return 0;
2609}
2610
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002611
Ajit Khaparde84517482009-09-04 03:12:16 +00002612#define FW_FILE_HDR_SIGN "ServerEngines Corp. "
Sarveshwar Bandifa9a6fe2009-11-20 14:23:47 -08002613static bool be_flash_redboot(struct be_adapter *adapter,
Ajit Khaparde3f0d4562010-02-09 01:30:35 +00002614 const u8 *p, u32 img_start, int image_size,
2615 int hdr_size)
Sarveshwar Bandifa9a6fe2009-11-20 14:23:47 -08002616{
2617 u32 crc_offset;
2618 u8 flashed_crc[4];
2619 int status;
Ajit Khaparde3f0d4562010-02-09 01:30:35 +00002620
2621 crc_offset = hdr_size + img_start + image_size - 4;
2622
Sarveshwar Bandifa9a6fe2009-11-20 14:23:47 -08002623 p += crc_offset;
Ajit Khaparde3f0d4562010-02-09 01:30:35 +00002624
2625 status = be_cmd_get_flash_crc(adapter, flashed_crc,
Ajit Khapardef510fc62010-03-31 01:47:45 +00002626 (image_size - 4));
Sarveshwar Bandifa9a6fe2009-11-20 14:23:47 -08002627 if (status) {
2628 dev_err(&adapter->pdev->dev,
2629 "could not get crc from flash, not flashing redboot\n");
2630 return false;
2631 }
2632
2633	/* update redboot only if CRC does not match */
2634 if (!memcmp(flashed_crc, p, 4))
2635 return false;
2636 else
2637 return true;
Sarveshwar Bandifa9a6fe2009-11-20 14:23:47 -08002638}
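
/*
 * Illustrative sketch, not part of the driver: the reflash gate that
 * be_flash_redboot() implements above. The 4-byte CRC stored at the end
 * of the on-flash image is compared with the CRC of the candidate image;
 * equal CRCs mean the section is already current and can be skipped.
 * The helper name is hypothetical.
 */
static bool example_needs_reflash(const u8 *flashed_crc, const u8 *new_crc)
{
	/* Reflash only when the stored and candidate CRCs differ */
	return memcmp(flashed_crc, new_crc, 4) != 0;
}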
2639
Ajit Khaparde3f0d4562010-02-09 01:30:35 +00002640static int be_flash_data(struct be_adapter *adapter,
Ajit Khaparde84517482009-09-04 03:12:16 +00002641 const struct firmware *fw,
Ajit Khaparde3f0d4562010-02-09 01:30:35 +00002642 struct be_dma_mem *flash_cmd, int num_of_images)
2643
Ajit Khaparde84517482009-09-04 03:12:16 +00002644{
Ajit Khaparde3f0d4562010-02-09 01:30:35 +00002645 int status = 0, i, filehdr_size = 0;
2646 u32 total_bytes = 0, flash_op;
Ajit Khaparde84517482009-09-04 03:12:16 +00002647 int num_bytes;
2648 const u8 *p = fw->data;
2649 struct be_cmd_write_flashrom *req = flash_cmd->va;
Joe Perches215faf92010-12-21 02:16:10 -08002650 const struct flash_comp *pflashcomp;
Sarveshwar Bandi9fe96932010-03-02 22:37:28 +00002651 int num_comp;
Ajit Khaparde84517482009-09-04 03:12:16 +00002652
Joe Perches215faf92010-12-21 02:16:10 -08002653 static const struct flash_comp gen3_flash_types[9] = {
Ajit Khaparde3f0d4562010-02-09 01:30:35 +00002654 { FLASH_iSCSI_PRIMARY_IMAGE_START_g3, IMG_TYPE_ISCSI_ACTIVE,
2655 FLASH_IMAGE_MAX_SIZE_g3},
2656 { FLASH_REDBOOT_START_g3, IMG_TYPE_REDBOOT,
2657 FLASH_REDBOOT_IMAGE_MAX_SIZE_g3},
2658 { FLASH_iSCSI_BIOS_START_g3, IMG_TYPE_BIOS,
2659 FLASH_BIOS_IMAGE_MAX_SIZE_g3},
2660 { FLASH_PXE_BIOS_START_g3, IMG_TYPE_PXE_BIOS,
2661 FLASH_BIOS_IMAGE_MAX_SIZE_g3},
2662 { FLASH_FCoE_BIOS_START_g3, IMG_TYPE_FCOE_BIOS,
2663 FLASH_BIOS_IMAGE_MAX_SIZE_g3},
2664 { FLASH_iSCSI_BACKUP_IMAGE_START_g3, IMG_TYPE_ISCSI_BACKUP,
2665 FLASH_IMAGE_MAX_SIZE_g3},
2666 { FLASH_FCoE_PRIMARY_IMAGE_START_g3, IMG_TYPE_FCOE_FW_ACTIVE,
2667 FLASH_IMAGE_MAX_SIZE_g3},
2668 { FLASH_FCoE_BACKUP_IMAGE_START_g3, IMG_TYPE_FCOE_FW_BACKUP,
Sarveshwar Bandi9fe96932010-03-02 22:37:28 +00002669 FLASH_IMAGE_MAX_SIZE_g3},
2670 { FLASH_NCSI_START_g3, IMG_TYPE_NCSI_FW,
2671 FLASH_NCSI_IMAGE_MAX_SIZE_g3}
Ajit Khaparde3f0d4562010-02-09 01:30:35 +00002672 };
Joe Perches215faf92010-12-21 02:16:10 -08002673 static const struct flash_comp gen2_flash_types[8] = {
Ajit Khaparde3f0d4562010-02-09 01:30:35 +00002674 { FLASH_iSCSI_PRIMARY_IMAGE_START_g2, IMG_TYPE_ISCSI_ACTIVE,
2675 FLASH_IMAGE_MAX_SIZE_g2},
2676 { FLASH_REDBOOT_START_g2, IMG_TYPE_REDBOOT,
2677 FLASH_REDBOOT_IMAGE_MAX_SIZE_g2},
2678 { FLASH_iSCSI_BIOS_START_g2, IMG_TYPE_BIOS,
2679 FLASH_BIOS_IMAGE_MAX_SIZE_g2},
2680 { FLASH_PXE_BIOS_START_g2, IMG_TYPE_PXE_BIOS,
2681 FLASH_BIOS_IMAGE_MAX_SIZE_g2},
2682 { FLASH_FCoE_BIOS_START_g2, IMG_TYPE_FCOE_BIOS,
2683 FLASH_BIOS_IMAGE_MAX_SIZE_g2},
2684 { FLASH_iSCSI_BACKUP_IMAGE_START_g2, IMG_TYPE_ISCSI_BACKUP,
2685 FLASH_IMAGE_MAX_SIZE_g2},
2686 { FLASH_FCoE_PRIMARY_IMAGE_START_g2, IMG_TYPE_FCOE_FW_ACTIVE,
2687 FLASH_IMAGE_MAX_SIZE_g2},
2688 { FLASH_FCoE_BACKUP_IMAGE_START_g2, IMG_TYPE_FCOE_FW_BACKUP,
2689 FLASH_IMAGE_MAX_SIZE_g2}
2690 };
2691
2692 if (adapter->generation == BE_GEN3) {
2693 pflashcomp = gen3_flash_types;
2694 filehdr_size = sizeof(struct flash_file_hdr_g3);
Joe Perches215faf92010-12-21 02:16:10 -08002695 num_comp = ARRAY_SIZE(gen3_flash_types);
Ajit Khaparde3f0d4562010-02-09 01:30:35 +00002696 } else {
2697 pflashcomp = gen2_flash_types;
2698 filehdr_size = sizeof(struct flash_file_hdr_g2);
Joe Perches215faf92010-12-21 02:16:10 -08002699 num_comp = ARRAY_SIZE(gen2_flash_types);
Ajit Khaparde84517482009-09-04 03:12:16 +00002700 }
Sarveshwar Bandi9fe96932010-03-02 22:37:28 +00002701 for (i = 0; i < num_comp; i++) {
2702 if ((pflashcomp[i].optype == IMG_TYPE_NCSI_FW) &&
2703 memcmp(adapter->fw_ver, "3.102.148.0", 11) < 0)
2704 continue;
Ajit Khaparde3f0d4562010-02-09 01:30:35 +00002705 if ((pflashcomp[i].optype == IMG_TYPE_REDBOOT) &&
2706 (!be_flash_redboot(adapter, fw->data,
Ajit Khapardefae21a42011-02-11 13:37:42 +00002707 pflashcomp[i].offset, pflashcomp[i].size, filehdr_size +
2708 (num_of_images * sizeof(struct image_hdr)))))
Ajit Khaparde3f0d4562010-02-09 01:30:35 +00002709 continue;
2710 p = fw->data;
2711 p += filehdr_size + pflashcomp[i].offset
2712 + (num_of_images * sizeof(struct image_hdr));
2713 if (p + pflashcomp[i].size > fw->data + fw->size)
Ajit Khaparde84517482009-09-04 03:12:16 +00002714 return -1;
Ajit Khaparde3f0d4562010-02-09 01:30:35 +00002715 total_bytes = pflashcomp[i].size;
2716 while (total_bytes) {
2717 if (total_bytes > 32*1024)
2718 num_bytes = 32*1024;
2719 else
2720 num_bytes = total_bytes;
2721 total_bytes -= num_bytes;
Ajit Khaparde84517482009-09-04 03:12:16 +00002722
Ajit Khaparde3f0d4562010-02-09 01:30:35 +00002723 if (!total_bytes)
2724 flash_op = FLASHROM_OPER_FLASH;
2725 else
2726 flash_op = FLASHROM_OPER_SAVE;
2727 memcpy(req->params.data_buf, p, num_bytes);
2728 p += num_bytes;
2729 status = be_cmd_write_flashrom(adapter, flash_cmd,
2730 pflashcomp[i].optype, flash_op, num_bytes);
2731 if (status) {
2732 dev_err(&adapter->pdev->dev,
2733 "cmd to write to flash rom failed.\n");
2734 return -1;
2735 }
Ajit Khaparde84517482009-09-04 03:12:16 +00002736 }
Ajit Khaparde84517482009-09-04 03:12:16 +00002737 }
Ajit Khaparde84517482009-09-04 03:12:16 +00002738 return 0;
2739}
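
/*
 * Illustrative sketch, not part of the driver: the chunking scheme used
 * by be_flash_data() above. The image is pushed to the card 32KB at a
 * time; every chunk but the last is sent with a "save" opcode, and the
 * final chunk is sent with a "flash" opcode that commits the section.
 * The callback type and all names here are hypothetical.
 */
typedef int (*example_write_chunk_t)(const u8 *buf, u32 len, bool commit);

static int example_flash_in_chunks(const u8 *img, u32 total,
			example_write_chunk_t write_chunk)
{
	u32 chunk;
	int status;

	while (total) {
		chunk = total > 32 * 1024 ? 32 * 1024 : total;
		total -= chunk;
		/* Commit only on the last chunk, i.e. when 'total' drains */
		status = write_chunk(img, chunk, total == 0);
		if (status)
			return status;
		img += chunk;
	}
	return 0;
}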
2740
Ajit Khaparde3f0d4562010-02-09 01:30:35 +00002741static int get_ufigen_type(struct flash_file_hdr_g2 *fhdr)
2742{
2743 if (fhdr == NULL)
2744 return 0;
2745 if (fhdr->build[0] == '3')
2746 return BE_GEN3;
2747 else if (fhdr->build[0] == '2')
2748 return BE_GEN2;
2749 else
2750 return 0;
2751}
2752
Shripad Nunjundarao485bf562011-05-16 07:36:59 +00002753static int lancer_fw_download(struct be_adapter *adapter,
2754 const struct firmware *fw)
Ajit Khaparde84517482009-09-04 03:12:16 +00002755{
Shripad Nunjundarao485bf562011-05-16 07:36:59 +00002756#define LANCER_FW_DOWNLOAD_CHUNK (32 * 1024)
2757#define LANCER_FW_DOWNLOAD_LOCATION "/prg"
2758 struct be_dma_mem flash_cmd;
Shripad Nunjundarao485bf562011-05-16 07:36:59 +00002759 const u8 *data_ptr = NULL;
2760 u8 *dest_image_ptr = NULL;
2761 size_t image_size = 0;
2762 u32 chunk_size = 0;
2763 u32 data_written = 0;
2764 u32 offset = 0;
2765 int status = 0;
2766 u8 add_status = 0;
2767
2768 if (!IS_ALIGNED(fw->size, sizeof(u32))) {
2769 dev_err(&adapter->pdev->dev,
2770			"FW image not properly aligned. "
2771			"Length must be 4-byte aligned.\n");
2772 status = -EINVAL;
2773 goto lancer_fw_exit;
2774 }
2775
2776 flash_cmd.size = sizeof(struct lancer_cmd_req_write_object)
2777 + LANCER_FW_DOWNLOAD_CHUNK;
2778 flash_cmd.va = dma_alloc_coherent(&adapter->pdev->dev, flash_cmd.size,
2779 &flash_cmd.dma, GFP_KERNEL);
2780 if (!flash_cmd.va) {
2781 status = -ENOMEM;
2782 dev_err(&adapter->pdev->dev,
2783 "Memory allocation failure while flashing\n");
2784 goto lancer_fw_exit;
2785 }
2786
Shripad Nunjundarao485bf562011-05-16 07:36:59 +00002787 dest_image_ptr = flash_cmd.va +
2788 sizeof(struct lancer_cmd_req_write_object);
2789 image_size = fw->size;
2790 data_ptr = fw->data;
2791
2792 while (image_size) {
2793 chunk_size = min_t(u32, image_size, LANCER_FW_DOWNLOAD_CHUNK);
2794
2795 /* Copy the image chunk content. */
2796 memcpy(dest_image_ptr, data_ptr, chunk_size);
2797
2798 status = lancer_cmd_write_object(adapter, &flash_cmd,
2799 chunk_size, offset, LANCER_FW_DOWNLOAD_LOCATION,
2800 &data_written, &add_status);
2801
2802 if (status)
2803 break;
2804
2805 offset += data_written;
2806 data_ptr += data_written;
2807 image_size -= data_written;
2808 }
2809
2810 if (!status) {
2811 /* Commit the FW written */
2812 status = lancer_cmd_write_object(adapter, &flash_cmd,
2813 0, offset, LANCER_FW_DOWNLOAD_LOCATION,
2814 &data_written, &add_status);
2815 }
2816
2817 dma_free_coherent(&adapter->pdev->dev, flash_cmd.size, flash_cmd.va,
2818 flash_cmd.dma);
2819 if (status) {
2820 dev_err(&adapter->pdev->dev,
2821 "Firmware load error. "
2822 "Status code: 0x%x Additional Status: 0x%x\n",
2823 status, add_status);
2824 goto lancer_fw_exit;
2825 }
2826
2827 dev_info(&adapter->pdev->dev, "Firmware flashed successfully\n");
2828lancer_fw_exit:
2829 return status;
2830}
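
/*
 * Illustrative sketch, not part of the driver: the download loop used by
 * lancer_fw_download() above. The device reports how many bytes it
 * actually consumed, and the host advances by that amount rather than by
 * the requested chunk size; a final zero-length write at the end offset
 * commits the image. The callback type and all names are hypothetical.
 */
typedef int (*example_write_obj_t)(const u8 *buf, u32 len, u32 offset,
			u32 *written);

static int example_lancer_style_download(const u8 *img, size_t size,
			example_write_obj_t write_obj)
{
	u32 offset = 0, written = 0;
	u32 chunk;
	int status;

	while (size) {
		chunk = min_t(size_t, size, 32 * 1024);
		status = write_obj(img, chunk, offset, &written);
		if (status)
			return status;
		/* Advance by what the device reports, not by 'chunk' */
		offset += written;
		img += written;
		size -= written;
	}
	/* A zero-length write at the final offset commits the image */
	return write_obj(NULL, 0, offset, &written);
}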
2831
2832static int be_fw_download(struct be_adapter *adapter, const struct firmware *fw)
2833{
Ajit Khaparde3f0d4562010-02-09 01:30:35 +00002834 struct flash_file_hdr_g2 *fhdr;
2835 struct flash_file_hdr_g3 *fhdr3;
2836 struct image_hdr *img_hdr_ptr = NULL;
Ajit Khaparde84517482009-09-04 03:12:16 +00002837 struct be_dma_mem flash_cmd;
Ajit Khaparde84517482009-09-04 03:12:16 +00002838 const u8 *p;
Shripad Nunjundarao485bf562011-05-16 07:36:59 +00002839 int status = 0, i = 0, num_imgs = 0;
Ajit Khaparde84517482009-09-04 03:12:16 +00002840
2841 p = fw->data;
Ajit Khaparde3f0d4562010-02-09 01:30:35 +00002842 fhdr = (struct flash_file_hdr_g2 *) p;
Ajit Khaparde84517482009-09-04 03:12:16 +00002843
Ajit Khaparde84517482009-09-04 03:12:16 +00002844 flash_cmd.size = sizeof(struct be_cmd_write_flashrom) + 32*1024;
Ivan Vecera2b7bceb2011-02-02 08:05:12 +00002845 flash_cmd.va = dma_alloc_coherent(&adapter->pdev->dev, flash_cmd.size,
2846 &flash_cmd.dma, GFP_KERNEL);
Ajit Khaparde84517482009-09-04 03:12:16 +00002847 if (!flash_cmd.va) {
2848 status = -ENOMEM;
2849 dev_err(&adapter->pdev->dev,
2850 "Memory allocation failure while flashing\n");
Shripad Nunjundarao485bf562011-05-16 07:36:59 +00002851 goto be_fw_exit;
Ajit Khaparde84517482009-09-04 03:12:16 +00002852 }
2853
Ajit Khaparde3f0d4562010-02-09 01:30:35 +00002854 if ((adapter->generation == BE_GEN3) &&
2855 (get_ufigen_type(fhdr) == BE_GEN3)) {
2856 fhdr3 = (struct flash_file_hdr_g3 *) fw->data;
Ajit Khaparde8b93b712010-03-31 01:57:10 +00002857 num_imgs = le32_to_cpu(fhdr3->num_imgs);
2858 for (i = 0; i < num_imgs; i++) {
Ajit Khaparde3f0d4562010-02-09 01:30:35 +00002859 img_hdr_ptr = (struct image_hdr *) (fw->data +
2860 (sizeof(struct flash_file_hdr_g3) +
Ajit Khaparde8b93b712010-03-31 01:57:10 +00002861 i * sizeof(struct image_hdr)));
2862 if (le32_to_cpu(img_hdr_ptr->imageid) == 1)
2863 status = be_flash_data(adapter, fw, &flash_cmd,
2864 num_imgs);
Ajit Khaparde3f0d4562010-02-09 01:30:35 +00002865 }
2866 } else if ((adapter->generation == BE_GEN2) &&
2867 (get_ufigen_type(fhdr) == BE_GEN2)) {
2868 status = be_flash_data(adapter, fw, &flash_cmd, 0);
2869 } else {
2870 dev_err(&adapter->pdev->dev,
2871 "UFI and Interface are not compatible for flashing\n");
2872 status = -1;
Ajit Khaparde84517482009-09-04 03:12:16 +00002873 }
2874
Ivan Vecera2b7bceb2011-02-02 08:05:12 +00002875 dma_free_coherent(&adapter->pdev->dev, flash_cmd.size, flash_cmd.va,
2876 flash_cmd.dma);
Ajit Khaparde84517482009-09-04 03:12:16 +00002877 if (status) {
2878 dev_err(&adapter->pdev->dev, "Firmware load error\n");
Shripad Nunjundarao485bf562011-05-16 07:36:59 +00002879 goto be_fw_exit;
Ajit Khaparde84517482009-09-04 03:12:16 +00002880 }
2881
André Goddard Rosaaf901ca2009-11-14 13:09:05 -02002882 dev_info(&adapter->pdev->dev, "Firmware flashed successfully\n");
Ajit Khaparde84517482009-09-04 03:12:16 +00002883
Shripad Nunjundarao485bf562011-05-16 07:36:59 +00002884be_fw_exit:
2885 return status;
2886}
2887
2888int be_load_fw(struct be_adapter *adapter, u8 *fw_file)
2889{
2890 const struct firmware *fw;
2891 int status;
2892
2893 if (!netif_running(adapter->netdev)) {
2894 dev_err(&adapter->pdev->dev,
2895 "Firmware load not allowed (interface is down)\n");
2896 return -1;
2897 }
2898
2899 status = request_firmware(&fw, fw_file, &adapter->pdev->dev);
2900 if (status)
2901 goto fw_exit;
2902
2903 dev_info(&adapter->pdev->dev, "Flashing firmware file %s\n", fw_file);
2904
2905 if (lancer_chip(adapter))
2906 status = lancer_fw_download(adapter, fw);
2907 else
2908 status = be_fw_download(adapter, fw);
2909
Ajit Khaparde84517482009-09-04 03:12:16 +00002910fw_exit:
2911 release_firmware(fw);
2912 return status;
2913}
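
/*
 * Illustrative sketch, not part of the driver: the canonical
 * request_firmware()/release_firmware() pairing that be_load_fw() wraps
 * above. request_firmware() loads the named blob into a read-only
 * struct firmware via the kernel's firmware loader. The consumer
 * callback and helper name are hypothetical.
 */
static int example_with_firmware(struct device *dev, const char *name,
			int (*consume)(const struct firmware *fw))
{
	const struct firmware *fw;
	int status;

	status = request_firmware(&fw, name, dev);
	if (status)
		return status;

	status = consume(fw);

	/* Always release the blob, whether or not consumption succeeded */
	release_firmware(fw);
	return status;
}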
2914
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002915static struct net_device_ops be_netdev_ops = {
2916 .ndo_open = be_open,
2917 .ndo_stop = be_close,
2918 .ndo_start_xmit = be_xmit,
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002919 .ndo_set_rx_mode = be_set_multicast_list,
2920 .ndo_set_mac_address = be_mac_addr_set,
2921 .ndo_change_mtu = be_change_mtu,
2922 .ndo_validate_addr = eth_validate_addr,
2923 .ndo_vlan_rx_register = be_vlan_register,
2924 .ndo_vlan_rx_add_vid = be_vlan_add_vid,
2925 .ndo_vlan_rx_kill_vid = be_vlan_rem_vid,
Ajit Khaparde64600ea2010-07-23 01:50:34 +00002926 .ndo_set_vf_mac = be_set_vf_mac,
Ajit Khaparde1da87b72010-07-23 01:51:22 +00002927 .ndo_set_vf_vlan = be_set_vf_vlan,
Ajit Khapardee1d18732010-07-23 01:52:13 +00002928 .ndo_set_vf_tx_rate = be_set_vf_tx_rate,
Ajit Khaparde64600ea2010-07-23 01:50:34 +00002929 .ndo_get_vf_config = be_get_vf_config
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002930};
2931
2932static void be_netdev_init(struct net_device *netdev)
2933{
2934 struct be_adapter *adapter = netdev_priv(netdev);
Sathya Perla3abcded2010-10-03 22:12:27 -07002935 struct be_rx_obj *rxo;
2936 int i;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002937
Michał Mirosław6332c8d2011-04-07 02:43:48 +00002938 netdev->hw_features |= NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6 |
Michał Mirosław8b8ddc62011-04-08 02:38:47 +00002939 NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_RXCSUM |
2940 NETIF_F_HW_VLAN_TX;
2941 if (be_multi_rxq(adapter))
2942 netdev->hw_features |= NETIF_F_RXHASH;
Michał Mirosław6332c8d2011-04-07 02:43:48 +00002943
2944 netdev->features |= netdev->hw_features |
Michał Mirosław8b8ddc62011-04-08 02:38:47 +00002945 NETIF_F_HW_VLAN_RX | NETIF_F_HW_VLAN_FILTER;
Ajit Khaparde4b972912011-04-06 18:07:43 +00002946
Padmanabh Ratnakareb8a50d2011-06-11 15:58:46 -07002947 netdev->vlan_features |= NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6 |
Michał Mirosław79032642010-11-30 06:38:00 +00002948 NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;
Ajit Khaparde51c59872009-11-29 17:54:54 +00002949
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002950 netdev->flags |= IFF_MULTICAST;
2951
Ajit Khaparde9e90c962009-11-06 02:06:59 +00002952 /* Default settings for Rx and Tx flow control */
2953 adapter->rx_fc = true;
2954 adapter->tx_fc = true;
2955
Ajit Khapardec190e3c2009-09-04 03:12:29 +00002956 netif_set_gso_max_size(netdev, 65535);
2957
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002958 BE_SET_NETDEV_OPS(netdev, &be_netdev_ops);
2959
2960 SET_ETHTOOL_OPS(netdev, &be_ethtool_ops);
2961
Sathya Perla3abcded2010-10-03 22:12:27 -07002962 for_all_rx_queues(adapter, rxo, i)
2963 netif_napi_add(netdev, &rxo->rx_eq.napi, be_poll_rx,
2964 BE_NAPI_WEIGHT);
2965
Sathya Perla5fb379e2009-06-18 00:02:59 +00002966 netif_napi_add(netdev, &adapter->tx_eq.napi, be_poll_tx_mcc,
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002967 BE_NAPI_WEIGHT);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002968}
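
/*
 * Illustrative sketch, not part of the driver: the per-queue NAPI
 * registration done in be_netdev_init() above. Each RX object carries
 * its own napi_struct bound to a poll handler and a weight (per-poll
 * budget). The stub poll handler and helper name are hypothetical.
 */
static int example_poll_stub(struct napi_struct *napi, int budget)
{
	/* A real handler processes up to 'budget' packets and calls
	 * napi_complete() only when it did less work than allowed.
	 */
	napi_complete(napi);
	return 0;
}

static void example_register_napi(struct net_device *netdev,
			struct napi_struct *napi)
{
	netif_napi_add(netdev, napi, example_poll_stub, BE_NAPI_WEIGHT);
}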
2969
2970static void be_unmap_pci_bars(struct be_adapter *adapter)
2971{
Sathya Perla8788fdc2009-07-27 22:52:03 +00002972 if (adapter->csr)
2973 iounmap(adapter->csr);
2974 if (adapter->db)
2975 iounmap(adapter->db);
Sarveshwar Bandiba343c72010-03-31 02:56:12 +00002976 if (adapter->pcicfg && be_physfn(adapter))
Sathya Perla8788fdc2009-07-27 22:52:03 +00002977 iounmap(adapter->pcicfg);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002978}
2979
2980static int be_map_pci_bars(struct be_adapter *adapter)
2981{
2982 u8 __iomem *addr;
Sarveshwar Bandiba343c72010-03-31 02:56:12 +00002983 int pcicfg_reg, db_reg;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002984
Sathya Perlafe6d2a32010-11-21 23:25:50 +00002985 if (lancer_chip(adapter)) {
2986 addr = ioremap_nocache(pci_resource_start(adapter->pdev, 0),
2987 pci_resource_len(adapter->pdev, 0));
2988 if (addr == NULL)
2989 return -ENOMEM;
2990 adapter->db = addr;
2991 return 0;
2992 }
2993
Sarveshwar Bandiba343c72010-03-31 02:56:12 +00002994 if (be_physfn(adapter)) {
2995 addr = ioremap_nocache(pci_resource_start(adapter->pdev, 2),
2996 pci_resource_len(adapter->pdev, 2));
2997 if (addr == NULL)
2998 return -ENOMEM;
2999 adapter->csr = addr;
3000 }
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003001
Sarveshwar Bandiba343c72010-03-31 02:56:12 +00003002 if (adapter->generation == BE_GEN2) {
3003 pcicfg_reg = 1;
3004 db_reg = 4;
3005 } else {
3006 pcicfg_reg = 0;
3007 if (be_physfn(adapter))
3008 db_reg = 4;
3009 else
3010 db_reg = 0;
3011 }
3012 addr = ioremap_nocache(pci_resource_start(adapter->pdev, db_reg),
3013 pci_resource_len(adapter->pdev, db_reg));
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003014 if (addr == NULL)
3015 goto pci_map_err;
Sathya Perla8788fdc2009-07-27 22:52:03 +00003016 adapter->db = addr;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003017
Sarveshwar Bandiba343c72010-03-31 02:56:12 +00003018 if (be_physfn(adapter)) {
3019 addr = ioremap_nocache(
3020 pci_resource_start(adapter->pdev, pcicfg_reg),
3021 pci_resource_len(adapter->pdev, pcicfg_reg));
3022 if (addr == NULL)
3023 goto pci_map_err;
3024 adapter->pcicfg = addr;
3025 } else
3026 adapter->pcicfg = adapter->db + SRIOV_VF_PCICFG_OFFSET;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003027
3028 return 0;
3029pci_map_err:
3030 be_unmap_pci_bars(adapter);
3031 return -ENOMEM;
3032}
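
/*
 * Illustrative sketch, not part of the driver: mapping a single PCI BAR
 * with the pattern used by be_map_pci_bars() above.
 * pci_resource_start()/pci_resource_len() locate the BAR in the host
 * address space and ioremap_nocache() hands the CPU an uncached view of
 * it. The helper name is hypothetical.
 */
static u8 __iomem *example_map_bar(struct pci_dev *pdev, int bar)
{
	return ioremap_nocache(pci_resource_start(pdev, bar),
			pci_resource_len(pdev, bar));
}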
3033
3034
3035static void be_ctrl_cleanup(struct be_adapter *adapter)
3036{
Sathya Perla8788fdc2009-07-27 22:52:03 +00003037 struct be_dma_mem *mem = &adapter->mbox_mem_alloced;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003038
3039 be_unmap_pci_bars(adapter);
3040
3041 if (mem->va)
Ivan Vecera2b7bceb2011-02-02 08:05:12 +00003042 dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
3043 mem->dma);
Sathya Perlae7b909a2009-11-22 22:01:10 +00003044
3045 mem = &adapter->mc_cmd_mem;
3046 if (mem->va)
Ivan Vecera2b7bceb2011-02-02 08:05:12 +00003047 dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
3048 mem->dma);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003049}
3050
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003051static int be_ctrl_init(struct be_adapter *adapter)
3052{
Sathya Perla8788fdc2009-07-27 22:52:03 +00003053 struct be_dma_mem *mbox_mem_alloc = &adapter->mbox_mem_alloced;
3054 struct be_dma_mem *mbox_mem_align = &adapter->mbox_mem;
Sathya Perlae7b909a2009-11-22 22:01:10 +00003055 struct be_dma_mem *mc_cmd_mem = &adapter->mc_cmd_mem;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003056 int status;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003057
3058 status = be_map_pci_bars(adapter);
3059 if (status)
Sathya Perlae7b909a2009-11-22 22:01:10 +00003060 goto done;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003061
3062 mbox_mem_alloc->size = sizeof(struct be_mcc_mailbox) + 16;
Ivan Vecera2b7bceb2011-02-02 08:05:12 +00003063 mbox_mem_alloc->va = dma_alloc_coherent(&adapter->pdev->dev,
3064 mbox_mem_alloc->size,
3065 &mbox_mem_alloc->dma,
3066 GFP_KERNEL);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003067 if (!mbox_mem_alloc->va) {
Sathya Perlae7b909a2009-11-22 22:01:10 +00003068 status = -ENOMEM;
3069 goto unmap_pci_bars;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003070 }
Sathya Perlae7b909a2009-11-22 22:01:10 +00003071
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003072 mbox_mem_align->size = sizeof(struct be_mcc_mailbox);
3073 mbox_mem_align->va = PTR_ALIGN(mbox_mem_alloc->va, 16);
3074 mbox_mem_align->dma = PTR_ALIGN(mbox_mem_alloc->dma, 16);
3075 memset(mbox_mem_align->va, 0, sizeof(struct be_mcc_mailbox));
Sathya Perlae7b909a2009-11-22 22:01:10 +00003076
3077 mc_cmd_mem->size = sizeof(struct be_cmd_req_mcast_mac_config);
Ivan Vecera2b7bceb2011-02-02 08:05:12 +00003078 mc_cmd_mem->va = dma_alloc_coherent(&adapter->pdev->dev,
3079 mc_cmd_mem->size, &mc_cmd_mem->dma,
3080 GFP_KERNEL);
Sathya Perlae7b909a2009-11-22 22:01:10 +00003081 if (mc_cmd_mem->va == NULL) {
3082 status = -ENOMEM;
3083 goto free_mbox;
3084 }
3085 memset(mc_cmd_mem->va, 0, mc_cmd_mem->size);
3086
Ivan Vecera29849612010-12-14 05:43:19 +00003087 mutex_init(&adapter->mbox_lock);
Sathya Perla8788fdc2009-07-27 22:52:03 +00003088 spin_lock_init(&adapter->mcc_lock);
3089 spin_lock_init(&adapter->mcc_cq_lock);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003090
Sarveshwar Bandidd131e72010-05-25 16:16:32 -07003091 init_completion(&adapter->flash_compl);
Sathya Perlacf588472010-02-14 21:22:01 +00003092 pci_save_state(adapter->pdev);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003093 return 0;
Sathya Perlae7b909a2009-11-22 22:01:10 +00003094
3095free_mbox:
Ivan Vecera2b7bceb2011-02-02 08:05:12 +00003096 dma_free_coherent(&adapter->pdev->dev, mbox_mem_alloc->size,
3097 mbox_mem_alloc->va, mbox_mem_alloc->dma);
Sathya Perlae7b909a2009-11-22 22:01:10 +00003098
3099unmap_pci_bars:
3100 be_unmap_pci_bars(adapter);
3101
3102done:
3103 return status;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003104}
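
/*
 * Illustrative sketch, not part of the driver: the over-allocate-and-align
 * trick used for the mailbox in be_ctrl_init() above. The mailbox must be
 * 16-byte aligned, so 16 extra bytes are requested and the CPU address is
 * rounded up with PTR_ALIGN(); the caller must round the DMA address
 * identically and keep 'raw' around for the eventual dma_free_coherent().
 * The helper name is hypothetical.
 */
static void *example_alloc_aligned16(struct device *dev, size_t size,
			struct be_dma_mem *raw)
{
	raw->size = size + 16;
	raw->va = dma_alloc_coherent(dev, raw->size, &raw->dma, GFP_KERNEL);
	if (!raw->va)
		return NULL;
	return PTR_ALIGN(raw->va, 16);
}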
3105
3106static void be_stats_cleanup(struct be_adapter *adapter)
3107{
Sathya Perla3abcded2010-10-03 22:12:27 -07003108 struct be_dma_mem *cmd = &adapter->stats_cmd;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003109
3110 if (cmd->va)
Ivan Vecera2b7bceb2011-02-02 08:05:12 +00003111 dma_free_coherent(&adapter->pdev->dev, cmd->size,
3112 cmd->va, cmd->dma);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003113}
3114
3115static int be_stats_init(struct be_adapter *adapter)
3116{
Sathya Perla3abcded2010-10-03 22:12:27 -07003117 struct be_dma_mem *cmd = &adapter->stats_cmd;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003118
Selvin Xavier005d5692011-05-16 07:36:35 +00003119 if (adapter->generation == BE_GEN2) {
Ajit Khaparde89a88ab2011-05-16 07:36:18 +00003120 cmd->size = sizeof(struct be_cmd_req_get_stats_v0);
Selvin Xavier005d5692011-05-16 07:36:35 +00003121 } else {
3122 if (lancer_chip(adapter))
3123 cmd->size = sizeof(struct lancer_cmd_req_pport_stats);
3124 else
3125 cmd->size = sizeof(struct be_cmd_req_get_stats_v1);
3126 }
Ivan Vecera2b7bceb2011-02-02 08:05:12 +00003127 cmd->va = dma_alloc_coherent(&adapter->pdev->dev, cmd->size, &cmd->dma,
3128 GFP_KERNEL);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003129 if (cmd->va == NULL)
3130 return -1;
David S. Millerd291b9a2010-01-28 21:36:21 -08003131 memset(cmd->va, 0, cmd->size);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003132 return 0;
3133}
3134
3135static void __devexit be_remove(struct pci_dev *pdev)
3136{
3137 struct be_adapter *adapter = pci_get_drvdata(pdev);
Sathya Perla8d56ff12009-11-22 22:02:26 +00003138
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003139 if (!adapter)
3140 return;
3141
Somnath Koturf203af72010-10-25 23:01:03 +00003142 cancel_delayed_work_sync(&adapter->work);
3143
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003144 unregister_netdev(adapter->netdev);
3145
Sathya Perla5fb379e2009-06-18 00:02:59 +00003146 be_clear(adapter);
3147
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003148 be_stats_cleanup(adapter);
3149
3150 be_ctrl_cleanup(adapter);
3151
Ajit Khaparde48f5a192011-04-06 18:08:30 +00003152 kfree(adapter->vf_cfg);
Sarveshwar Bandiba343c72010-03-31 02:56:12 +00003153 be_sriov_disable(adapter);
3154
Sathya Perla8d56ff12009-11-22 22:02:26 +00003155 be_msix_disable(adapter);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003156
3157 pci_set_drvdata(pdev, NULL);
3158 pci_release_regions(pdev);
3159 pci_disable_device(pdev);
3160
3161 free_netdev(adapter->netdev);
3162}
3163
Sathya Perla2243e2e2009-11-22 22:02:03 +00003164static int be_get_config(struct be_adapter *adapter)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003165{
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003166 int status;
Sathya Perla2243e2e2009-11-22 22:02:03 +00003167 u8 mac[ETH_ALEN];
Sathya Perla43a04fdc2009-10-14 20:21:17 +00003168
Sathya Perla8788fdc2009-07-27 22:52:03 +00003169 status = be_cmd_get_fw_ver(adapter, adapter->fw_ver);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003170 if (status)
3171 return status;
3172
Sathya Perla3abcded2010-10-03 22:12:27 -07003173 status = be_cmd_query_fw_cfg(adapter, &adapter->port_num,
3174 &adapter->function_mode, &adapter->function_caps);
Sathya Perla2243e2e2009-11-22 22:02:03 +00003175 if (status)
3176 return status;
3177
3178 memset(mac, 0, ETH_ALEN);
Sarveshwar Bandiba343c72010-03-31 02:56:12 +00003179
Mammatha Edhala12f4d0a2011-05-18 03:26:22 +00003180	/* A default permanent address is given to each VF for Lancer */
3181 if (be_physfn(adapter) || lancer_chip(adapter)) {
Sarveshwar Bandiba343c72010-03-31 02:56:12 +00003182 status = be_cmd_mac_addr_query(adapter, mac,
Sathya Perla2243e2e2009-11-22 22:02:03 +00003183 MAC_ADDRESS_TYPE_NETWORK, true /*permanent */, 0);
Ajit Khapardeca9e4982009-11-29 17:56:26 +00003184
Sarveshwar Bandiba343c72010-03-31 02:56:12 +00003185 if (status)
3186 return status;
Ajit Khapardeca9e4982009-11-29 17:56:26 +00003187
Sarveshwar Bandiba343c72010-03-31 02:56:12 +00003188 if (!is_valid_ether_addr(mac))
3189 return -EADDRNOTAVAIL;
3190
3191 memcpy(adapter->netdev->dev_addr, mac, ETH_ALEN);
3192 memcpy(adapter->netdev->perm_addr, mac, ETH_ALEN);
3193 }
Sathya Perla2243e2e2009-11-22 22:02:03 +00003194
Ajit Khaparde3486be22010-07-23 02:04:54 +00003195	if (adapter->function_mode & 0x400) /* FLEX10 mode */
Ajit Khaparde82903e42010-02-09 01:34:57 +00003196 adapter->max_vlans = BE_NUM_VLANS_SUPPORTED/4;
3197 else
3198 adapter->max_vlans = BE_NUM_VLANS_SUPPORTED;
3199
Ajit Khaparde9e1453c2011-02-20 11:42:22 +00003200 status = be_cmd_get_cntl_attributes(adapter);
3201 if (status)
3202 return status;
3203
Sathya Perla2e588f82011-03-11 02:49:26 +00003204 be_cmd_check_native_mode(adapter);
Sathya Perla3c8def92011-06-12 20:01:58 +00003205
3206 if ((num_vfs && adapter->sriov_enabled) ||
3207 (adapter->function_mode & 0x400) ||
3208 lancer_chip(adapter) || !be_physfn(adapter)) {
3209 adapter->num_tx_qs = 1;
3210 netif_set_real_num_tx_queues(adapter->netdev,
3211 adapter->num_tx_qs);
3212 } else {
3213 adapter->num_tx_qs = MAX_TX_QS;
3214 }
3215
Sathya Perla2243e2e2009-11-22 22:02:03 +00003216 return 0;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003217}
3218
Sathya Perlafe6d2a32010-11-21 23:25:50 +00003219static int be_dev_family_check(struct be_adapter *adapter)
3220{
3221 struct pci_dev *pdev = adapter->pdev;
3222 u32 sli_intf = 0, if_type;
3223
3224 switch (pdev->device) {
3225 case BE_DEVICE_ID1:
3226 case OC_DEVICE_ID1:
3227 adapter->generation = BE_GEN2;
3228 break;
3229 case BE_DEVICE_ID2:
3230 case OC_DEVICE_ID2:
3231 adapter->generation = BE_GEN3;
3232 break;
3233 case OC_DEVICE_ID3:
Mammatha Edhala12f4d0a2011-05-18 03:26:22 +00003234 case OC_DEVICE_ID4:
Sathya Perlafe6d2a32010-11-21 23:25:50 +00003235 pci_read_config_dword(pdev, SLI_INTF_REG_OFFSET, &sli_intf);
3236 if_type = (sli_intf & SLI_INTF_IF_TYPE_MASK) >>
3237 SLI_INTF_IF_TYPE_SHIFT;
3238
3239 if (((sli_intf & SLI_INTF_VALID_MASK) != SLI_INTF_VALID) ||
3240 if_type != 0x02) {
3241 dev_err(&pdev->dev, "SLI_INTF reg val is not valid\n");
3242 return -EINVAL;
3243 }
Sathya Perlafe6d2a32010-11-21 23:25:50 +00003244 adapter->sli_family = ((sli_intf & SLI_INTF_FAMILY_MASK) >>
3245 SLI_INTF_FAMILY_SHIFT);
3246 adapter->generation = BE_GEN3;
3247 break;
3248 default:
3249 adapter->generation = 0;
3250 }
3251 return 0;
3252}
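
/*
 * Illustrative sketch, not part of the driver: extracting a bit-field
 * from a config register with the mask-and-shift idiom applied to the
 * SLI_INTF register above. The helper name is hypothetical.
 */
static u32 example_extract_field(u32 reg, u32 mask, int shift)
{
	return (reg & mask) >> shift;
}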
3253
Padmanabh Ratnakar37eed1c2011-03-07 03:08:36 +00003254static int lancer_wait_ready(struct be_adapter *adapter)
3255{
3256#define SLIPORT_READY_TIMEOUT 500
3257 u32 sliport_status;
3258 int status = 0, i;
3259
3260 for (i = 0; i < SLIPORT_READY_TIMEOUT; i++) {
3261 sliport_status = ioread32(adapter->db + SLIPORT_STATUS_OFFSET);
3262 if (sliport_status & SLIPORT_STATUS_RDY_MASK)
3263 break;
3264
3265 msleep(20);
3266 }
3267
3268 if (i == SLIPORT_READY_TIMEOUT)
3269 status = -1;
3270
3271 return status;
3272}
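
/*
 * Illustrative sketch, not part of the driver: the bounded polling
 * pattern that lancer_wait_ready() uses above. A status register is
 * sampled at a fixed interval until a ready bit appears or the retry
 * budget runs out. Names are hypothetical.
 */
static int example_poll_ready(void __iomem *reg, u32 ready_mask, int tries)
{
	int i;

	for (i = 0; i < tries; i++) {
		if (ioread32(reg) & ready_mask)
			return 0;
		msleep(20);	/* ~20 ms between samples */
	}
	return -1;	/* timed out */
}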
3273
3274static int lancer_test_and_set_rdy_state(struct be_adapter *adapter)
3275{
3276 int status;
3277 u32 sliport_status, err, reset_needed;
3278 status = lancer_wait_ready(adapter);
3279 if (!status) {
3280 sliport_status = ioread32(adapter->db + SLIPORT_STATUS_OFFSET);
3281 err = sliport_status & SLIPORT_STATUS_ERR_MASK;
3282 reset_needed = sliport_status & SLIPORT_STATUS_RN_MASK;
3283 if (err && reset_needed) {
3284 iowrite32(SLI_PORT_CONTROL_IP_MASK,
3285 adapter->db + SLIPORT_CONTROL_OFFSET);
3286
3287 /* check adapter has corrected the error */
3288 status = lancer_wait_ready(adapter);
3289 sliport_status = ioread32(adapter->db +
3290 SLIPORT_STATUS_OFFSET);
3291 sliport_status &= (SLIPORT_STATUS_ERR_MASK |
3292 SLIPORT_STATUS_RN_MASK);
3293 if (status || sliport_status)
3294 status = -1;
3295 } else if (err || reset_needed) {
3296 status = -1;
3297 }
3298 }
3299 return status;
3300}
3301
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003302static int __devinit be_probe(struct pci_dev *pdev,
3303 const struct pci_device_id *pdev_id)
3304{
3305 int status = 0;
3306 struct be_adapter *adapter;
3307 struct net_device *netdev;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003308
3309 status = pci_enable_device(pdev);
3310 if (status)
3311 goto do_none;
3312
3313 status = pci_request_regions(pdev, DRV_NAME);
3314 if (status)
3315 goto disable_dev;
3316 pci_set_master(pdev);
3317
Sathya Perla3c8def92011-06-12 20:01:58 +00003318 netdev = alloc_etherdev_mq(sizeof(struct be_adapter), MAX_TX_QS);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003319 if (netdev == NULL) {
3320 status = -ENOMEM;
3321 goto rel_reg;
3322 }
3323 adapter = netdev_priv(netdev);
3324 adapter->pdev = pdev;
3325 pci_set_drvdata(pdev, adapter);
Sathya Perlafe6d2a32010-11-21 23:25:50 +00003326
3327 status = be_dev_family_check(adapter);
Sathya Perla63657b92010-12-01 01:02:28 +00003328 if (status)
Sathya Perlafe6d2a32010-11-21 23:25:50 +00003329 goto free_netdev;
3330
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003331 adapter->netdev = netdev;
Sathya Perla2243e2e2009-11-22 22:02:03 +00003332 SET_NETDEV_DEV(netdev, &pdev->dev);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003333
Ivan Vecera2b7bceb2011-02-02 08:05:12 +00003334 status = dma_set_mask(&pdev->dev, DMA_BIT_MASK(64));
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003335 if (!status) {
3336 netdev->features |= NETIF_F_HIGHDMA;
3337 } else {
Ivan Vecera2b7bceb2011-02-02 08:05:12 +00003338 status = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003339 if (status) {
3340 dev_err(&pdev->dev, "Could not set PCI DMA Mask\n");
3341 goto free_netdev;
3342 }
3343 }
3344
Sarveshwar Bandiba343c72010-03-31 02:56:12 +00003345 be_sriov_enable(adapter);
Ajit Khaparde48f5a192011-04-06 18:08:30 +00003346 if (adapter->sriov_enabled) {
3347 adapter->vf_cfg = kcalloc(num_vfs,
3348 sizeof(struct be_vf_cfg), GFP_KERNEL);
3349
3350 if (!adapter->vf_cfg)
3351 goto free_netdev;
3352 }
Sarveshwar Bandiba343c72010-03-31 02:56:12 +00003353
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003354 status = be_ctrl_init(adapter);
3355 if (status)
Ajit Khaparde48f5a192011-04-06 18:08:30 +00003356 goto free_vf_cfg;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003357
Padmanabh Ratnakar37eed1c2011-03-07 03:08:36 +00003358 if (lancer_chip(adapter)) {
3359 status = lancer_test_and_set_rdy_state(adapter);
3360 if (status) {
3361			dev_err(&pdev->dev, "Adapter in non-recoverable error\n");
Ajit Khaparde48f5a192011-04-06 18:08:30 +00003362 goto ctrl_clean;
Padmanabh Ratnakar37eed1c2011-03-07 03:08:36 +00003363 }
3364 }
3365
Sathya Perla2243e2e2009-11-22 22:02:03 +00003366 /* sync up with fw's ready state */
Sarveshwar Bandiba343c72010-03-31 02:56:12 +00003367 if (be_physfn(adapter)) {
3368 status = be_cmd_POST(adapter);
3369 if (status)
3370 goto ctrl_clean;
Sarveshwar Bandiba343c72010-03-31 02:56:12 +00003371 }
Sathya Perla2243e2e2009-11-22 22:02:03 +00003372
3373 /* tell fw we're ready to fire cmds */
3374 status = be_cmd_fw_init(adapter);
3375 if (status)
3376 goto ctrl_clean;
3377
Ajit Khapardea4b4dfa2011-02-11 13:36:57 +00003378 status = be_cmd_reset_function(adapter);
3379 if (status)
3380 goto ctrl_clean;
Sarveshwar Bandi556ae192010-05-24 18:38:25 -07003381
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003382 status = be_stats_init(adapter);
3383 if (status)
3384 goto ctrl_clean;
3385
Sathya Perla2243e2e2009-11-22 22:02:03 +00003386 status = be_get_config(adapter);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003387 if (status)
3388 goto stats_clean;
3389
Sathya Perla3abcded2010-10-03 22:12:27 -07003390 be_msix_enable(adapter);
3391
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003392 INIT_DELAYED_WORK(&adapter->work, be_worker);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003393
Sathya Perla5fb379e2009-06-18 00:02:59 +00003394 status = be_setup(adapter);
3395 if (status)
Sathya Perla3abcded2010-10-03 22:12:27 -07003396 goto msix_disable;
Sathya Perla2243e2e2009-11-22 22:02:03 +00003397
Sathya Perla3abcded2010-10-03 22:12:27 -07003398 be_netdev_init(netdev);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003399 status = register_netdev(netdev);
3400 if (status != 0)
Sathya Perla5fb379e2009-06-18 00:02:59 +00003401 goto unsetup;
Somnath Kotur63a76942010-10-25 01:11:10 +00003402 netif_carrier_off(netdev);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003403
Ajit Khapardee6319362011-02-11 13:35:41 +00003404 if (be_physfn(adapter) && adapter->sriov_enabled) {
Ajit Khaparded0381c42011-04-19 12:11:55 +00003405 u8 mac_speed;
3406 bool link_up;
3407 u16 vf, lnk_speed;
3408
Mammatha Edhala12f4d0a2011-05-18 03:26:22 +00003409 if (!lancer_chip(adapter)) {
3410 status = be_vf_eth_addr_config(adapter);
3411 if (status)
3412 goto unreg_netdev;
3413 }
Ajit Khaparded0381c42011-04-19 12:11:55 +00003414
3415 for (vf = 0; vf < num_vfs; vf++) {
3416 status = be_cmd_link_status_query(adapter, &link_up,
3417 &mac_speed, &lnk_speed, vf + 1);
3418 if (!status)
3419 adapter->vf_cfg[vf].vf_tx_rate = lnk_speed * 10;
3420 else
3421 goto unreg_netdev;
3422 }
Ajit Khapardee6319362011-02-11 13:35:41 +00003423 }
3424
Ajit Khapardec4ca2372009-05-18 15:38:55 -07003425 dev_info(&pdev->dev, "%s port %d\n", nic_name(pdev), adapter->port_num);
Somnath Kotur34b1ef02011-06-01 00:33:22 +00003426	/* By default all priorities are enabled.
3427	 * Needed in case there is no GRP5 event support.
3428	 */
3429 adapter->vlan_prio_bmap = 0xff;
3430
Somnath Koturf203af72010-10-25 23:01:03 +00003431 schedule_delayed_work(&adapter->work, msecs_to_jiffies(100));
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003432 return 0;
3433
Ajit Khapardee6319362011-02-11 13:35:41 +00003434unreg_netdev:
3435 unregister_netdev(netdev);
Sathya Perla5fb379e2009-06-18 00:02:59 +00003436unsetup:
3437 be_clear(adapter);
Sathya Perla3abcded2010-10-03 22:12:27 -07003438msix_disable:
3439 be_msix_disable(adapter);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003440stats_clean:
3441 be_stats_cleanup(adapter);
3442ctrl_clean:
3443 be_ctrl_cleanup(adapter);
Ajit Khaparde48f5a192011-04-06 18:08:30 +00003444free_vf_cfg:
3445 kfree(adapter->vf_cfg);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003446free_netdev:
Sarveshwar Bandiba343c72010-03-31 02:56:12 +00003447 be_sriov_disable(adapter);
Sathya Perlafe6d2a32010-11-21 23:25:50 +00003448 free_netdev(netdev);
Sathya Perla8d56ff12009-11-22 22:02:26 +00003449 pci_set_drvdata(pdev, NULL);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003450rel_reg:
3451 pci_release_regions(pdev);
3452disable_dev:
3453 pci_disable_device(pdev);
3454do_none:
Ajit Khapardec4ca2372009-05-18 15:38:55 -07003455 dev_err(&pdev->dev, "%s initialization failed\n", nic_name(pdev));
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003456 return status;
3457}
3458
3459static int be_suspend(struct pci_dev *pdev, pm_message_t state)
3460{
3461 struct be_adapter *adapter = pci_get_drvdata(pdev);
3462 struct net_device *netdev = adapter->netdev;
3463
Ajit Khapardea4ca0552011-02-11 13:38:03 +00003464 cancel_delayed_work_sync(&adapter->work);
Ajit Khaparde71d8d1b2009-12-03 06:16:59 +00003465 if (adapter->wol)
3466 be_setup_wol(adapter, true);
3467
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003468 netif_device_detach(netdev);
3469 if (netif_running(netdev)) {
3470 rtnl_lock();
3471 be_close(netdev);
3472 rtnl_unlock();
3473 }
Ajit Khaparde9e90c962009-11-06 02:06:59 +00003474 be_cmd_get_flow_control(adapter, &adapter->tx_fc, &adapter->rx_fc);
Sarveshwar Bandi9b0365f2009-08-12 21:01:29 +00003475 be_clear(adapter);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003476
Ajit Khapardea4ca0552011-02-11 13:38:03 +00003477 be_msix_disable(adapter);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003478 pci_save_state(pdev);
3479 pci_disable_device(pdev);
3480 pci_set_power_state(pdev, pci_choose_state(pdev, state));
3481 return 0;
3482}
3483
3484static int be_resume(struct pci_dev *pdev)
3485{
3486 int status = 0;
3487 struct be_adapter *adapter = pci_get_drvdata(pdev);
3488 struct net_device *netdev = adapter->netdev;
3489
3490 netif_device_detach(netdev);
3491
3492 status = pci_enable_device(pdev);
3493 if (status)
3494 return status;
3495
3496 pci_set_power_state(pdev, 0);
3497 pci_restore_state(pdev);
3498
Ajit Khapardea4ca0552011-02-11 13:38:03 +00003499 be_msix_enable(adapter);
Sathya Perla2243e2e2009-11-22 22:02:03 +00003500 /* tell fw we're ready to fire cmds */
3501 status = be_cmd_fw_init(adapter);
3502 if (status)
3503 return status;
3504
Sarveshwar Bandi9b0365f2009-08-12 21:01:29 +00003505 be_setup(adapter);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003506 if (netif_running(netdev)) {
3507 rtnl_lock();
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003508 be_open(netdev);
3509 rtnl_unlock();
3510 }
3511 netif_device_attach(netdev);
Ajit Khaparde71d8d1b2009-12-03 06:16:59 +00003512
3513 if (adapter->wol)
3514 be_setup_wol(adapter, false);
Ajit Khapardea4ca0552011-02-11 13:38:03 +00003515
3516 schedule_delayed_work(&adapter->work, msecs_to_jiffies(100));
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003517 return 0;
3518}
3519
Sathya Perla82456b02010-02-17 01:35:37 +00003520/*
3521 * An FLR will stop BE from DMAing any data.
3522 */
3523static void be_shutdown(struct pci_dev *pdev)
3524{
3525 struct be_adapter *adapter = pci_get_drvdata(pdev);
Sathya Perla82456b02010-02-17 01:35:37 +00003526
Ajit Khaparde2d5d4152011-04-06 05:53:13 +00003527 if (!adapter)
3528 return;
Sathya Perla82456b02010-02-17 01:35:37 +00003529
Sathya Perla0f4a6822011-03-21 20:49:28 +00003530 cancel_delayed_work_sync(&adapter->work);
Ajit Khapardea4ca0552011-02-11 13:38:03 +00003531
Ajit Khaparde2d5d4152011-04-06 05:53:13 +00003532 netif_device_detach(adapter->netdev);
Sathya Perla82456b02010-02-17 01:35:37 +00003533
Sathya Perla82456b02010-02-17 01:35:37 +00003534 if (adapter->wol)
3535 be_setup_wol(adapter, true);
3536
Ajit Khaparde57841862011-04-06 18:08:43 +00003537 be_cmd_reset_function(adapter);
3538
Sathya Perla82456b02010-02-17 01:35:37 +00003539 pci_disable_device(pdev);
Sathya Perla82456b02010-02-17 01:35:37 +00003540}
3541
Sathya Perlacf588472010-02-14 21:22:01 +00003542static pci_ers_result_t be_eeh_err_detected(struct pci_dev *pdev,
3543 pci_channel_state_t state)
3544{
3545 struct be_adapter *adapter = pci_get_drvdata(pdev);
3546 struct net_device *netdev = adapter->netdev;
3547
3548 dev_err(&adapter->pdev->dev, "EEH error detected\n");
3549
3550 adapter->eeh_err = true;
3551
3552 netif_device_detach(netdev);
3553
3554 if (netif_running(netdev)) {
3555 rtnl_lock();
3556 be_close(netdev);
3557 rtnl_unlock();
3558 }
3559 be_clear(adapter);
3560
3561 if (state == pci_channel_io_perm_failure)
3562 return PCI_ERS_RESULT_DISCONNECT;
3563
3564 pci_disable_device(pdev);
3565
3566 return PCI_ERS_RESULT_NEED_RESET;
3567}
3568
3569static pci_ers_result_t be_eeh_reset(struct pci_dev *pdev)
3570{
3571 struct be_adapter *adapter = pci_get_drvdata(pdev);
3572 int status;
3573
3574 dev_info(&adapter->pdev->dev, "EEH reset\n");
3575 adapter->eeh_err = false;
3576
3577 status = pci_enable_device(pdev);
3578 if (status)
3579 return PCI_ERS_RESULT_DISCONNECT;
3580
3581 pci_set_master(pdev);
3582 pci_set_power_state(pdev, 0);
3583 pci_restore_state(pdev);
3584
3585 /* Check if card is ok and fw is ready */
3586 status = be_cmd_POST(adapter);
3587 if (status)
3588 return PCI_ERS_RESULT_DISCONNECT;
3589
3590 return PCI_ERS_RESULT_RECOVERED;
3591}
3592
3593static void be_eeh_resume(struct pci_dev *pdev)
3594{
3595 int status = 0;
3596 struct be_adapter *adapter = pci_get_drvdata(pdev);
3597 struct net_device *netdev = adapter->netdev;
3598
3599 dev_info(&adapter->pdev->dev, "EEH resume\n");
3600
3601 pci_save_state(pdev);
3602
3603 /* tell fw we're ready to fire cmds */
3604 status = be_cmd_fw_init(adapter);
3605 if (status)
3606 goto err;
3607
3608 status = be_setup(adapter);
3609 if (status)
3610 goto err;
3611
3612 if (netif_running(netdev)) {
3613 status = be_open(netdev);
3614 if (status)
3615 goto err;
3616 }
3617 netif_device_attach(netdev);
3618 return;
3619err:
3620 dev_err(&adapter->pdev->dev, "EEH resume failed\n");
Sathya Perlacf588472010-02-14 21:22:01 +00003621}
3622
3623static struct pci_error_handlers be_eeh_handlers = {
3624 .error_detected = be_eeh_err_detected,
3625 .slot_reset = be_eeh_reset,
3626 .resume = be_eeh_resume,
3627};
3628
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003629static struct pci_driver be_driver = {
3630 .name = DRV_NAME,
3631 .id_table = be_dev_ids,
3632 .probe = be_probe,
3633 .remove = be_remove,
3634 .suspend = be_suspend,
Sathya Perlacf588472010-02-14 21:22:01 +00003635 .resume = be_resume,
Sathya Perla82456b02010-02-17 01:35:37 +00003636 .shutdown = be_shutdown,
Sathya Perlacf588472010-02-14 21:22:01 +00003637 .err_handler = &be_eeh_handlers
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003638};
3639
3640static int __init be_init_module(void)
3641{
Joe Perches8e95a202009-12-03 07:58:21 +00003642 if (rx_frag_size != 8192 && rx_frag_size != 4096 &&
3643 rx_frag_size != 2048) {
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003644 printk(KERN_WARNING DRV_NAME
3645 " : Module param rx_frag_size must be 2048/4096/8192."
3646 " Using 2048\n");
3647 rx_frag_size = 2048;
3648 }
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003649
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003650 return pci_register_driver(&be_driver);
3651}
3652module_init(be_init_module);
3653
3654static void __exit be_exit_module(void)
3655{
3656 pci_unregister_driver(&be_driver);
3657}
3658module_exit(be_exit_module);