blob: ae2d2622a18e5f7c4fff2c9bf84ec2a0f41e025a [file] [log] [blame]
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001/*
Ajit Khaparded2145cd2011-03-16 08:20:46 +00002 * Copyright (C) 2005 - 2011 Emulex
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003 * All rights reserved.
4 *
5 * This program is free software; you can redistribute it and/or
6 * modify it under the terms of the GNU General Public License version 2
7 * as published by the Free Software Foundation. The full GNU General
8 * Public License is included in this distribution in the file called COPYING.
9 *
10 * Contact Information:
Ajit Khaparded2145cd2011-03-16 08:20:46 +000011 * linux-drivers@emulex.com
Sathya Perla6b7c5b92009-03-11 23:32:03 -070012 *
Ajit Khaparded2145cd2011-03-16 08:20:46 +000013 * Emulex
14 * 3333 Susan Street
15 * Costa Mesa, CA 92626
Sathya Perla6b7c5b92009-03-11 23:32:03 -070016 */
17
Paul Gortmaker70c71602011-05-22 16:47:17 -040018#include <linux/prefetch.h>
Sathya Perla6b7c5b92009-03-11 23:32:03 -070019#include "be.h"
Sathya Perla8788fdc2009-07-27 22:52:03 +000020#include "be_cmds.h"
Stephen Hemminger65f71b82009-03-27 00:25:24 -070021#include <asm/div64.h>
Sathya Perla6b7c5b92009-03-11 23:32:03 -070022
23MODULE_VERSION(DRV_VER);
24MODULE_DEVICE_TABLE(pci, be_dev_ids);
25MODULE_DESCRIPTION(DRV_DESC " " DRV_VER);
26MODULE_AUTHOR("ServerEngines Corporation");
27MODULE_LICENSE("GPL");
28
Sathya Perla2e588f82011-03-11 02:49:26 +000029static ushort rx_frag_size = 2048;
Sarveshwar Bandiba343c72010-03-31 02:56:12 +000030static unsigned int num_vfs;
Sathya Perla2e588f82011-03-11 02:49:26 +000031module_param(rx_frag_size, ushort, S_IRUGO);
Sarveshwar Bandiba343c72010-03-31 02:56:12 +000032module_param(num_vfs, uint, S_IRUGO);
Sathya Perla6b7c5b92009-03-11 23:32:03 -070033MODULE_PARM_DESC(rx_frag_size, "Size of a fragment that holds rcvd data.");
Sarveshwar Bandiba343c72010-03-31 02:56:12 +000034MODULE_PARM_DESC(num_vfs, "Number of PCI VFs to initialize");
Sathya Perla6b7c5b92009-03-11 23:32:03 -070035
Sathya Perla6b7c5b92009-03-11 23:32:03 -070036static DEFINE_PCI_DEVICE_TABLE(be_dev_ids) = {
Ajit Khapardec4ca2372009-05-18 15:38:55 -070037 { PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID1) },
Ajit Khaparde59fd5d82009-10-29 01:11:06 -070038 { PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID2) },
Ajit Khapardec4ca2372009-05-18 15:38:55 -070039 { PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID1) },
40 { PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID2) },
Sathya Perlafe6d2a32010-11-21 23:25:50 +000041 { PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID3)},
Mammatha Edhala12f4d0a2011-05-18 03:26:22 +000042 { PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID4)},
Sathya Perla6b7c5b92009-03-11 23:32:03 -070043 { 0 }
44};
45MODULE_DEVICE_TABLE(pci, be_dev_ids);
Ajit Khaparde7c185272010-07-29 06:16:33 +000046/* UE Status Low CSR */
Joe Perches42c8b112011-07-09 02:56:56 -070047static const char * const ue_status_low_desc[] = {
Ajit Khaparde7c185272010-07-29 06:16:33 +000048 "CEV",
49 "CTX",
50 "DBUF",
51 "ERX",
52 "Host",
53 "MPU",
54 "NDMA",
55 "PTC ",
56 "RDMA ",
57 "RXF ",
58 "RXIPS ",
59 "RXULP0 ",
60 "RXULP1 ",
61 "RXULP2 ",
62 "TIM ",
63 "TPOST ",
64 "TPRE ",
65 "TXIPS ",
66 "TXULP0 ",
67 "TXULP1 ",
68 "UC ",
69 "WDMA ",
70 "TXULP2 ",
71 "HOST1 ",
72 "P0_OB_LINK ",
73 "P1_OB_LINK ",
74 "HOST_GPIO ",
75 "MBOX ",
76 "AXGMAC0",
77 "AXGMAC1",
78 "JTAG",
79 "MPU_INTPEND"
80};
81/* UE Status High CSR */
Joe Perches42c8b112011-07-09 02:56:56 -070082static const char * const ue_status_hi_desc[] = {
Ajit Khaparde7c185272010-07-29 06:16:33 +000083 "LPCMEMHOST",
84 "MGMT_MAC",
85 "PCS0ONLINE",
86 "MPU_IRAM",
87 "PCS1ONLINE",
88 "PCTL0",
89 "PCTL1",
90 "PMEM",
91 "RR",
92 "TXPB",
93 "RXPP",
94 "XAUI",
95 "TXP",
96 "ARM",
97 "IPC",
98 "HOST2",
99 "HOST3",
100 "HOST4",
101 "HOST5",
102 "HOST6",
103 "HOST7",
104 "HOST8",
105 "HOST9",
Joe Perches42c8b112011-07-09 02:56:56 -0700106 "NETC",
Ajit Khaparde7c185272010-07-29 06:16:33 +0000107 "Unknown",
108 "Unknown",
109 "Unknown",
110 "Unknown",
111 "Unknown",
112 "Unknown",
113 "Unknown",
114 "Unknown"
115};
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700116
117static void be_queue_free(struct be_adapter *adapter, struct be_queue_info *q)
118{
119 struct be_dma_mem *mem = &q->dma_mem;
120 if (mem->va)
Ivan Vecera2b7bceb2011-02-02 08:05:12 +0000121 dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
122 mem->dma);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700123}
124
125static int be_queue_alloc(struct be_adapter *adapter, struct be_queue_info *q,
126 u16 len, u16 entry_size)
127{
128 struct be_dma_mem *mem = &q->dma_mem;
129
130 memset(q, 0, sizeof(*q));
131 q->len = len;
132 q->entry_size = entry_size;
133 mem->size = len * entry_size;
Ivan Vecera2b7bceb2011-02-02 08:05:12 +0000134 mem->va = dma_alloc_coherent(&adapter->pdev->dev, mem->size, &mem->dma,
135 GFP_KERNEL);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700136 if (!mem->va)
137 return -1;
138 memset(mem->va, 0, mem->size);
139 return 0;
140}
141
Sathya Perla8788fdc2009-07-27 22:52:03 +0000142static void be_intr_set(struct be_adapter *adapter, bool enable)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700143{
Sathya Perla8788fdc2009-07-27 22:52:03 +0000144 u8 __iomem *addr = adapter->pcicfg + PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700145 u32 reg = ioread32(addr);
146 u32 enabled = reg & MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
Sathya Perla5f0b8492009-07-27 22:52:56 +0000147
Sathya Perlacf588472010-02-14 21:22:01 +0000148 if (adapter->eeh_err)
149 return;
150
Sathya Perla5f0b8492009-07-27 22:52:56 +0000151 if (!enabled && enable)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700152 reg |= MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
Sathya Perla5f0b8492009-07-27 22:52:56 +0000153 else if (enabled && !enable)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700154 reg &= ~MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
Sathya Perla5f0b8492009-07-27 22:52:56 +0000155 else
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700156 return;
Sathya Perla5f0b8492009-07-27 22:52:56 +0000157
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700158 iowrite32(reg, addr);
159}
160
Sathya Perla8788fdc2009-07-27 22:52:03 +0000161static void be_rxq_notify(struct be_adapter *adapter, u16 qid, u16 posted)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700162{
163 u32 val = 0;
164 val |= qid & DB_RQ_RING_ID_MASK;
165 val |= posted << DB_RQ_NUM_POSTED_SHIFT;
Sathya Perlaf3eb62d2010-06-29 00:11:17 +0000166
167 wmb();
Sathya Perla8788fdc2009-07-27 22:52:03 +0000168 iowrite32(val, adapter->db + DB_RQ_OFFSET);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700169}
170
Sathya Perla8788fdc2009-07-27 22:52:03 +0000171static void be_txq_notify(struct be_adapter *adapter, u16 qid, u16 posted)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700172{
173 u32 val = 0;
174 val |= qid & DB_TXULP_RING_ID_MASK;
175 val |= (posted & DB_TXULP_NUM_POSTED_MASK) << DB_TXULP_NUM_POSTED_SHIFT;
Sathya Perlaf3eb62d2010-06-29 00:11:17 +0000176
177 wmb();
Sathya Perla8788fdc2009-07-27 22:52:03 +0000178 iowrite32(val, adapter->db + DB_TXULP1_OFFSET);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700179}
180
Sathya Perla8788fdc2009-07-27 22:52:03 +0000181static void be_eq_notify(struct be_adapter *adapter, u16 qid,
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700182 bool arm, bool clear_int, u16 num_popped)
183{
184 u32 val = 0;
185 val |= qid & DB_EQ_RING_ID_MASK;
Sathya Perlafe6d2a32010-11-21 23:25:50 +0000186 val |= ((qid & DB_EQ_RING_ID_EXT_MASK) <<
187 DB_EQ_RING_ID_EXT_MASK_SHIFT);
Sathya Perlacf588472010-02-14 21:22:01 +0000188
189 if (adapter->eeh_err)
190 return;
191
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700192 if (arm)
193 val |= 1 << DB_EQ_REARM_SHIFT;
194 if (clear_int)
195 val |= 1 << DB_EQ_CLR_SHIFT;
196 val |= 1 << DB_EQ_EVNT_SHIFT;
197 val |= num_popped << DB_EQ_NUM_POPPED_SHIFT;
Sathya Perla8788fdc2009-07-27 22:52:03 +0000198 iowrite32(val, adapter->db + DB_EQ_OFFSET);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700199}
200
Sathya Perla8788fdc2009-07-27 22:52:03 +0000201void be_cq_notify(struct be_adapter *adapter, u16 qid, bool arm, u16 num_popped)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700202{
203 u32 val = 0;
204 val |= qid & DB_CQ_RING_ID_MASK;
Sathya Perlafe6d2a32010-11-21 23:25:50 +0000205 val |= ((qid & DB_CQ_RING_ID_EXT_MASK) <<
206 DB_CQ_RING_ID_EXT_MASK_SHIFT);
Sathya Perlacf588472010-02-14 21:22:01 +0000207
208 if (adapter->eeh_err)
209 return;
210
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700211 if (arm)
212 val |= 1 << DB_CQ_REARM_SHIFT;
213 val |= num_popped << DB_CQ_NUM_POPPED_SHIFT;
Sathya Perla8788fdc2009-07-27 22:52:03 +0000214 iowrite32(val, adapter->db + DB_CQ_OFFSET);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700215}
216
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700217static int be_mac_addr_set(struct net_device *netdev, void *p)
218{
219 struct be_adapter *adapter = netdev_priv(netdev);
220 struct sockaddr *addr = p;
221 int status = 0;
222
Ajit Khapardeca9e4982009-11-29 17:56:26 +0000223 if (!is_valid_ether_addr(addr->sa_data))
224 return -EADDRNOTAVAIL;
225
Sarveshwar Bandiba343c72010-03-31 02:56:12 +0000226 /* MAC addr configuration will be done in hardware for VFs
227 * by their corresponding PFs. Just copy to netdev addr here
228 */
229 if (!be_physfn(adapter))
230 goto netdev_addr;
231
Ajit Khapardef8617e02011-02-11 13:36:37 +0000232 status = be_cmd_pmac_del(adapter, adapter->if_handle,
233 adapter->pmac_id, 0);
Sathya Perlaa65027e2009-08-17 00:58:04 +0000234 if (status)
235 return status;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700236
Sathya Perlaa65027e2009-08-17 00:58:04 +0000237 status = be_cmd_pmac_add(adapter, (u8 *)addr->sa_data,
Ajit Khapardef8617e02011-02-11 13:36:37 +0000238 adapter->if_handle, &adapter->pmac_id, 0);
Sarveshwar Bandiba343c72010-03-31 02:56:12 +0000239netdev_addr:
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700240 if (!status)
241 memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
242
243 return status;
244}
245
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000246static void populate_be2_stats(struct be_adapter *adapter)
247{
248
249 struct be_drv_stats *drvs = &adapter->drv_stats;
250 struct be_pmem_stats *pmem_sts = be_pmem_stats_from_cmd(adapter);
251 struct be_port_rxf_stats_v0 *port_stats =
252 be_port_rxf_stats_from_cmd(adapter);
253 struct be_rxf_stats_v0 *rxf_stats =
254 be_rxf_stats_from_cmd(adapter);
255
256 drvs->rx_pause_frames = port_stats->rx_pause_frames;
257 drvs->rx_crc_errors = port_stats->rx_crc_errors;
258 drvs->rx_control_frames = port_stats->rx_control_frames;
259 drvs->rx_in_range_errors = port_stats->rx_in_range_errors;
260 drvs->rx_frame_too_long = port_stats->rx_frame_too_long;
261 drvs->rx_dropped_runt = port_stats->rx_dropped_runt;
262 drvs->rx_ip_checksum_errs = port_stats->rx_ip_checksum_errs;
263 drvs->rx_tcp_checksum_errs = port_stats->rx_tcp_checksum_errs;
264 drvs->rx_udp_checksum_errs = port_stats->rx_udp_checksum_errs;
265 drvs->rxpp_fifo_overflow_drop = port_stats->rx_fifo_overflow;
266 drvs->rx_dropped_tcp_length = port_stats->rx_dropped_tcp_length;
267 drvs->rx_dropped_too_small = port_stats->rx_dropped_too_small;
268 drvs->rx_dropped_too_short = port_stats->rx_dropped_too_short;
269 drvs->rx_out_range_errors = port_stats->rx_out_range_errors;
270 drvs->rx_input_fifo_overflow_drop =
271 port_stats->rx_input_fifo_overflow;
272 drvs->rx_dropped_header_too_small =
273 port_stats->rx_dropped_header_too_small;
274 drvs->rx_address_match_errors =
275 port_stats->rx_address_match_errors;
276 drvs->rx_alignment_symbol_errors =
277 port_stats->rx_alignment_symbol_errors;
278
279 drvs->tx_pauseframes = port_stats->tx_pauseframes;
280 drvs->tx_controlframes = port_stats->tx_controlframes;
281
282 if (adapter->port_num)
283 drvs->jabber_events =
284 rxf_stats->port1_jabber_events;
285 else
286 drvs->jabber_events =
287 rxf_stats->port0_jabber_events;
288 drvs->rx_drops_no_pbuf = rxf_stats->rx_drops_no_pbuf;
289 drvs->rx_drops_no_txpb = rxf_stats->rx_drops_no_txpb;
290 drvs->rx_drops_no_erx_descr = rxf_stats->rx_drops_no_erx_descr;
291 drvs->rx_drops_invalid_ring = rxf_stats->rx_drops_invalid_ring;
292 drvs->forwarded_packets = rxf_stats->forwarded_packets;
293 drvs->rx_drops_mtu = rxf_stats->rx_drops_mtu;
294 drvs->rx_drops_no_tpre_descr =
295 rxf_stats->rx_drops_no_tpre_descr;
296 drvs->rx_drops_too_many_frags =
297 rxf_stats->rx_drops_too_many_frags;
298 adapter->drv_stats.eth_red_drops = pmem_sts->eth_red_drops;
299}
300
301static void populate_be3_stats(struct be_adapter *adapter)
302{
303 struct be_drv_stats *drvs = &adapter->drv_stats;
304 struct be_pmem_stats *pmem_sts = be_pmem_stats_from_cmd(adapter);
305
306 struct be_rxf_stats_v1 *rxf_stats =
307 be_rxf_stats_from_cmd(adapter);
308 struct be_port_rxf_stats_v1 *port_stats =
309 be_port_rxf_stats_from_cmd(adapter);
310
311 drvs->rx_priority_pause_frames = 0;
312 drvs->pmem_fifo_overflow_drop = 0;
313 drvs->rx_pause_frames = port_stats->rx_pause_frames;
314 drvs->rx_crc_errors = port_stats->rx_crc_errors;
315 drvs->rx_control_frames = port_stats->rx_control_frames;
316 drvs->rx_in_range_errors = port_stats->rx_in_range_errors;
317 drvs->rx_frame_too_long = port_stats->rx_frame_too_long;
318 drvs->rx_dropped_runt = port_stats->rx_dropped_runt;
319 drvs->rx_ip_checksum_errs = port_stats->rx_ip_checksum_errs;
320 drvs->rx_tcp_checksum_errs = port_stats->rx_tcp_checksum_errs;
321 drvs->rx_udp_checksum_errs = port_stats->rx_udp_checksum_errs;
322 drvs->rx_dropped_tcp_length = port_stats->rx_dropped_tcp_length;
323 drvs->rx_dropped_too_small = port_stats->rx_dropped_too_small;
324 drvs->rx_dropped_too_short = port_stats->rx_dropped_too_short;
325 drvs->rx_out_range_errors = port_stats->rx_out_range_errors;
326 drvs->rx_dropped_header_too_small =
327 port_stats->rx_dropped_header_too_small;
328 drvs->rx_input_fifo_overflow_drop =
329 port_stats->rx_input_fifo_overflow_drop;
330 drvs->rx_address_match_errors =
331 port_stats->rx_address_match_errors;
332 drvs->rx_alignment_symbol_errors =
333 port_stats->rx_alignment_symbol_errors;
334 drvs->rxpp_fifo_overflow_drop =
335 port_stats->rxpp_fifo_overflow_drop;
336 drvs->tx_pauseframes = port_stats->tx_pauseframes;
337 drvs->tx_controlframes = port_stats->tx_controlframes;
338 drvs->jabber_events = port_stats->jabber_events;
339 drvs->rx_drops_no_pbuf = rxf_stats->rx_drops_no_pbuf;
340 drvs->rx_drops_no_txpb = rxf_stats->rx_drops_no_txpb;
341 drvs->rx_drops_no_erx_descr = rxf_stats->rx_drops_no_erx_descr;
342 drvs->rx_drops_invalid_ring = rxf_stats->rx_drops_invalid_ring;
343 drvs->forwarded_packets = rxf_stats->forwarded_packets;
344 drvs->rx_drops_mtu = rxf_stats->rx_drops_mtu;
345 drvs->rx_drops_no_tpre_descr =
346 rxf_stats->rx_drops_no_tpre_descr;
347 drvs->rx_drops_too_many_frags =
348 rxf_stats->rx_drops_too_many_frags;
349 adapter->drv_stats.eth_red_drops = pmem_sts->eth_red_drops;
350}
351
Selvin Xavier005d5692011-05-16 07:36:35 +0000352static void populate_lancer_stats(struct be_adapter *adapter)
353{
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000354
Selvin Xavier005d5692011-05-16 07:36:35 +0000355 struct be_drv_stats *drvs = &adapter->drv_stats;
356 struct lancer_cmd_pport_stats *pport_stats = pport_stats_from_cmd
357 (adapter);
358 drvs->rx_priority_pause_frames = 0;
359 drvs->pmem_fifo_overflow_drop = 0;
360 drvs->rx_pause_frames =
Selvin Xavieraedfebb2011-06-06 02:27:13 +0000361 make_64bit_val(pport_stats->rx_pause_frames_hi,
362 pport_stats->rx_pause_frames_lo);
Selvin Xavier005d5692011-05-16 07:36:35 +0000363 drvs->rx_crc_errors = make_64bit_val(pport_stats->rx_crc_errors_hi,
364 pport_stats->rx_crc_errors_lo);
365 drvs->rx_control_frames =
366 make_64bit_val(pport_stats->rx_control_frames_hi,
367 pport_stats->rx_control_frames_lo);
368 drvs->rx_in_range_errors = pport_stats->rx_in_range_errors;
369 drvs->rx_frame_too_long =
370 make_64bit_val(pport_stats->rx_internal_mac_errors_hi,
371 pport_stats->rx_frames_too_long_lo);
372 drvs->rx_dropped_runt = pport_stats->rx_dropped_runt;
373 drvs->rx_ip_checksum_errs = pport_stats->rx_ip_checksum_errors;
374 drvs->rx_tcp_checksum_errs = pport_stats->rx_tcp_checksum_errors;
375 drvs->rx_udp_checksum_errs = pport_stats->rx_udp_checksum_errors;
376 drvs->rx_dropped_tcp_length =
377 pport_stats->rx_dropped_invalid_tcp_length;
378 drvs->rx_dropped_too_small = pport_stats->rx_dropped_too_small;
379 drvs->rx_dropped_too_short = pport_stats->rx_dropped_too_short;
380 drvs->rx_out_range_errors = pport_stats->rx_out_of_range_errors;
381 drvs->rx_dropped_header_too_small =
382 pport_stats->rx_dropped_header_too_small;
383 drvs->rx_input_fifo_overflow_drop = pport_stats->rx_fifo_overflow;
384 drvs->rx_address_match_errors = pport_stats->rx_address_match_errors;
385 drvs->rx_alignment_symbol_errors =
386 make_64bit_val(pport_stats->rx_symbol_errors_hi,
387 pport_stats->rx_symbol_errors_lo);
388 drvs->rxpp_fifo_overflow_drop = pport_stats->rx_fifo_overflow;
389 drvs->tx_pauseframes = make_64bit_val(pport_stats->tx_pause_frames_hi,
390 pport_stats->tx_pause_frames_lo);
391 drvs->tx_controlframes =
392 make_64bit_val(pport_stats->tx_control_frames_hi,
393 pport_stats->tx_control_frames_lo);
394 drvs->jabber_events = pport_stats->rx_jabbers;
395 drvs->rx_drops_no_pbuf = 0;
396 drvs->rx_drops_no_txpb = 0;
397 drvs->rx_drops_no_erx_descr = 0;
398 drvs->rx_drops_invalid_ring = pport_stats->rx_drops_invalid_queue;
399 drvs->forwarded_packets = make_64bit_val(pport_stats->num_forwards_hi,
400 pport_stats->num_forwards_lo);
401 drvs->rx_drops_mtu = make_64bit_val(pport_stats->rx_drops_mtu_hi,
402 pport_stats->rx_drops_mtu_lo);
403 drvs->rx_drops_no_tpre_descr = 0;
404 drvs->rx_drops_too_many_frags =
405 make_64bit_val(pport_stats->rx_drops_too_many_frags_hi,
406 pport_stats->rx_drops_too_many_frags_lo);
407}
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000408
409void be_parse_stats(struct be_adapter *adapter)
410{
Selvin Xavier005d5692011-05-16 07:36:35 +0000411 if (adapter->generation == BE_GEN3) {
412 if (lancer_chip(adapter))
413 populate_lancer_stats(adapter);
414 else
415 populate_be3_stats(adapter);
416 } else {
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000417 populate_be2_stats(adapter);
Selvin Xavier005d5692011-05-16 07:36:35 +0000418 }
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000419}
420
Sathya Perlab31c50a2009-09-17 10:30:13 -0700421void netdev_stats_update(struct be_adapter *adapter)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700422{
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000423 struct be_drv_stats *drvs = &adapter->drv_stats;
Ajit Khaparde78122a52009-10-07 03:11:20 -0700424 struct net_device_stats *dev_stats = &adapter->netdev->stats;
Sathya Perla3abcded2010-10-03 22:12:27 -0700425 struct be_rx_obj *rxo;
Sathya Perla3c8def92011-06-12 20:01:58 +0000426 struct be_tx_obj *txo;
Sathya Perla6e533912011-06-26 20:40:48 +0000427 unsigned long pkts = 0, bytes = 0, mcast = 0, drops = 0;
Sathya Perla3abcded2010-10-03 22:12:27 -0700428 int i;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700429
Sathya Perla3abcded2010-10-03 22:12:27 -0700430 for_all_rx_queues(adapter, rxo, i) {
Sathya Perla6e533912011-06-26 20:40:48 +0000431 pkts += rx_stats(rxo)->rx_pkts;
432 bytes += rx_stats(rxo)->rx_bytes;
433 mcast += rx_stats(rxo)->rx_mcast_pkts;
Eric Dumazet1e758962011-07-12 22:09:18 -0700434 drops += rx_stats(rxo)->rx_dropped;
Sathya Perla3abcded2010-10-03 22:12:27 -0700435 /* no space in linux buffers: best possible approximation */
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000436 if (adapter->generation == BE_GEN3) {
Selvin Xavier005d5692011-05-16 07:36:35 +0000437 if (!(lancer_chip(adapter))) {
Sathya Perla6e533912011-06-26 20:40:48 +0000438 struct be_erx_stats_v1 *erx =
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000439 be_erx_stats_from_cmd(adapter);
Sathya Perla6e533912011-06-26 20:40:48 +0000440 drops += erx->rx_drops_no_fragments[rxo->q.id];
Selvin Xavier005d5692011-05-16 07:36:35 +0000441 }
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000442 } else {
Sathya Perla6e533912011-06-26 20:40:48 +0000443 struct be_erx_stats_v0 *erx =
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000444 be_erx_stats_from_cmd(adapter);
Sathya Perla6e533912011-06-26 20:40:48 +0000445 drops += erx->rx_drops_no_fragments[rxo->q.id];
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000446 }
Sathya Perla3abcded2010-10-03 22:12:27 -0700447 }
Sathya Perla6e533912011-06-26 20:40:48 +0000448 dev_stats->rx_packets = pkts;
449 dev_stats->rx_bytes = bytes;
450 dev_stats->multicast = mcast;
451 dev_stats->rx_dropped = drops;
Sathya Perla3abcded2010-10-03 22:12:27 -0700452
Sathya Perla6e533912011-06-26 20:40:48 +0000453 pkts = bytes = 0;
Sathya Perla3c8def92011-06-12 20:01:58 +0000454 for_all_tx_queues(adapter, txo, i) {
Sathya Perla6e533912011-06-26 20:40:48 +0000455 pkts += tx_stats(txo)->be_tx_pkts;
456 bytes += tx_stats(txo)->be_tx_bytes;
Sathya Perla3c8def92011-06-12 20:01:58 +0000457 }
Sathya Perla6e533912011-06-26 20:40:48 +0000458 dev_stats->tx_packets = pkts;
459 dev_stats->tx_bytes = bytes;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700460
461 /* bad pkts received */
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000462 dev_stats->rx_errors = drvs->rx_crc_errors +
463 drvs->rx_alignment_symbol_errors +
464 drvs->rx_in_range_errors +
465 drvs->rx_out_range_errors +
466 drvs->rx_frame_too_long +
467 drvs->rx_dropped_too_small +
468 drvs->rx_dropped_too_short +
469 drvs->rx_dropped_header_too_small +
470 drvs->rx_dropped_tcp_length +
471 drvs->rx_dropped_runt +
472 drvs->rx_tcp_checksum_errs +
473 drvs->rx_ip_checksum_errs +
474 drvs->rx_udp_checksum_errs;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700475
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700476 /* detailed rx errors */
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000477 dev_stats->rx_length_errors = drvs->rx_in_range_errors +
478 drvs->rx_out_range_errors +
479 drvs->rx_frame_too_long;
Sathya Perla68110862009-06-10 02:21:16 +0000480
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000481 dev_stats->rx_crc_errors = drvs->rx_crc_errors;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700482
483 /* frame alignment errors */
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000484 dev_stats->rx_frame_errors = drvs->rx_alignment_symbol_errors;
Sathya Perla68110862009-06-10 02:21:16 +0000485
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700486 /* receiver fifo overrun */
487 /* drops_no_pbuf is no per i/f, it's per BE card */
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000488 dev_stats->rx_fifo_errors = drvs->rxpp_fifo_overflow_drop +
489 drvs->rx_input_fifo_overflow_drop +
490 drvs->rx_drops_no_pbuf;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700491}
492
Sathya Perla8788fdc2009-07-27 22:52:03 +0000493void be_link_status_update(struct be_adapter *adapter, bool link_up)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700494{
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700495 struct net_device *netdev = adapter->netdev;
496
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700497 /* If link came up or went down */
Sathya Perlaa8f447bd2009-06-18 00:10:27 +0000498 if (adapter->link_up != link_up) {
Ajit Khaparde0dffc832009-11-29 17:57:46 +0000499 adapter->link_speed = -1;
Sathya Perlaa8f447bd2009-06-18 00:10:27 +0000500 if (link_up) {
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700501 netif_carrier_on(netdev);
502 printk(KERN_INFO "%s: Link up\n", netdev->name);
Sathya Perlaa8f447bd2009-06-18 00:10:27 +0000503 } else {
Sathya Perlaa8f447bd2009-06-18 00:10:27 +0000504 netif_carrier_off(netdev);
505 printk(KERN_INFO "%s: Link down\n", netdev->name);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700506 }
Sathya Perlaa8f447bd2009-06-18 00:10:27 +0000507 adapter->link_up = link_up;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700508 }
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700509}
510
511/* Update the EQ delay n BE based on the RX frags consumed / sec */
Sathya Perla3abcded2010-10-03 22:12:27 -0700512static void be_rx_eqd_update(struct be_adapter *adapter, struct be_rx_obj *rxo)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700513{
Sathya Perla3abcded2010-10-03 22:12:27 -0700514 struct be_eq_obj *rx_eq = &rxo->rx_eq;
515 struct be_rx_stats *stats = &rxo->stats;
Sathya Perla4097f662009-03-24 16:40:13 -0700516 ulong now = jiffies;
517 u32 eqd;
518
519 if (!rx_eq->enable_aic)
520 return;
521
522 /* Wrapped around */
523 if (time_before(now, stats->rx_fps_jiffies)) {
524 stats->rx_fps_jiffies = now;
525 return;
526 }
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700527
528 /* Update once a second */
Sathya Perla4097f662009-03-24 16:40:13 -0700529 if ((now - stats->rx_fps_jiffies) < HZ)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700530 return;
531
Sathya Perla3abcded2010-10-03 22:12:27 -0700532 stats->rx_fps = (stats->rx_frags - stats->prev_rx_frags) /
Sathya Perla4097f662009-03-24 16:40:13 -0700533 ((now - stats->rx_fps_jiffies) / HZ);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700534
Sathya Perla4097f662009-03-24 16:40:13 -0700535 stats->rx_fps_jiffies = now;
Sathya Perla3abcded2010-10-03 22:12:27 -0700536 stats->prev_rx_frags = stats->rx_frags;
537 eqd = stats->rx_fps / 110000;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700538 eqd = eqd << 3;
539 if (eqd > rx_eq->max_eqd)
540 eqd = rx_eq->max_eqd;
541 if (eqd < rx_eq->min_eqd)
542 eqd = rx_eq->min_eqd;
543 if (eqd < 10)
544 eqd = 0;
545 if (eqd != rx_eq->cur_eqd)
Sathya Perla8788fdc2009-07-27 22:52:03 +0000546 be_cmd_modify_eqd(adapter, rx_eq->q.id, eqd);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700547
548 rx_eq->cur_eqd = eqd;
549}
550
Stephen Hemminger65f71b82009-03-27 00:25:24 -0700551static u32 be_calc_rate(u64 bytes, unsigned long ticks)
552{
553 u64 rate = bytes;
554
555 do_div(rate, ticks / HZ);
556 rate <<= 3; /* bytes/sec -> bits/sec */
557 do_div(rate, 1000000ul); /* MB/Sec */
558
559 return rate;
560}
561
Sathya Perla3c8def92011-06-12 20:01:58 +0000562static void be_tx_rate_update(struct be_tx_obj *txo)
Sathya Perla4097f662009-03-24 16:40:13 -0700563{
Sathya Perla3c8def92011-06-12 20:01:58 +0000564 struct be_tx_stats *stats = tx_stats(txo);
Sathya Perla4097f662009-03-24 16:40:13 -0700565 ulong now = jiffies;
566
567 /* Wrapped around? */
568 if (time_before(now, stats->be_tx_jiffies)) {
569 stats->be_tx_jiffies = now;
570 return;
571 }
572
573 /* Update tx rate once in two seconds */
574 if ((now - stats->be_tx_jiffies) > 2 * HZ) {
Stephen Hemminger65f71b82009-03-27 00:25:24 -0700575 stats->be_tx_rate = be_calc_rate(stats->be_tx_bytes
576 - stats->be_tx_bytes_prev,
577 now - stats->be_tx_jiffies);
Sathya Perla4097f662009-03-24 16:40:13 -0700578 stats->be_tx_jiffies = now;
579 stats->be_tx_bytes_prev = stats->be_tx_bytes;
580 }
581}
582
Sathya Perla3c8def92011-06-12 20:01:58 +0000583static void be_tx_stats_update(struct be_tx_obj *txo,
Ajit Khaparde91992e42010-02-19 13:57:12 +0000584 u32 wrb_cnt, u32 copied, u32 gso_segs, bool stopped)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700585{
Sathya Perla3c8def92011-06-12 20:01:58 +0000586 struct be_tx_stats *stats = tx_stats(txo);
587
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700588 stats->be_tx_reqs++;
589 stats->be_tx_wrbs += wrb_cnt;
590 stats->be_tx_bytes += copied;
Ajit Khaparde91992e42010-02-19 13:57:12 +0000591 stats->be_tx_pkts += (gso_segs ? gso_segs : 1);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700592 if (stopped)
593 stats->be_tx_stops++;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700594}
595
596/* Determine number of WRB entries needed to xmit data in an skb */
Sathya Perlafe6d2a32010-11-21 23:25:50 +0000597static u32 wrb_cnt_for_skb(struct be_adapter *adapter, struct sk_buff *skb,
598 bool *dummy)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700599{
David S. Millerebc8d2a2009-06-09 01:01:31 -0700600 int cnt = (skb->len > skb->data_len);
601
602 cnt += skb_shinfo(skb)->nr_frags;
603
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700604 /* to account for hdr wrb */
605 cnt++;
Sathya Perlafe6d2a32010-11-21 23:25:50 +0000606 if (lancer_chip(adapter) || !(cnt & 1)) {
607 *dummy = false;
608 } else {
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700609 /* add a dummy to make it an even num */
610 cnt++;
611 *dummy = true;
Sathya Perlafe6d2a32010-11-21 23:25:50 +0000612 }
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700613 BUG_ON(cnt > BE_MAX_TX_FRAG_COUNT);
614 return cnt;
615}
616
617static inline void wrb_fill(struct be_eth_wrb *wrb, u64 addr, int len)
618{
619 wrb->frag_pa_hi = upper_32_bits(addr);
620 wrb->frag_pa_lo = addr & 0xFFFFFFFF;
621 wrb->frag_len = len & ETH_WRB_FRAG_LEN_MASK;
622}
623
Somnath Koturcc4ce022010-10-21 07:11:14 -0700624static void wrb_fill_hdr(struct be_adapter *adapter, struct be_eth_hdr_wrb *hdr,
625 struct sk_buff *skb, u32 wrb_cnt, u32 len)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700626{
Somnath Koturcc4ce022010-10-21 07:11:14 -0700627 u8 vlan_prio = 0;
628 u16 vlan_tag = 0;
629
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700630 memset(hdr, 0, sizeof(*hdr));
631
632 AMAP_SET_BITS(struct amap_eth_hdr_wrb, crc, hdr, 1);
633
Ajit Khaparde49e4b8472010-06-14 04:56:07 +0000634 if (skb_is_gso(skb)) {
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700635 AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso, hdr, 1);
636 AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso_mss,
637 hdr, skb_shinfo(skb)->gso_size);
Sathya Perlafe6d2a32010-11-21 23:25:50 +0000638 if (skb_is_gso_v6(skb) && !lancer_chip(adapter))
Ajit Khaparde49e4b8472010-06-14 04:56:07 +0000639 AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso6, hdr, 1);
Sathya Perlafe6d2a32010-11-21 23:25:50 +0000640 if (lancer_chip(adapter) && adapter->sli_family ==
641 LANCER_A0_SLI_FAMILY) {
642 AMAP_SET_BITS(struct amap_eth_hdr_wrb, ipcs, hdr, 1);
643 if (is_tcp_pkt(skb))
644 AMAP_SET_BITS(struct amap_eth_hdr_wrb,
645 tcpcs, hdr, 1);
646 else if (is_udp_pkt(skb))
647 AMAP_SET_BITS(struct amap_eth_hdr_wrb,
648 udpcs, hdr, 1);
649 }
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700650 } else if (skb->ip_summed == CHECKSUM_PARTIAL) {
651 if (is_tcp_pkt(skb))
652 AMAP_SET_BITS(struct amap_eth_hdr_wrb, tcpcs, hdr, 1);
653 else if (is_udp_pkt(skb))
654 AMAP_SET_BITS(struct amap_eth_hdr_wrb, udpcs, hdr, 1);
655 }
656
Ajit Khaparde4c5102f2011-07-12 22:10:01 -0700657 if (vlan_tx_tag_present(skb)) {
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700658 AMAP_SET_BITS(struct amap_eth_hdr_wrb, vlan, hdr, 1);
Somnath Koturcc4ce022010-10-21 07:11:14 -0700659 vlan_tag = vlan_tx_tag_get(skb);
660 vlan_prio = (vlan_tag & VLAN_PRIO_MASK) >> VLAN_PRIO_SHIFT;
661 /* If vlan priority provided by OS is NOT in available bmap */
662 if (!(adapter->vlan_prio_bmap & (1 << vlan_prio)))
663 vlan_tag = (vlan_tag & ~VLAN_PRIO_MASK) |
664 adapter->recommended_prio;
665 AMAP_SET_BITS(struct amap_eth_hdr_wrb, vlan_tag, hdr, vlan_tag);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700666 }
667
668 AMAP_SET_BITS(struct amap_eth_hdr_wrb, event, hdr, 1);
669 AMAP_SET_BITS(struct amap_eth_hdr_wrb, complete, hdr, 1);
670 AMAP_SET_BITS(struct amap_eth_hdr_wrb, num_wrb, hdr, wrb_cnt);
671 AMAP_SET_BITS(struct amap_eth_hdr_wrb, len, hdr, len);
672}
673
Ivan Vecera2b7bceb2011-02-02 08:05:12 +0000674static void unmap_tx_frag(struct device *dev, struct be_eth_wrb *wrb,
Sathya Perla7101e112010-03-22 20:41:12 +0000675 bool unmap_single)
676{
677 dma_addr_t dma;
678
679 be_dws_le_to_cpu(wrb, sizeof(*wrb));
680
681 dma = (u64)wrb->frag_pa_hi << 32 | (u64)wrb->frag_pa_lo;
FUJITA Tomonorib681ee72010-04-04 21:40:18 +0000682 if (wrb->frag_len) {
Sathya Perla7101e112010-03-22 20:41:12 +0000683 if (unmap_single)
Ivan Vecera2b7bceb2011-02-02 08:05:12 +0000684 dma_unmap_single(dev, dma, wrb->frag_len,
685 DMA_TO_DEVICE);
Sathya Perla7101e112010-03-22 20:41:12 +0000686 else
Ivan Vecera2b7bceb2011-02-02 08:05:12 +0000687 dma_unmap_page(dev, dma, wrb->frag_len, DMA_TO_DEVICE);
Sathya Perla7101e112010-03-22 20:41:12 +0000688 }
689}
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700690
Sathya Perla3c8def92011-06-12 20:01:58 +0000691static int make_tx_wrbs(struct be_adapter *adapter, struct be_queue_info *txq,
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700692 struct sk_buff *skb, u32 wrb_cnt, bool dummy_wrb)
693{
Sathya Perla7101e112010-03-22 20:41:12 +0000694 dma_addr_t busaddr;
695 int i, copied = 0;
Ivan Vecera2b7bceb2011-02-02 08:05:12 +0000696 struct device *dev = &adapter->pdev->dev;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700697 struct sk_buff *first_skb = skb;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700698 struct be_eth_wrb *wrb;
699 struct be_eth_hdr_wrb *hdr;
Sathya Perla7101e112010-03-22 20:41:12 +0000700 bool map_single = false;
701 u16 map_head;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700702
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700703 hdr = queue_head_node(txq);
704 queue_head_inc(txq);
Sathya Perla7101e112010-03-22 20:41:12 +0000705 map_head = txq->head;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700706
David S. Millerebc8d2a2009-06-09 01:01:31 -0700707 if (skb->len > skb->data_len) {
Eric Dumazete743d312010-04-14 15:59:40 -0700708 int len = skb_headlen(skb);
Ivan Vecera2b7bceb2011-02-02 08:05:12 +0000709 busaddr = dma_map_single(dev, skb->data, len, DMA_TO_DEVICE);
710 if (dma_mapping_error(dev, busaddr))
Sathya Perla7101e112010-03-22 20:41:12 +0000711 goto dma_err;
712 map_single = true;
David S. Millerebc8d2a2009-06-09 01:01:31 -0700713 wrb = queue_head_node(txq);
714 wrb_fill(wrb, busaddr, len);
715 be_dws_cpu_to_le(wrb, sizeof(*wrb));
716 queue_head_inc(txq);
717 copied += len;
718 }
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700719
David S. Millerebc8d2a2009-06-09 01:01:31 -0700720 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
721 struct skb_frag_struct *frag =
722 &skb_shinfo(skb)->frags[i];
Ivan Vecera2b7bceb2011-02-02 08:05:12 +0000723 busaddr = dma_map_page(dev, frag->page, frag->page_offset,
724 frag->size, DMA_TO_DEVICE);
725 if (dma_mapping_error(dev, busaddr))
Sathya Perla7101e112010-03-22 20:41:12 +0000726 goto dma_err;
David S. Millerebc8d2a2009-06-09 01:01:31 -0700727 wrb = queue_head_node(txq);
728 wrb_fill(wrb, busaddr, frag->size);
729 be_dws_cpu_to_le(wrb, sizeof(*wrb));
730 queue_head_inc(txq);
731 copied += frag->size;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700732 }
733
734 if (dummy_wrb) {
735 wrb = queue_head_node(txq);
736 wrb_fill(wrb, 0, 0);
737 be_dws_cpu_to_le(wrb, sizeof(*wrb));
738 queue_head_inc(txq);
739 }
740
Somnath Koturcc4ce022010-10-21 07:11:14 -0700741 wrb_fill_hdr(adapter, hdr, first_skb, wrb_cnt, copied);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700742 be_dws_cpu_to_le(hdr, sizeof(*hdr));
743
744 return copied;
Sathya Perla7101e112010-03-22 20:41:12 +0000745dma_err:
746 txq->head = map_head;
747 while (copied) {
748 wrb = queue_head_node(txq);
Ivan Vecera2b7bceb2011-02-02 08:05:12 +0000749 unmap_tx_frag(dev, wrb, map_single);
Sathya Perla7101e112010-03-22 20:41:12 +0000750 map_single = false;
751 copied -= wrb->frag_len;
752 queue_head_inc(txq);
753 }
754 return 0;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700755}
756
Stephen Hemminger613573252009-08-31 19:50:58 +0000757static netdev_tx_t be_xmit(struct sk_buff *skb,
Sathya Perlab31c50a2009-09-17 10:30:13 -0700758 struct net_device *netdev)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700759{
760 struct be_adapter *adapter = netdev_priv(netdev);
Sathya Perla3c8def92011-06-12 20:01:58 +0000761 struct be_tx_obj *txo = &adapter->tx_obj[skb_get_queue_mapping(skb)];
762 struct be_queue_info *txq = &txo->q;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700763 u32 wrb_cnt = 0, copied = 0;
764 u32 start = txq->head;
765 bool dummy_wrb, stopped = false;
766
Sathya Perlafe6d2a32010-11-21 23:25:50 +0000767 wrb_cnt = wrb_cnt_for_skb(adapter, skb, &dummy_wrb);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700768
Sathya Perla3c8def92011-06-12 20:01:58 +0000769 copied = make_tx_wrbs(adapter, txq, skb, wrb_cnt, dummy_wrb);
Ajit Khapardec190e3c2009-09-04 03:12:29 +0000770 if (copied) {
771 /* record the sent skb in the sent_skb table */
Sathya Perla3c8def92011-06-12 20:01:58 +0000772 BUG_ON(txo->sent_skb_list[start]);
773 txo->sent_skb_list[start] = skb;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700774
Ajit Khapardec190e3c2009-09-04 03:12:29 +0000775 /* Ensure txq has space for the next skb; Else stop the queue
776 * *BEFORE* ringing the tx doorbell, so that we serialze the
777 * tx compls of the current transmit which'll wake up the queue
778 */
Sathya Perla7101e112010-03-22 20:41:12 +0000779 atomic_add(wrb_cnt, &txq->used);
Ajit Khapardec190e3c2009-09-04 03:12:29 +0000780 if ((BE_MAX_TX_FRAG_COUNT + atomic_read(&txq->used)) >=
781 txq->len) {
Sathya Perla3c8def92011-06-12 20:01:58 +0000782 netif_stop_subqueue(netdev, skb_get_queue_mapping(skb));
Ajit Khapardec190e3c2009-09-04 03:12:29 +0000783 stopped = true;
784 }
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700785
Ajit Khapardec190e3c2009-09-04 03:12:29 +0000786 be_txq_notify(adapter, txq->id, wrb_cnt);
787
Sathya Perla3c8def92011-06-12 20:01:58 +0000788 be_tx_stats_update(txo, wrb_cnt, copied,
Ajit Khaparde91992e42010-02-19 13:57:12 +0000789 skb_shinfo(skb)->gso_segs, stopped);
Ajit Khapardec190e3c2009-09-04 03:12:29 +0000790 } else {
791 txq->head = start;
792 dev_kfree_skb_any(skb);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700793 }
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700794 return NETDEV_TX_OK;
795}
796
797static int be_change_mtu(struct net_device *netdev, int new_mtu)
798{
799 struct be_adapter *adapter = netdev_priv(netdev);
800 if (new_mtu < BE_MIN_MTU ||
Ajit Khaparde34a89b82010-02-09 01:32:43 +0000801 new_mtu > (BE_MAX_JUMBO_FRAME_SIZE -
802 (ETH_HLEN + ETH_FCS_LEN))) {
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700803 dev_info(&adapter->pdev->dev,
804 "MTU must be between %d and %d bytes\n",
Ajit Khaparde34a89b82010-02-09 01:32:43 +0000805 BE_MIN_MTU,
806 (BE_MAX_JUMBO_FRAME_SIZE - (ETH_HLEN + ETH_FCS_LEN)));
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700807 return -EINVAL;
808 }
809 dev_info(&adapter->pdev->dev, "MTU changed from %d to %d bytes\n",
810 netdev->mtu, new_mtu);
811 netdev->mtu = new_mtu;
812 return 0;
813}
814
815/*
Ajit Khaparde82903e42010-02-09 01:34:57 +0000816 * A max of 64 (BE_NUM_VLANS_SUPPORTED) vlans can be configured in BE.
817 * If the user configures more, place BE in vlan promiscuous mode.
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700818 */
Ajit Khaparde1da87b72010-07-23 01:51:22 +0000819static int be_vid_config(struct be_adapter *adapter, bool vf, u32 vf_num)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700820{
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700821 u16 vtag[BE_NUM_VLANS_SUPPORTED];
822 u16 ntags = 0, i;
Ajit Khaparde82903e42010-02-09 01:34:57 +0000823 int status = 0;
Ajit Khaparde1da87b72010-07-23 01:51:22 +0000824 u32 if_handle;
825
826 if (vf) {
827 if_handle = adapter->vf_cfg[vf_num].vf_if_handle;
828 vtag[0] = cpu_to_le16(adapter->vf_cfg[vf_num].vf_vlan_tag);
829 status = be_cmd_vlan_config(adapter, if_handle, vtag, 1, 1, 0);
830 }
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700831
Ajit Khaparde82903e42010-02-09 01:34:57 +0000832 if (adapter->vlans_added <= adapter->max_vlans) {
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700833 /* Construct VLAN Table to give to HW */
Jesse Grossb7381272010-10-20 13:56:02 +0000834 for (i = 0; i < VLAN_N_VID; i++) {
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700835 if (adapter->vlan_tag[i]) {
836 vtag[ntags] = cpu_to_le16(i);
837 ntags++;
838 }
839 }
Sathya Perlab31c50a2009-09-17 10:30:13 -0700840 status = be_cmd_vlan_config(adapter, adapter->if_handle,
841 vtag, ntags, 1, 0);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700842 } else {
Sathya Perlab31c50a2009-09-17 10:30:13 -0700843 status = be_cmd_vlan_config(adapter, adapter->if_handle,
844 NULL, 0, 1, 1);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700845 }
Ajit Khaparde1da87b72010-07-23 01:51:22 +0000846
Sathya Perlab31c50a2009-09-17 10:30:13 -0700847 return status;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700848}
849
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700850static void be_vlan_add_vid(struct net_device *netdev, u16 vid)
851{
852 struct be_adapter *adapter = netdev_priv(netdev);
853
Ajit Khaparde1da87b72010-07-23 01:51:22 +0000854 adapter->vlans_added++;
Sarveshwar Bandiba343c72010-03-31 02:56:12 +0000855 if (!be_physfn(adapter))
856 return;
857
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700858 adapter->vlan_tag[vid] = 1;
Ajit Khaparde82903e42010-02-09 01:34:57 +0000859 if (adapter->vlans_added <= (adapter->max_vlans + 1))
Ajit Khaparde1da87b72010-07-23 01:51:22 +0000860 be_vid_config(adapter, false, 0);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700861}
862
863static void be_vlan_rem_vid(struct net_device *netdev, u16 vid)
864{
865 struct be_adapter *adapter = netdev_priv(netdev);
866
Ajit Khaparde1da87b72010-07-23 01:51:22 +0000867 adapter->vlans_added--;
Ajit Khaparde1da87b72010-07-23 01:51:22 +0000868
Sarveshwar Bandiba343c72010-03-31 02:56:12 +0000869 if (!be_physfn(adapter))
870 return;
871
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700872 adapter->vlan_tag[vid] = 0;
Ajit Khaparde82903e42010-02-09 01:34:57 +0000873 if (adapter->vlans_added <= adapter->max_vlans)
Ajit Khaparde1da87b72010-07-23 01:51:22 +0000874 be_vid_config(adapter, false, 0);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700875}
876
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700877static void be_set_multicast_list(struct net_device *netdev)
878{
879 struct be_adapter *adapter = netdev_priv(netdev);
880
881 if (netdev->flags & IFF_PROMISC) {
Padmanabh Ratnakarecd0bf02011-05-10 05:13:26 +0000882 be_cmd_promiscuous_config(adapter, true);
Sathya Perla24307ee2009-06-18 00:09:25 +0000883 adapter->promiscuous = true;
884 goto done;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700885 }
Sathya Perla24307ee2009-06-18 00:09:25 +0000886
Lucas De Marchi25985ed2011-03-30 22:57:33 -0300887 /* BE was previously in promiscuous mode; disable it */
Sathya Perla24307ee2009-06-18 00:09:25 +0000888 if (adapter->promiscuous) {
889 adapter->promiscuous = false;
Padmanabh Ratnakarecd0bf02011-05-10 05:13:26 +0000890 be_cmd_promiscuous_config(adapter, false);
Sathya Perla24307ee2009-06-18 00:09:25 +0000891 }
892
Sathya Perlae7b909a2009-11-22 22:01:10 +0000893 /* Enable multicast promisc if num configured exceeds what we support */
Jiri Pirko4cd24ea2010-02-08 04:30:35 +0000894 if (netdev->flags & IFF_ALLMULTI ||
895 netdev_mc_count(netdev) > BE_MAX_MC) {
Jiri Pirko0ddf4772010-02-20 00:13:58 +0000896 be_cmd_multicast_set(adapter, adapter->if_handle, NULL,
Sathya Perlae7b909a2009-11-22 22:01:10 +0000897 &adapter->mc_cmd_mem);
Sathya Perla24307ee2009-06-18 00:09:25 +0000898 goto done;
899 }
900
Jiri Pirko0ddf4772010-02-20 00:13:58 +0000901 be_cmd_multicast_set(adapter, adapter->if_handle, netdev,
Sathya Perlaf31e50a2010-03-02 03:56:39 -0800902 &adapter->mc_cmd_mem);
Sathya Perla24307ee2009-06-18 00:09:25 +0000903done:
904 return;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700905}
906
Sarveshwar Bandiba343c72010-03-31 02:56:12 +0000907static int be_set_vf_mac(struct net_device *netdev, int vf, u8 *mac)
908{
909 struct be_adapter *adapter = netdev_priv(netdev);
910 int status;
911
912 if (!adapter->sriov_enabled)
913 return -EPERM;
914
915 if (!is_valid_ether_addr(mac) || (vf >= num_vfs))
916 return -EINVAL;
917
Ajit Khaparde64600ea2010-07-23 01:50:34 +0000918 if (adapter->vf_cfg[vf].vf_pmac_id != BE_INVALID_PMAC_ID)
919 status = be_cmd_pmac_del(adapter,
920 adapter->vf_cfg[vf].vf_if_handle,
Ajit Khapardef8617e02011-02-11 13:36:37 +0000921 adapter->vf_cfg[vf].vf_pmac_id, vf + 1);
Sarveshwar Bandiba343c72010-03-31 02:56:12 +0000922
Ajit Khaparde64600ea2010-07-23 01:50:34 +0000923 status = be_cmd_pmac_add(adapter, mac,
924 adapter->vf_cfg[vf].vf_if_handle,
Ajit Khapardef8617e02011-02-11 13:36:37 +0000925 &adapter->vf_cfg[vf].vf_pmac_id, vf + 1);
Ajit Khaparde64600ea2010-07-23 01:50:34 +0000926
927 if (status)
Sarveshwar Bandiba343c72010-03-31 02:56:12 +0000928 dev_err(&adapter->pdev->dev, "MAC %pM set on VF %d Failed\n",
929 mac, vf);
Ajit Khaparde64600ea2010-07-23 01:50:34 +0000930 else
931 memcpy(adapter->vf_cfg[vf].vf_mac_addr, mac, ETH_ALEN);
932
Sarveshwar Bandiba343c72010-03-31 02:56:12 +0000933 return status;
934}
935
Ajit Khaparde64600ea2010-07-23 01:50:34 +0000936static int be_get_vf_config(struct net_device *netdev, int vf,
937 struct ifla_vf_info *vi)
938{
939 struct be_adapter *adapter = netdev_priv(netdev);
940
941 if (!adapter->sriov_enabled)
942 return -EPERM;
943
944 if (vf >= num_vfs)
945 return -EINVAL;
946
947 vi->vf = vf;
Ajit Khapardee1d18732010-07-23 01:52:13 +0000948 vi->tx_rate = adapter->vf_cfg[vf].vf_tx_rate;
Ajit Khaparde1da87b72010-07-23 01:51:22 +0000949 vi->vlan = adapter->vf_cfg[vf].vf_vlan_tag;
Ajit Khaparde64600ea2010-07-23 01:50:34 +0000950 vi->qos = 0;
951 memcpy(&vi->mac, adapter->vf_cfg[vf].vf_mac_addr, ETH_ALEN);
952
953 return 0;
954}
955
Ajit Khaparde1da87b72010-07-23 01:51:22 +0000956static int be_set_vf_vlan(struct net_device *netdev,
957 int vf, u16 vlan, u8 qos)
958{
959 struct be_adapter *adapter = netdev_priv(netdev);
960 int status = 0;
961
962 if (!adapter->sriov_enabled)
963 return -EPERM;
964
965 if ((vf >= num_vfs) || (vlan > 4095))
966 return -EINVAL;
967
968 if (vlan) {
969 adapter->vf_cfg[vf].vf_vlan_tag = vlan;
970 adapter->vlans_added++;
971 } else {
972 adapter->vf_cfg[vf].vf_vlan_tag = 0;
973 adapter->vlans_added--;
974 }
975
976 status = be_vid_config(adapter, true, vf);
977
978 if (status)
979 dev_info(&adapter->pdev->dev,
980 "VLAN %d config on VF %d failed\n", vlan, vf);
981 return status;
982}
983
Ajit Khapardee1d18732010-07-23 01:52:13 +0000984static int be_set_vf_tx_rate(struct net_device *netdev,
985 int vf, int rate)
986{
987 struct be_adapter *adapter = netdev_priv(netdev);
988 int status = 0;
989
990 if (!adapter->sriov_enabled)
991 return -EPERM;
992
993 if ((vf >= num_vfs) || (rate < 0))
994 return -EINVAL;
995
996 if (rate > 10000)
997 rate = 10000;
998
999 adapter->vf_cfg[vf].vf_tx_rate = rate;
Ajit Khaparde856c4012011-02-11 13:32:32 +00001000 status = be_cmd_set_qos(adapter, rate / 10, vf + 1);
Ajit Khapardee1d18732010-07-23 01:52:13 +00001001
1002 if (status)
1003 dev_info(&adapter->pdev->dev,
1004 "tx rate %d on VF %d failed\n", rate, vf);
1005 return status;
1006}
1007
Sathya Perla3abcded2010-10-03 22:12:27 -07001008static void be_rx_rate_update(struct be_rx_obj *rxo)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001009{
Sathya Perla3abcded2010-10-03 22:12:27 -07001010 struct be_rx_stats *stats = &rxo->stats;
Sathya Perla4097f662009-03-24 16:40:13 -07001011 ulong now = jiffies;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001012
Sathya Perla4097f662009-03-24 16:40:13 -07001013 /* Wrapped around */
Sathya Perla3abcded2010-10-03 22:12:27 -07001014 if (time_before(now, stats->rx_jiffies)) {
1015 stats->rx_jiffies = now;
Sathya Perla4097f662009-03-24 16:40:13 -07001016 return;
1017 }
1018
1019 /* Update the rate once in two seconds */
Sathya Perla3abcded2010-10-03 22:12:27 -07001020 if ((now - stats->rx_jiffies) < 2 * HZ)
Sathya Perla4097f662009-03-24 16:40:13 -07001021 return;
1022
Sathya Perla3abcded2010-10-03 22:12:27 -07001023 stats->rx_rate = be_calc_rate(stats->rx_bytes - stats->rx_bytes_prev,
1024 now - stats->rx_jiffies);
1025 stats->rx_jiffies = now;
1026 stats->rx_bytes_prev = stats->rx_bytes;
Sathya Perla4097f662009-03-24 16:40:13 -07001027}
1028
Sathya Perla3abcded2010-10-03 22:12:27 -07001029static void be_rx_stats_update(struct be_rx_obj *rxo,
Sathya Perla2e588f82011-03-11 02:49:26 +00001030 struct be_rx_compl_info *rxcp)
Sathya Perla4097f662009-03-24 16:40:13 -07001031{
Sathya Perla3abcded2010-10-03 22:12:27 -07001032 struct be_rx_stats *stats = &rxo->stats;
Sathya Perla4097f662009-03-24 16:40:13 -07001033
Sathya Perla3abcded2010-10-03 22:12:27 -07001034 stats->rx_compl++;
Sathya Perla2e588f82011-03-11 02:49:26 +00001035 stats->rx_frags += rxcp->num_rcvd;
1036 stats->rx_bytes += rxcp->pkt_size;
Sathya Perla3abcded2010-10-03 22:12:27 -07001037 stats->rx_pkts++;
Sathya Perla2e588f82011-03-11 02:49:26 +00001038 if (rxcp->pkt_type == BE_MULTICAST_PACKET)
Sathya Perla3abcded2010-10-03 22:12:27 -07001039 stats->rx_mcast_pkts++;
Sathya Perla2e588f82011-03-11 02:49:26 +00001040 if (rxcp->err)
1041 stats->rxcp_err++;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001042}
1043
Sathya Perla2e588f82011-03-11 02:49:26 +00001044static inline bool csum_passed(struct be_rx_compl_info *rxcp)
Ajit Khaparde728a9972009-04-13 15:41:22 -07001045{
Padmanabh Ratnakar19fad862011-03-07 03:08:16 +00001046 /* L4 checksum is not reliable for non TCP/UDP packets.
1047 * Also ignore ipcksm for ipv6 pkts */
Sathya Perla2e588f82011-03-11 02:49:26 +00001048 return (rxcp->tcpf || rxcp->udpf) && rxcp->l4_csum &&
1049 (rxcp->ip_csum || rxcp->ipv6);
Ajit Khaparde728a9972009-04-13 15:41:22 -07001050}
1051
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001052static struct be_rx_page_info *
Sathya Perla3abcded2010-10-03 22:12:27 -07001053get_rx_page_info(struct be_adapter *adapter,
1054 struct be_rx_obj *rxo,
1055 u16 frag_idx)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001056{
1057 struct be_rx_page_info *rx_page_info;
Sathya Perla3abcded2010-10-03 22:12:27 -07001058 struct be_queue_info *rxq = &rxo->q;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001059
Sathya Perla3abcded2010-10-03 22:12:27 -07001060 rx_page_info = &rxo->page_info_tbl[frag_idx];
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001061 BUG_ON(!rx_page_info->page);
1062
Ajit Khaparde205859a2010-02-09 01:34:21 +00001063 if (rx_page_info->last_page_user) {
Ivan Vecera2b7bceb2011-02-02 08:05:12 +00001064 dma_unmap_page(&adapter->pdev->dev,
1065 dma_unmap_addr(rx_page_info, bus),
1066 adapter->big_page_size, DMA_FROM_DEVICE);
Ajit Khaparde205859a2010-02-09 01:34:21 +00001067 rx_page_info->last_page_user = false;
1068 }
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001069
1070 atomic_dec(&rxq->used);
1071 return rx_page_info;
1072}
1073
1074/* Throwaway the data in the Rx completion */
1075static void be_rx_compl_discard(struct be_adapter *adapter,
Sathya Perla3abcded2010-10-03 22:12:27 -07001076 struct be_rx_obj *rxo,
Sathya Perla2e588f82011-03-11 02:49:26 +00001077 struct be_rx_compl_info *rxcp)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001078{
Sathya Perla3abcded2010-10-03 22:12:27 -07001079 struct be_queue_info *rxq = &rxo->q;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001080 struct be_rx_page_info *page_info;
Sathya Perla2e588f82011-03-11 02:49:26 +00001081 u16 i, num_rcvd = rxcp->num_rcvd;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001082
Padmanabh Ratnakare80d9da2011-03-07 03:07:58 +00001083 for (i = 0; i < num_rcvd; i++) {
Sathya Perla2e588f82011-03-11 02:49:26 +00001084 page_info = get_rx_page_info(adapter, rxo, rxcp->rxq_idx);
Padmanabh Ratnakare80d9da2011-03-07 03:07:58 +00001085 put_page(page_info->page);
1086 memset(page_info, 0, sizeof(*page_info));
Sathya Perla2e588f82011-03-11 02:49:26 +00001087 index_inc(&rxcp->rxq_idx, rxq->len);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001088 }
1089}
1090
1091/*
1092 * skb_fill_rx_data forms a complete skb for an ether frame
1093 * indicated by rxcp.
1094 */
Sathya Perla3abcded2010-10-03 22:12:27 -07001095static void skb_fill_rx_data(struct be_adapter *adapter, struct be_rx_obj *rxo,
Sathya Perla2e588f82011-03-11 02:49:26 +00001096 struct sk_buff *skb, struct be_rx_compl_info *rxcp)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001097{
Sathya Perla3abcded2010-10-03 22:12:27 -07001098 struct be_queue_info *rxq = &rxo->q;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001099 struct be_rx_page_info *page_info;
Sathya Perla2e588f82011-03-11 02:49:26 +00001100 u16 i, j;
1101 u16 hdr_len, curr_frag_len, remaining;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001102 u8 *start;
1103
Sathya Perla2e588f82011-03-11 02:49:26 +00001104 page_info = get_rx_page_info(adapter, rxo, rxcp->rxq_idx);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001105 start = page_address(page_info->page) + page_info->page_offset;
1106 prefetch(start);
1107
1108 /* Copy data in the first descriptor of this completion */
Sathya Perla2e588f82011-03-11 02:49:26 +00001109 curr_frag_len = min(rxcp->pkt_size, rx_frag_size);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001110
1111 /* Copy the header portion into skb_data */
Sathya Perla2e588f82011-03-11 02:49:26 +00001112 hdr_len = min(BE_HDR_LEN, curr_frag_len);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001113 memcpy(skb->data, start, hdr_len);
1114 skb->len = curr_frag_len;
1115 if (curr_frag_len <= BE_HDR_LEN) { /* tiny packet */
1116 /* Complete packet has now been moved to data */
1117 put_page(page_info->page);
1118 skb->data_len = 0;
1119 skb->tail += curr_frag_len;
1120 } else {
1121 skb_shinfo(skb)->nr_frags = 1;
1122 skb_shinfo(skb)->frags[0].page = page_info->page;
1123 skb_shinfo(skb)->frags[0].page_offset =
1124 page_info->page_offset + hdr_len;
1125 skb_shinfo(skb)->frags[0].size = curr_frag_len - hdr_len;
1126 skb->data_len = curr_frag_len - hdr_len;
1127 skb->tail += hdr_len;
1128 }
Ajit Khaparde205859a2010-02-09 01:34:21 +00001129 page_info->page = NULL;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001130
Sathya Perla2e588f82011-03-11 02:49:26 +00001131 if (rxcp->pkt_size <= rx_frag_size) {
1132 BUG_ON(rxcp->num_rcvd != 1);
1133 return;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001134 }
1135
1136 /* More frags present for this completion */
Sathya Perla2e588f82011-03-11 02:49:26 +00001137 index_inc(&rxcp->rxq_idx, rxq->len);
1138 remaining = rxcp->pkt_size - curr_frag_len;
1139 for (i = 1, j = 0; i < rxcp->num_rcvd; i++) {
1140 page_info = get_rx_page_info(adapter, rxo, rxcp->rxq_idx);
1141 curr_frag_len = min(remaining, rx_frag_size);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001142
Ajit Khapardebd46cb62009-06-26 02:51:07 +00001143 /* Coalesce all frags from the same physical page in one slot */
1144 if (page_info->page_offset == 0) {
1145 /* Fresh page */
1146 j++;
1147 skb_shinfo(skb)->frags[j].page = page_info->page;
1148 skb_shinfo(skb)->frags[j].page_offset =
1149 page_info->page_offset;
1150 skb_shinfo(skb)->frags[j].size = 0;
1151 skb_shinfo(skb)->nr_frags++;
1152 } else {
1153 put_page(page_info->page);
1154 }
1155
1156 skb_shinfo(skb)->frags[j].size += curr_frag_len;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001157 skb->len += curr_frag_len;
1158 skb->data_len += curr_frag_len;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001159
Sathya Perla2e588f82011-03-11 02:49:26 +00001160 remaining -= curr_frag_len;
1161 index_inc(&rxcp->rxq_idx, rxq->len);
Ajit Khaparde205859a2010-02-09 01:34:21 +00001162 page_info->page = NULL;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001163 }
Ajit Khapardebd46cb62009-06-26 02:51:07 +00001164 BUG_ON(j > MAX_SKB_FRAGS);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001165}
1166
Ajit Khaparde5be93b92009-07-21 12:36:19 -07001167/* Process the RX completion indicated by rxcp when GRO is disabled */
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001168static void be_rx_compl_process(struct be_adapter *adapter,
Sathya Perla3abcded2010-10-03 22:12:27 -07001169 struct be_rx_obj *rxo,
Sathya Perla2e588f82011-03-11 02:49:26 +00001170 struct be_rx_compl_info *rxcp)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001171{
Michał Mirosław6332c8d2011-04-07 02:43:48 +00001172 struct net_device *netdev = adapter->netdev;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001173 struct sk_buff *skb;
Sathya Perla89420422010-02-17 01:35:26 +00001174
Michał Mirosław6332c8d2011-04-07 02:43:48 +00001175 skb = netdev_alloc_skb_ip_align(netdev, BE_HDR_LEN);
Sathya Perlaa058a632010-02-17 01:34:22 +00001176 if (unlikely(!skb)) {
Eric Dumazet1e758962011-07-12 22:09:18 -07001177 rxo->stats.rx_dropped++;
Sathya Perla3abcded2010-10-03 22:12:27 -07001178 be_rx_compl_discard(adapter, rxo, rxcp);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001179 return;
1180 }
1181
Sathya Perla2e588f82011-03-11 02:49:26 +00001182 skb_fill_rx_data(adapter, rxo, skb, rxcp);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001183
Michał Mirosław6332c8d2011-04-07 02:43:48 +00001184 if (likely((netdev->features & NETIF_F_RXCSUM) && csum_passed(rxcp)))
Ajit Khaparde728a9972009-04-13 15:41:22 -07001185 skb->ip_summed = CHECKSUM_UNNECESSARY;
Somnath Koturc6ce2f42010-10-25 01:11:58 +00001186 else
1187 skb_checksum_none_assert(skb);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001188
1189 skb->truesize = skb->len + sizeof(struct sk_buff);
Michał Mirosław6332c8d2011-04-07 02:43:48 +00001190 skb->protocol = eth_type_trans(skb, netdev);
Ajit Khaparde4b972912011-04-06 18:07:43 +00001191 if (adapter->netdev->features & NETIF_F_RXHASH)
1192 skb->rxhash = rxcp->rss_hash;
1193
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001194
Ajit Khaparde4c5102f2011-07-12 22:10:01 -07001195 if (unlikely(rxcp->vlanf))
1196 __vlan_hwaccel_put_tag(skb, rxcp->vlan_tag);
1197
1198 netif_receive_skb(skb);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001199}
1200
Ajit Khaparde5be93b92009-07-21 12:36:19 -07001201/* Process the RX completion indicated by rxcp when GRO is enabled */
1202static void be_rx_compl_process_gro(struct be_adapter *adapter,
Sathya Perla3abcded2010-10-03 22:12:27 -07001203 struct be_rx_obj *rxo,
Sathya Perla2e588f82011-03-11 02:49:26 +00001204 struct be_rx_compl_info *rxcp)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001205{
1206 struct be_rx_page_info *page_info;
Ajit Khaparde5be93b92009-07-21 12:36:19 -07001207 struct sk_buff *skb = NULL;
Sathya Perla3abcded2010-10-03 22:12:27 -07001208 struct be_queue_info *rxq = &rxo->q;
1209 struct be_eq_obj *eq_obj = &rxo->rx_eq;
Sathya Perla2e588f82011-03-11 02:49:26 +00001210 u16 remaining, curr_frag_len;
1211 u16 i, j;
Ajit Khaparde3968fa12011-02-20 11:41:53 +00001212
Ajit Khaparde5be93b92009-07-21 12:36:19 -07001213 skb = napi_get_frags(&eq_obj->napi);
1214 if (!skb) {
Sathya Perla3abcded2010-10-03 22:12:27 -07001215 be_rx_compl_discard(adapter, rxo, rxcp);
Ajit Khaparde5be93b92009-07-21 12:36:19 -07001216 return;
1217 }
1218
Sathya Perla2e588f82011-03-11 02:49:26 +00001219 remaining = rxcp->pkt_size;
1220 for (i = 0, j = -1; i < rxcp->num_rcvd; i++) {
1221 page_info = get_rx_page_info(adapter, rxo, rxcp->rxq_idx);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001222
1223 curr_frag_len = min(remaining, rx_frag_size);
1224
Ajit Khapardebd46cb62009-06-26 02:51:07 +00001225 /* Coalesce all frags from the same physical page in one slot */
1226 if (i == 0 || page_info->page_offset == 0) {
1227 /* First frag or Fresh page */
1228 j++;
Ajit Khaparde5be93b92009-07-21 12:36:19 -07001229 skb_shinfo(skb)->frags[j].page = page_info->page;
1230 skb_shinfo(skb)->frags[j].page_offset =
1231 page_info->page_offset;
1232 skb_shinfo(skb)->frags[j].size = 0;
Ajit Khapardebd46cb62009-06-26 02:51:07 +00001233 } else {
1234 put_page(page_info->page);
1235 }
Ajit Khaparde5be93b92009-07-21 12:36:19 -07001236 skb_shinfo(skb)->frags[j].size += curr_frag_len;
Ajit Khapardebd46cb62009-06-26 02:51:07 +00001237
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001238 remaining -= curr_frag_len;
Sathya Perla2e588f82011-03-11 02:49:26 +00001239 index_inc(&rxcp->rxq_idx, rxq->len);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001240 memset(page_info, 0, sizeof(*page_info));
1241 }
Ajit Khapardebd46cb62009-06-26 02:51:07 +00001242 BUG_ON(j > MAX_SKB_FRAGS);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001243
Ajit Khaparde5be93b92009-07-21 12:36:19 -07001244 skb_shinfo(skb)->nr_frags = j + 1;
Sathya Perla2e588f82011-03-11 02:49:26 +00001245 skb->len = rxcp->pkt_size;
1246 skb->data_len = rxcp->pkt_size;
1247 skb->truesize += rxcp->pkt_size;
Ajit Khaparde5be93b92009-07-21 12:36:19 -07001248 skb->ip_summed = CHECKSUM_UNNECESSARY;
Ajit Khaparde4b972912011-04-06 18:07:43 +00001249 if (adapter->netdev->features & NETIF_F_RXHASH)
1250 skb->rxhash = rxcp->rss_hash;
Ajit Khaparde5be93b92009-07-21 12:36:19 -07001251
Ajit Khaparde4c5102f2011-07-12 22:10:01 -07001252 if (unlikely(rxcp->vlanf))
1253 __vlan_hwaccel_put_tag(skb, rxcp->vlan_tag);
1254
1255 napi_gro_frags(&eq_obj->napi);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001256}
1257
Sathya Perla2e588f82011-03-11 02:49:26 +00001258static void be_parse_rx_compl_v1(struct be_adapter *adapter,
1259 struct be_eth_rx_compl *compl,
1260 struct be_rx_compl_info *rxcp)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001261{
Sathya Perla2e588f82011-03-11 02:49:26 +00001262 rxcp->pkt_size =
1263 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, pktsize, compl);
1264 rxcp->vlanf = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, vtp, compl);
1265 rxcp->err = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, err, compl);
1266 rxcp->tcpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, tcpf, compl);
Padmanabh Ratnakar9ecb42f2011-03-15 14:57:09 -07001267 rxcp->udpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, udpf, compl);
Sathya Perla2e588f82011-03-11 02:49:26 +00001268 rxcp->ip_csum =
1269 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, ipcksm, compl);
1270 rxcp->l4_csum =
1271 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, l4_cksm, compl);
1272 rxcp->ipv6 =
1273 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, ip_version, compl);
1274 rxcp->rxq_idx =
1275 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, fragndx, compl);
1276 rxcp->num_rcvd =
1277 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, numfrags, compl);
1278 rxcp->pkt_type =
1279 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, cast_enc, compl);
Ajit Khaparde4b972912011-04-06 18:07:43 +00001280 rxcp->rss_hash =
1281 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, rsshash, rxcp);
Sathya Perla15d72182011-03-21 20:49:26 +00001282 if (rxcp->vlanf) {
1283 rxcp->vtm = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, vtm,
David S. Miller3c709f82011-05-11 14:26:15 -04001284 compl);
1285 rxcp->vlan_tag = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, vlan_tag,
1286 compl);
Sathya Perla15d72182011-03-21 20:49:26 +00001287 }
Sathya Perla2e588f82011-03-11 02:49:26 +00001288}
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001289
Sathya Perla2e588f82011-03-11 02:49:26 +00001290static void be_parse_rx_compl_v0(struct be_adapter *adapter,
1291 struct be_eth_rx_compl *compl,
1292 struct be_rx_compl_info *rxcp)
1293{
1294 rxcp->pkt_size =
1295 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, pktsize, compl);
1296 rxcp->vlanf = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, vtp, compl);
1297 rxcp->err = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, err, compl);
1298 rxcp->tcpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, tcpf, compl);
Padmanabh Ratnakar9ecb42f2011-03-15 14:57:09 -07001299 rxcp->udpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, udpf, compl);
Sathya Perla2e588f82011-03-11 02:49:26 +00001300 rxcp->ip_csum =
1301 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, ipcksm, compl);
1302 rxcp->l4_csum =
1303 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, l4_cksm, compl);
1304 rxcp->ipv6 =
1305 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, ip_version, compl);
1306 rxcp->rxq_idx =
1307 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, fragndx, compl);
1308 rxcp->num_rcvd =
1309 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, numfrags, compl);
1310 rxcp->pkt_type =
1311 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, cast_enc, compl);
Ajit Khaparde4b972912011-04-06 18:07:43 +00001312 rxcp->rss_hash =
1313 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, rsshash, rxcp);
Sathya Perla15d72182011-03-21 20:49:26 +00001314 if (rxcp->vlanf) {
1315 rxcp->vtm = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, vtm,
David S. Miller3c709f82011-05-11 14:26:15 -04001316 compl);
1317 rxcp->vlan_tag = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, vlan_tag,
1318 compl);
Sathya Perla15d72182011-03-21 20:49:26 +00001319 }
Sathya Perla2e588f82011-03-11 02:49:26 +00001320}
1321
1322static struct be_rx_compl_info *be_rx_compl_get(struct be_rx_obj *rxo)
1323{
1324 struct be_eth_rx_compl *compl = queue_tail_node(&rxo->cq);
1325 struct be_rx_compl_info *rxcp = &rxo->rxcp;
1326 struct be_adapter *adapter = rxo->adapter;
1327
1328 /* For checking the valid bit it is Ok to use either definition as the
1329 * valid bit is at the same position in both v0 and v1 Rx compl */
1330 if (compl->dw[offsetof(struct amap_eth_rx_compl_v1, valid) / 32] == 0)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001331 return NULL;
1332
Sathya Perlaf3eb62d2010-06-29 00:11:17 +00001333 rmb();
Sathya Perla2e588f82011-03-11 02:49:26 +00001334 be_dws_le_to_cpu(compl, sizeof(*compl));
1335
1336 if (adapter->be3_native)
1337 be_parse_rx_compl_v1(adapter, compl, rxcp);
1338 else
1339 be_parse_rx_compl_v0(adapter, compl, rxcp);
1340
Sathya Perla15d72182011-03-21 20:49:26 +00001341 if (rxcp->vlanf) {
1342 /* vlanf could be wrongly set in some cards.
1343 * ignore if vtm is not set */
1344 if ((adapter->function_mode & 0x400) && !rxcp->vtm)
1345 rxcp->vlanf = 0;
Sathya Perla2e588f82011-03-11 02:49:26 +00001346
Sathya Perla15d72182011-03-21 20:49:26 +00001347 if (!lancer_chip(adapter))
David S. Miller3c709f82011-05-11 14:26:15 -04001348 rxcp->vlan_tag = swab16(rxcp->vlan_tag);
Sathya Perla2e588f82011-03-11 02:49:26 +00001349
David S. Miller3c709f82011-05-11 14:26:15 -04001350 if (((adapter->pvid & VLAN_VID_MASK) ==
1351 (rxcp->vlan_tag & VLAN_VID_MASK)) &&
1352 !adapter->vlan_tag[rxcp->vlan_tag])
Sathya Perla15d72182011-03-21 20:49:26 +00001353 rxcp->vlanf = 0;
1354 }
Sathya Perla2e588f82011-03-11 02:49:26 +00001355
1356 /* As the compl has been parsed, reset it; we wont touch it again */
1357 compl->dw[offsetof(struct amap_eth_rx_compl_v1, valid) / 32] = 0;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001358
Sathya Perla3abcded2010-10-03 22:12:27 -07001359 queue_tail_inc(&rxo->cq);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001360 return rxcp;
1361}
1362
Eric Dumazet1829b082011-03-01 05:48:12 +00001363static inline struct page *be_alloc_pages(u32 size, gfp_t gfp)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001364{
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001365 u32 order = get_order(size);
Eric Dumazet1829b082011-03-01 05:48:12 +00001366
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001367 if (order > 0)
Eric Dumazet1829b082011-03-01 05:48:12 +00001368 gfp |= __GFP_COMP;
1369 return alloc_pages(gfp, order);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001370}
1371
1372/*
1373 * Allocate a page, split it to fragments of size rx_frag_size and post as
1374 * receive buffers to BE
1375 */
Eric Dumazet1829b082011-03-01 05:48:12 +00001376static void be_post_rx_frags(struct be_rx_obj *rxo, gfp_t gfp)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001377{
Sathya Perla3abcded2010-10-03 22:12:27 -07001378 struct be_adapter *adapter = rxo->adapter;
1379 struct be_rx_page_info *page_info_tbl = rxo->page_info_tbl;
Sathya Perla26d92f92010-01-21 22:52:08 -08001380 struct be_rx_page_info *page_info = NULL, *prev_page_info = NULL;
Sathya Perla3abcded2010-10-03 22:12:27 -07001381 struct be_queue_info *rxq = &rxo->q;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001382 struct page *pagep = NULL;
1383 struct be_eth_rx_d *rxd;
1384 u64 page_dmaaddr = 0, frag_dmaaddr;
1385 u32 posted, page_offset = 0;
1386
Sathya Perla3abcded2010-10-03 22:12:27 -07001387 page_info = &rxo->page_info_tbl[rxq->head];
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001388 for (posted = 0; posted < MAX_RX_POST && !page_info->page; posted++) {
1389 if (!pagep) {
Eric Dumazet1829b082011-03-01 05:48:12 +00001390 pagep = be_alloc_pages(adapter->big_page_size, gfp);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001391 if (unlikely(!pagep)) {
Sathya Perla3abcded2010-10-03 22:12:27 -07001392 rxo->stats.rx_post_fail++;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001393 break;
1394 }
Ivan Vecera2b7bceb2011-02-02 08:05:12 +00001395 page_dmaaddr = dma_map_page(&adapter->pdev->dev, pagep,
1396 0, adapter->big_page_size,
1397 DMA_FROM_DEVICE);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001398 page_info->page_offset = 0;
1399 } else {
1400 get_page(pagep);
1401 page_info->page_offset = page_offset + rx_frag_size;
1402 }
1403 page_offset = page_info->page_offset;
1404 page_info->page = pagep;
FUJITA Tomonorifac6da52010-04-01 16:53:22 +00001405 dma_unmap_addr_set(page_info, bus, page_dmaaddr);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001406 frag_dmaaddr = page_dmaaddr + page_info->page_offset;
1407
1408 rxd = queue_head_node(rxq);
1409 rxd->fragpa_lo = cpu_to_le32(frag_dmaaddr & 0xFFFFFFFF);
1410 rxd->fragpa_hi = cpu_to_le32(upper_32_bits(frag_dmaaddr));
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001411
1412 /* Any space left in the current big page for another frag? */
1413 if ((page_offset + rx_frag_size + rx_frag_size) >
1414 adapter->big_page_size) {
1415 pagep = NULL;
1416 page_info->last_page_user = true;
1417 }
Sathya Perla26d92f92010-01-21 22:52:08 -08001418
1419 prev_page_info = page_info;
1420 queue_head_inc(rxq);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001421 page_info = &page_info_tbl[rxq->head];
1422 }
1423 if (pagep)
Sathya Perla26d92f92010-01-21 22:52:08 -08001424 prev_page_info->last_page_user = true;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001425
1426 if (posted) {
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001427 atomic_add(posted, &rxq->used);
Sathya Perla8788fdc2009-07-27 22:52:03 +00001428 be_rxq_notify(adapter, rxq->id, posted);
Sathya Perlaea1dae12009-03-19 23:56:20 -07001429 } else if (atomic_read(&rxq->used) == 0) {
1430 /* Let be_worker replenish when memory is available */
Sathya Perla3abcded2010-10-03 22:12:27 -07001431 rxo->rx_post_starved = true;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001432 }
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001433}
1434
Sathya Perla5fb379e2009-06-18 00:02:59 +00001435static struct be_eth_tx_compl *be_tx_compl_get(struct be_queue_info *tx_cq)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001436{
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001437 struct be_eth_tx_compl *txcp = queue_tail_node(tx_cq);
1438
1439 if (txcp->dw[offsetof(struct amap_eth_tx_compl, valid) / 32] == 0)
1440 return NULL;
1441
Sathya Perlaf3eb62d2010-06-29 00:11:17 +00001442 rmb();
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001443 be_dws_le_to_cpu(txcp, sizeof(*txcp));
1444
1445 txcp->dw[offsetof(struct amap_eth_tx_compl, valid) / 32] = 0;
1446
1447 queue_tail_inc(tx_cq);
1448 return txcp;
1449}
1450
Sathya Perla3c8def92011-06-12 20:01:58 +00001451static u16 be_tx_compl_process(struct be_adapter *adapter,
1452 struct be_tx_obj *txo, u16 last_index)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001453{
Sathya Perla3c8def92011-06-12 20:01:58 +00001454 struct be_queue_info *txq = &txo->q;
Alexander Duycka73b7962009-12-02 16:48:18 +00001455 struct be_eth_wrb *wrb;
Sathya Perla3c8def92011-06-12 20:01:58 +00001456 struct sk_buff **sent_skbs = txo->sent_skb_list;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001457 struct sk_buff *sent_skb;
Sathya Perlaec43b1a2010-03-22 20:41:34 +00001458 u16 cur_index, num_wrbs = 1; /* account for hdr wrb */
1459 bool unmap_skb_hdr = true;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001460
Sathya Perlaec43b1a2010-03-22 20:41:34 +00001461 sent_skb = sent_skbs[txq->tail];
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001462 BUG_ON(!sent_skb);
Sathya Perlaec43b1a2010-03-22 20:41:34 +00001463 sent_skbs[txq->tail] = NULL;
1464
1465 /* skip header wrb */
Alexander Duycka73b7962009-12-02 16:48:18 +00001466 queue_tail_inc(txq);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001467
Sathya Perlaec43b1a2010-03-22 20:41:34 +00001468 do {
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001469 cur_index = txq->tail;
Alexander Duycka73b7962009-12-02 16:48:18 +00001470 wrb = queue_tail_node(txq);
Ivan Vecera2b7bceb2011-02-02 08:05:12 +00001471 unmap_tx_frag(&adapter->pdev->dev, wrb,
1472 (unmap_skb_hdr && skb_headlen(sent_skb)));
Sathya Perlaec43b1a2010-03-22 20:41:34 +00001473 unmap_skb_hdr = false;
1474
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001475 num_wrbs++;
1476 queue_tail_inc(txq);
Sathya Perlaec43b1a2010-03-22 20:41:34 +00001477 } while (cur_index != last_index);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001478
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001479 kfree_skb(sent_skb);
Padmanabh Ratnakar4d586b82011-05-10 05:13:57 +00001480 return num_wrbs;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001481}
1482
Sathya Perla859b1e42009-08-10 03:43:51 +00001483static inline struct be_eq_entry *event_get(struct be_eq_obj *eq_obj)
1484{
1485 struct be_eq_entry *eqe = queue_tail_node(&eq_obj->q);
1486
1487 if (!eqe->evt)
1488 return NULL;
1489
Sathya Perlaf3eb62d2010-06-29 00:11:17 +00001490 rmb();
Sathya Perla859b1e42009-08-10 03:43:51 +00001491 eqe->evt = le32_to_cpu(eqe->evt);
1492 queue_tail_inc(&eq_obj->q);
1493 return eqe;
1494}
1495
1496static int event_handle(struct be_adapter *adapter,
Sathya Perla3c8def92011-06-12 20:01:58 +00001497 struct be_eq_obj *eq_obj,
1498 bool rearm)
Sathya Perla859b1e42009-08-10 03:43:51 +00001499{
1500 struct be_eq_entry *eqe;
1501 u16 num = 0;
1502
1503 while ((eqe = event_get(eq_obj)) != NULL) {
1504 eqe->evt = 0;
1505 num++;
1506 }
1507
1508 /* Deal with any spurious interrupts that come
1509 * without events
1510 */
Sathya Perla3c8def92011-06-12 20:01:58 +00001511 if (!num)
1512 rearm = true;
1513
1514 be_eq_notify(adapter, eq_obj->q.id, rearm, true, num);
Sathya Perla859b1e42009-08-10 03:43:51 +00001515 if (num)
1516 napi_schedule(&eq_obj->napi);
1517
1518 return num;
1519}
1520
1521/* Just read and notify events without processing them.
1522 * Used at the time of destroying event queues */
1523static void be_eq_clean(struct be_adapter *adapter,
1524 struct be_eq_obj *eq_obj)
1525{
1526 struct be_eq_entry *eqe;
1527 u16 num = 0;
1528
1529 while ((eqe = event_get(eq_obj)) != NULL) {
1530 eqe->evt = 0;
1531 num++;
1532 }
1533
1534 if (num)
1535 be_eq_notify(adapter, eq_obj->q.id, false, true, num);
1536}
1537
Sathya Perla3abcded2010-10-03 22:12:27 -07001538static void be_rx_q_clean(struct be_adapter *adapter, struct be_rx_obj *rxo)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001539{
1540 struct be_rx_page_info *page_info;
Sathya Perla3abcded2010-10-03 22:12:27 -07001541 struct be_queue_info *rxq = &rxo->q;
1542 struct be_queue_info *rx_cq = &rxo->cq;
Sathya Perla2e588f82011-03-11 02:49:26 +00001543 struct be_rx_compl_info *rxcp;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001544 u16 tail;
1545
1546 /* First cleanup pending rx completions */
Sathya Perla3abcded2010-10-03 22:12:27 -07001547 while ((rxcp = be_rx_compl_get(rxo)) != NULL) {
1548 be_rx_compl_discard(adapter, rxo, rxcp);
Sathya Perla64642812010-12-01 01:04:17 +00001549 be_cq_notify(adapter, rx_cq->id, false, 1);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001550 }
1551
1552 /* Then free posted rx buffer that were not used */
1553 tail = (rxq->head + rxq->len - atomic_read(&rxq->used)) % rxq->len;
Sathya Perlacdab23b2009-08-10 03:43:23 +00001554 for (; atomic_read(&rxq->used) > 0; index_inc(&tail, rxq->len)) {
Sathya Perla3abcded2010-10-03 22:12:27 -07001555 page_info = get_rx_page_info(adapter, rxo, tail);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001556 put_page(page_info->page);
1557 memset(page_info, 0, sizeof(*page_info));
1558 }
1559 BUG_ON(atomic_read(&rxq->used));
Sathya Perla482c9e72011-06-29 23:33:17 +00001560 rxq->tail = rxq->head = 0;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001561}
1562
Sathya Perla3c8def92011-06-12 20:01:58 +00001563static void be_tx_compl_clean(struct be_adapter *adapter,
1564 struct be_tx_obj *txo)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001565{
Sathya Perla3c8def92011-06-12 20:01:58 +00001566 struct be_queue_info *tx_cq = &txo->cq;
1567 struct be_queue_info *txq = &txo->q;
Sathya Perlaa8e91792009-08-10 03:42:43 +00001568 struct be_eth_tx_compl *txcp;
Padmanabh Ratnakar4d586b82011-05-10 05:13:57 +00001569 u16 end_idx, cmpl = 0, timeo = 0, num_wrbs = 0;
Sathya Perla3c8def92011-06-12 20:01:58 +00001570 struct sk_buff **sent_skbs = txo->sent_skb_list;
Sathya Perlab03388d2010-02-18 00:37:17 +00001571 struct sk_buff *sent_skb;
1572 bool dummy_wrb;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001573
Sathya Perlaa8e91792009-08-10 03:42:43 +00001574 /* Wait for a max of 200ms for all the tx-completions to arrive. */
1575 do {
1576 while ((txcp = be_tx_compl_get(tx_cq))) {
1577 end_idx = AMAP_GET_BITS(struct amap_eth_tx_compl,
1578 wrb_index, txcp);
Sathya Perla3c8def92011-06-12 20:01:58 +00001579 num_wrbs += be_tx_compl_process(adapter, txo, end_idx);
Sathya Perlaa8e91792009-08-10 03:42:43 +00001580 cmpl++;
1581 }
1582 if (cmpl) {
1583 be_cq_notify(adapter, tx_cq->id, false, cmpl);
Padmanabh Ratnakar4d586b82011-05-10 05:13:57 +00001584 atomic_sub(num_wrbs, &txq->used);
Sathya Perlaa8e91792009-08-10 03:42:43 +00001585 cmpl = 0;
Padmanabh Ratnakar4d586b82011-05-10 05:13:57 +00001586 num_wrbs = 0;
Sathya Perlaa8e91792009-08-10 03:42:43 +00001587 }
1588
1589 if (atomic_read(&txq->used) == 0 || ++timeo > 200)
1590 break;
1591
1592 mdelay(1);
1593 } while (true);
1594
1595 if (atomic_read(&txq->used))
1596 dev_err(&adapter->pdev->dev, "%d pending tx-completions\n",
1597 atomic_read(&txq->used));
Sathya Perlab03388d2010-02-18 00:37:17 +00001598
1599 /* free posted tx for which compls will never arrive */
1600 while (atomic_read(&txq->used)) {
1601 sent_skb = sent_skbs[txq->tail];
1602 end_idx = txq->tail;
1603 index_adv(&end_idx,
Sathya Perlafe6d2a32010-11-21 23:25:50 +00001604 wrb_cnt_for_skb(adapter, sent_skb, &dummy_wrb) - 1,
1605 txq->len);
Sathya Perla3c8def92011-06-12 20:01:58 +00001606 num_wrbs = be_tx_compl_process(adapter, txo, end_idx);
Padmanabh Ratnakar4d586b82011-05-10 05:13:57 +00001607 atomic_sub(num_wrbs, &txq->used);
Sathya Perlab03388d2010-02-18 00:37:17 +00001608 }
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001609}
1610
Sathya Perla5fb379e2009-06-18 00:02:59 +00001611static void be_mcc_queues_destroy(struct be_adapter *adapter)
1612{
1613 struct be_queue_info *q;
Sathya Perla5fb379e2009-06-18 00:02:59 +00001614
Sathya Perla8788fdc2009-07-27 22:52:03 +00001615 q = &adapter->mcc_obj.q;
Sathya Perla5fb379e2009-06-18 00:02:59 +00001616 if (q->created)
Sathya Perla8788fdc2009-07-27 22:52:03 +00001617 be_cmd_q_destroy(adapter, q, QTYPE_MCCQ);
Sathya Perla5fb379e2009-06-18 00:02:59 +00001618 be_queue_free(adapter, q);
1619
Sathya Perla8788fdc2009-07-27 22:52:03 +00001620 q = &adapter->mcc_obj.cq;
Sathya Perla5fb379e2009-06-18 00:02:59 +00001621 if (q->created)
Sathya Perla8788fdc2009-07-27 22:52:03 +00001622 be_cmd_q_destroy(adapter, q, QTYPE_CQ);
Sathya Perla5fb379e2009-06-18 00:02:59 +00001623 be_queue_free(adapter, q);
1624}
1625
1626/* Must be called only after TX qs are created as MCC shares TX EQ */
1627static int be_mcc_queues_create(struct be_adapter *adapter)
1628{
1629 struct be_queue_info *q, *cq;
Sathya Perla5fb379e2009-06-18 00:02:59 +00001630
1631 /* Alloc MCC compl queue */
Sathya Perla8788fdc2009-07-27 22:52:03 +00001632 cq = &adapter->mcc_obj.cq;
Sathya Perla5fb379e2009-06-18 00:02:59 +00001633 if (be_queue_alloc(adapter, cq, MCC_CQ_LEN,
Sathya Perlaefd2e402009-07-27 22:53:10 +00001634 sizeof(struct be_mcc_compl)))
Sathya Perla5fb379e2009-06-18 00:02:59 +00001635 goto err;
1636
1637 /* Ask BE to create MCC compl queue; share TX's eq */
Sathya Perla8788fdc2009-07-27 22:52:03 +00001638 if (be_cmd_cq_create(adapter, cq, &adapter->tx_eq.q, false, true, 0))
Sathya Perla5fb379e2009-06-18 00:02:59 +00001639 goto mcc_cq_free;
1640
1641 /* Alloc MCC queue */
Sathya Perla8788fdc2009-07-27 22:52:03 +00001642 q = &adapter->mcc_obj.q;
Sathya Perla5fb379e2009-06-18 00:02:59 +00001643 if (be_queue_alloc(adapter, q, MCC_Q_LEN, sizeof(struct be_mcc_wrb)))
1644 goto mcc_cq_destroy;
1645
1646 /* Ask BE to create MCC queue */
Sathya Perla8788fdc2009-07-27 22:52:03 +00001647 if (be_cmd_mccq_create(adapter, q, cq))
Sathya Perla5fb379e2009-06-18 00:02:59 +00001648 goto mcc_q_free;
1649
1650 return 0;
1651
1652mcc_q_free:
1653 be_queue_free(adapter, q);
1654mcc_cq_destroy:
Sathya Perla8788fdc2009-07-27 22:52:03 +00001655 be_cmd_q_destroy(adapter, cq, QTYPE_CQ);
Sathya Perla5fb379e2009-06-18 00:02:59 +00001656mcc_cq_free:
1657 be_queue_free(adapter, cq);
1658err:
1659 return -1;
1660}
1661
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001662static void be_tx_queues_destroy(struct be_adapter *adapter)
1663{
1664 struct be_queue_info *q;
Sathya Perla3c8def92011-06-12 20:01:58 +00001665 struct be_tx_obj *txo;
1666 u8 i;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001667
Sathya Perla3c8def92011-06-12 20:01:58 +00001668 for_all_tx_queues(adapter, txo, i) {
1669 q = &txo->q;
1670 if (q->created)
1671 be_cmd_q_destroy(adapter, q, QTYPE_TXQ);
1672 be_queue_free(adapter, q);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001673
Sathya Perla3c8def92011-06-12 20:01:58 +00001674 q = &txo->cq;
1675 if (q->created)
1676 be_cmd_q_destroy(adapter, q, QTYPE_CQ);
1677 be_queue_free(adapter, q);
1678 }
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001679
Sathya Perla859b1e42009-08-10 03:43:51 +00001680 /* Clear any residual events */
1681 be_eq_clean(adapter, &adapter->tx_eq);
1682
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001683 q = &adapter->tx_eq.q;
1684 if (q->created)
Sathya Perla8788fdc2009-07-27 22:52:03 +00001685 be_cmd_q_destroy(adapter, q, QTYPE_EQ);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001686 be_queue_free(adapter, q);
1687}
1688
Sathya Perla3c8def92011-06-12 20:01:58 +00001689/* One TX event queue is shared by all TX compl qs */
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001690static int be_tx_queues_create(struct be_adapter *adapter)
1691{
1692 struct be_queue_info *eq, *q, *cq;
Sathya Perla3c8def92011-06-12 20:01:58 +00001693 struct be_tx_obj *txo;
1694 u8 i;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001695
1696 adapter->tx_eq.max_eqd = 0;
1697 adapter->tx_eq.min_eqd = 0;
1698 adapter->tx_eq.cur_eqd = 96;
1699 adapter->tx_eq.enable_aic = false;
Sathya Perla3c8def92011-06-12 20:01:58 +00001700
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001701 eq = &adapter->tx_eq.q;
Sathya Perla3c8def92011-06-12 20:01:58 +00001702 if (be_queue_alloc(adapter, eq, EVNT_Q_LEN,
1703 sizeof(struct be_eq_entry)))
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001704 return -1;
1705
Sathya Perla8788fdc2009-07-27 22:52:03 +00001706 if (be_cmd_eq_create(adapter, eq, adapter->tx_eq.cur_eqd))
Sathya Perla3c8def92011-06-12 20:01:58 +00001707 goto err;
Padmanabh Ratnakarecd62102011-04-03 01:54:11 +00001708 adapter->tx_eq.eq_idx = adapter->eq_next_idx++;
Sathya Perlafe6d2a32010-11-21 23:25:50 +00001709
Sathya Perla3c8def92011-06-12 20:01:58 +00001710 for_all_tx_queues(adapter, txo, i) {
1711 cq = &txo->cq;
1712 if (be_queue_alloc(adapter, cq, TX_CQ_LEN,
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001713 sizeof(struct be_eth_tx_compl)))
Sathya Perla3c8def92011-06-12 20:01:58 +00001714 goto err;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001715
Sathya Perla3c8def92011-06-12 20:01:58 +00001716 if (be_cmd_cq_create(adapter, cq, eq, false, false, 3))
1717 goto err;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001718
Sathya Perla3c8def92011-06-12 20:01:58 +00001719 q = &txo->q;
1720 if (be_queue_alloc(adapter, q, TX_Q_LEN,
1721 sizeof(struct be_eth_wrb)))
1722 goto err;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001723
Sathya Perla3c8def92011-06-12 20:01:58 +00001724 if (be_cmd_txq_create(adapter, q, cq))
1725 goto err;
1726 }
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001727 return 0;
1728
Sathya Perla3c8def92011-06-12 20:01:58 +00001729err:
1730 be_tx_queues_destroy(adapter);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001731 return -1;
1732}
1733
1734static void be_rx_queues_destroy(struct be_adapter *adapter)
1735{
1736 struct be_queue_info *q;
Sathya Perla3abcded2010-10-03 22:12:27 -07001737 struct be_rx_obj *rxo;
1738 int i;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001739
Sathya Perla3abcded2010-10-03 22:12:27 -07001740 for_all_rx_queues(adapter, rxo, i) {
Sathya Perla482c9e72011-06-29 23:33:17 +00001741 be_queue_free(adapter, &rxo->q);
Sathya Perla89420422010-02-17 01:35:26 +00001742
Sathya Perla3abcded2010-10-03 22:12:27 -07001743 q = &rxo->cq;
1744 if (q->created)
1745 be_cmd_q_destroy(adapter, q, QTYPE_CQ);
1746 be_queue_free(adapter, q);
1747
Sathya Perla3abcded2010-10-03 22:12:27 -07001748 q = &rxo->rx_eq.q;
Sathya Perla482c9e72011-06-29 23:33:17 +00001749 if (q->created)
Sathya Perla3abcded2010-10-03 22:12:27 -07001750 be_cmd_q_destroy(adapter, q, QTYPE_EQ);
Sathya Perla3abcded2010-10-03 22:12:27 -07001751 be_queue_free(adapter, q);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001752 }
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001753}
1754
Sathya Perlaac6a0c42011-03-21 20:49:25 +00001755static u32 be_num_rxqs_want(struct be_adapter *adapter)
1756{
Sathya Perlac814fd32011-06-26 20:41:25 +00001757 if ((adapter->function_caps & BE_FUNCTION_CAPS_RSS) &&
Sathya Perlaac6a0c42011-03-21 20:49:25 +00001758 !adapter->sriov_enabled && !(adapter->function_mode & 0x400)) {
1759 return 1 + MAX_RSS_QS; /* one default non-RSS queue */
1760 } else {
1761 dev_warn(&adapter->pdev->dev,
1762 "No support for multiple RX queues\n");
1763 return 1;
1764 }
1765}
1766
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001767static int be_rx_queues_create(struct be_adapter *adapter)
1768{
1769 struct be_queue_info *eq, *q, *cq;
Sathya Perla3abcded2010-10-03 22:12:27 -07001770 struct be_rx_obj *rxo;
1771 int rc, i;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001772
Sathya Perlaac6a0c42011-03-21 20:49:25 +00001773 adapter->num_rx_qs = min(be_num_rxqs_want(adapter),
1774 msix_enabled(adapter) ?
1775 adapter->num_msix_vec - 1 : 1);
1776 if (adapter->num_rx_qs != MAX_RX_QS)
1777 dev_warn(&adapter->pdev->dev,
1778 "Can create only %d RX queues", adapter->num_rx_qs);
1779
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001780 adapter->big_page_size = (1 << get_order(rx_frag_size)) * PAGE_SIZE;
Sathya Perla3abcded2010-10-03 22:12:27 -07001781 for_all_rx_queues(adapter, rxo, i) {
1782 rxo->adapter = adapter;
1783 rxo->rx_eq.max_eqd = BE_MAX_EQD;
1784 rxo->rx_eq.enable_aic = true;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001785
Sathya Perla3abcded2010-10-03 22:12:27 -07001786 /* EQ */
1787 eq = &rxo->rx_eq.q;
1788 rc = be_queue_alloc(adapter, eq, EVNT_Q_LEN,
1789 sizeof(struct be_eq_entry));
1790 if (rc)
1791 goto err;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001792
Sathya Perla3abcded2010-10-03 22:12:27 -07001793 rc = be_cmd_eq_create(adapter, eq, rxo->rx_eq.cur_eqd);
1794 if (rc)
1795 goto err;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001796
Padmanabh Ratnakarecd62102011-04-03 01:54:11 +00001797 rxo->rx_eq.eq_idx = adapter->eq_next_idx++;
Sathya Perlafe6d2a32010-11-21 23:25:50 +00001798
Sathya Perla3abcded2010-10-03 22:12:27 -07001799 /* CQ */
1800 cq = &rxo->cq;
1801 rc = be_queue_alloc(adapter, cq, RX_CQ_LEN,
1802 sizeof(struct be_eth_rx_compl));
1803 if (rc)
1804 goto err;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001805
Sathya Perla3abcded2010-10-03 22:12:27 -07001806 rc = be_cmd_cq_create(adapter, cq, eq, false, false, 3);
1807 if (rc)
1808 goto err;
Sathya Perla482c9e72011-06-29 23:33:17 +00001809
1810 /* Rx Q - will be created in be_open() */
Sathya Perla3abcded2010-10-03 22:12:27 -07001811 q = &rxo->q;
1812 rc = be_queue_alloc(adapter, q, RX_Q_LEN,
1813 sizeof(struct be_eth_rx_d));
1814 if (rc)
1815 goto err;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001816
Sathya Perla3abcded2010-10-03 22:12:27 -07001817 }
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001818
1819 return 0;
Sathya Perla3abcded2010-10-03 22:12:27 -07001820err:
1821 be_rx_queues_destroy(adapter);
1822 return -1;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001823}
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001824
Sathya Perlafe6d2a32010-11-21 23:25:50 +00001825static bool event_peek(struct be_eq_obj *eq_obj)
Sathya Perlab628bde2009-08-17 00:58:26 +00001826{
Sathya Perlafe6d2a32010-11-21 23:25:50 +00001827 struct be_eq_entry *eqe = queue_tail_node(&eq_obj->q);
1828 if (!eqe->evt)
1829 return false;
1830 else
1831 return true;
Sathya Perlab628bde2009-08-17 00:58:26 +00001832}
1833
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001834static irqreturn_t be_intx(int irq, void *dev)
1835{
1836 struct be_adapter *adapter = dev;
Sathya Perla3abcded2010-10-03 22:12:27 -07001837 struct be_rx_obj *rxo;
Sathya Perlafe6d2a32010-11-21 23:25:50 +00001838 int isr, i, tx = 0 , rx = 0;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001839
Sathya Perlafe6d2a32010-11-21 23:25:50 +00001840 if (lancer_chip(adapter)) {
1841 if (event_peek(&adapter->tx_eq))
Sathya Perla3c8def92011-06-12 20:01:58 +00001842 tx = event_handle(adapter, &adapter->tx_eq, false);
Sathya Perlafe6d2a32010-11-21 23:25:50 +00001843 for_all_rx_queues(adapter, rxo, i) {
1844 if (event_peek(&rxo->rx_eq))
Sathya Perla3c8def92011-06-12 20:01:58 +00001845 rx |= event_handle(adapter, &rxo->rx_eq, true);
Sathya Perlafe6d2a32010-11-21 23:25:50 +00001846 }
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001847
Sathya Perlafe6d2a32010-11-21 23:25:50 +00001848 if (!(tx || rx))
1849 return IRQ_NONE;
Sathya Perla3abcded2010-10-03 22:12:27 -07001850
Sathya Perlafe6d2a32010-11-21 23:25:50 +00001851 } else {
1852 isr = ioread32(adapter->csr + CEV_ISR0_OFFSET +
1853 (adapter->tx_eq.q.id / 8) * CEV_ISR_SIZE);
1854 if (!isr)
1855 return IRQ_NONE;
1856
Padmanabh Ratnakarecd62102011-04-03 01:54:11 +00001857 if ((1 << adapter->tx_eq.eq_idx & isr))
Sathya Perla3c8def92011-06-12 20:01:58 +00001858 event_handle(adapter, &adapter->tx_eq, false);
Sathya Perlafe6d2a32010-11-21 23:25:50 +00001859
1860 for_all_rx_queues(adapter, rxo, i) {
Padmanabh Ratnakarecd62102011-04-03 01:54:11 +00001861 if ((1 << rxo->rx_eq.eq_idx & isr))
Sathya Perla3c8def92011-06-12 20:01:58 +00001862 event_handle(adapter, &rxo->rx_eq, true);
Sathya Perlafe6d2a32010-11-21 23:25:50 +00001863 }
Sathya Perla3abcded2010-10-03 22:12:27 -07001864 }
Sathya Perlac001c212009-07-01 01:06:07 +00001865
Sathya Perla8788fdc2009-07-27 22:52:03 +00001866 return IRQ_HANDLED;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001867}
1868
1869static irqreturn_t be_msix_rx(int irq, void *dev)
1870{
Sathya Perla3abcded2010-10-03 22:12:27 -07001871 struct be_rx_obj *rxo = dev;
1872 struct be_adapter *adapter = rxo->adapter;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001873
Sathya Perla3c8def92011-06-12 20:01:58 +00001874 event_handle(adapter, &rxo->rx_eq, true);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001875
1876 return IRQ_HANDLED;
1877}
1878
Sathya Perla5fb379e2009-06-18 00:02:59 +00001879static irqreturn_t be_msix_tx_mcc(int irq, void *dev)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001880{
1881 struct be_adapter *adapter = dev;
1882
Sathya Perla3c8def92011-06-12 20:01:58 +00001883 event_handle(adapter, &adapter->tx_eq, false);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001884
1885 return IRQ_HANDLED;
1886}
1887
Sathya Perla2e588f82011-03-11 02:49:26 +00001888static inline bool do_gro(struct be_rx_compl_info *rxcp)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001889{
Sathya Perla2e588f82011-03-11 02:49:26 +00001890 return (rxcp->tcpf && !rxcp->err) ? true : false;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001891}
1892
stephen hemminger49b05222010-10-21 07:50:48 +00001893static int be_poll_rx(struct napi_struct *napi, int budget)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001894{
1895 struct be_eq_obj *rx_eq = container_of(napi, struct be_eq_obj, napi);
Sathya Perla3abcded2010-10-03 22:12:27 -07001896 struct be_rx_obj *rxo = container_of(rx_eq, struct be_rx_obj, rx_eq);
1897 struct be_adapter *adapter = rxo->adapter;
1898 struct be_queue_info *rx_cq = &rxo->cq;
Sathya Perla2e588f82011-03-11 02:49:26 +00001899 struct be_rx_compl_info *rxcp;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001900 u32 work_done;
1901
Sathya Perla3abcded2010-10-03 22:12:27 -07001902 rxo->stats.rx_polls++;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001903 for (work_done = 0; work_done < budget; work_done++) {
Sathya Perla3abcded2010-10-03 22:12:27 -07001904 rxcp = be_rx_compl_get(rxo);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001905 if (!rxcp)
1906 break;
1907
Padmanabh Ratnakare80d9da2011-03-07 03:07:58 +00001908 /* Ignore flush completions */
Padmanabh Ratnakar009dd872011-05-10 05:12:17 +00001909 if (rxcp->num_rcvd && rxcp->pkt_size) {
Sathya Perla2e588f82011-03-11 02:49:26 +00001910 if (do_gro(rxcp))
Sathya Perla64642812010-12-01 01:04:17 +00001911 be_rx_compl_process_gro(adapter, rxo, rxcp);
1912 else
1913 be_rx_compl_process(adapter, rxo, rxcp);
Padmanabh Ratnakar009dd872011-05-10 05:12:17 +00001914 } else if (rxcp->pkt_size == 0) {
1915 be_rx_compl_discard(adapter, rxo, rxcp);
Sathya Perla64642812010-12-01 01:04:17 +00001916 }
Padmanabh Ratnakar009dd872011-05-10 05:12:17 +00001917
Sathya Perla2e588f82011-03-11 02:49:26 +00001918 be_rx_stats_update(rxo, rxcp);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001919 }
1920
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001921 /* Refill the queue */
Sathya Perla3abcded2010-10-03 22:12:27 -07001922 if (atomic_read(&rxo->q.used) < RX_FRAGS_REFILL_WM)
Eric Dumazet1829b082011-03-01 05:48:12 +00001923 be_post_rx_frags(rxo, GFP_ATOMIC);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001924
1925 /* All consumed */
1926 if (work_done < budget) {
1927 napi_complete(napi);
Sathya Perla8788fdc2009-07-27 22:52:03 +00001928 be_cq_notify(adapter, rx_cq->id, true, work_done);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001929 } else {
1930 /* More to be consumed; continue with interrupts disabled */
Sathya Perla8788fdc2009-07-27 22:52:03 +00001931 be_cq_notify(adapter, rx_cq->id, false, work_done);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001932 }
1933 return work_done;
1934}
1935
Sathya Perlaf31e50a2010-03-02 03:56:39 -08001936/* As TX and MCC share the same EQ check for both TX and MCC completions.
1937 * For TX/MCC we don't honour budget; consume everything
1938 */
1939static int be_poll_tx_mcc(struct napi_struct *napi, int budget)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001940{
Sathya Perlaf31e50a2010-03-02 03:56:39 -08001941 struct be_eq_obj *tx_eq = container_of(napi, struct be_eq_obj, napi);
1942 struct be_adapter *adapter =
1943 container_of(tx_eq, struct be_adapter, tx_eq);
Sathya Perla3c8def92011-06-12 20:01:58 +00001944 struct be_tx_obj *txo;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001945 struct be_eth_tx_compl *txcp;
Sathya Perla3c8def92011-06-12 20:01:58 +00001946 int tx_compl, mcc_compl, status = 0;
1947 u8 i;
1948 u16 num_wrbs;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001949
Sathya Perla3c8def92011-06-12 20:01:58 +00001950 for_all_tx_queues(adapter, txo, i) {
1951 tx_compl = 0;
1952 num_wrbs = 0;
1953 while ((txcp = be_tx_compl_get(&txo->cq))) {
1954 num_wrbs += be_tx_compl_process(adapter, txo,
1955 AMAP_GET_BITS(struct amap_eth_tx_compl,
1956 wrb_index, txcp));
1957 tx_compl++;
1958 }
1959 if (tx_compl) {
1960 be_cq_notify(adapter, txo->cq.id, true, tx_compl);
1961
1962 atomic_sub(num_wrbs, &txo->q.used);
1963
1964 /* As Tx wrbs have been freed up, wake up netdev queue
1965 * if it was stopped due to lack of tx wrbs. */
1966 if (__netif_subqueue_stopped(adapter->netdev, i) &&
1967 atomic_read(&txo->q.used) < txo->q.len / 2) {
1968 netif_wake_subqueue(adapter->netdev, i);
1969 }
1970
1971 adapter->drv_stats.be_tx_events++;
1972 txo->stats.be_tx_compl += tx_compl;
1973 }
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001974 }
1975
Sathya Perlaf31e50a2010-03-02 03:56:39 -08001976 mcc_compl = be_process_mcc(adapter, &status);
1977
Sathya Perlaf31e50a2010-03-02 03:56:39 -08001978 if (mcc_compl) {
1979 struct be_mcc_obj *mcc_obj = &adapter->mcc_obj;
1980 be_cq_notify(adapter, mcc_obj->cq.id, true, mcc_compl);
1981 }
1982
Sathya Perla3c8def92011-06-12 20:01:58 +00001983 napi_complete(napi);
Sathya Perla5fb379e2009-06-18 00:02:59 +00001984
Sathya Perla3c8def92011-06-12 20:01:58 +00001985 be_eq_notify(adapter, tx_eq->q.id, true, false, 0);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001986 return 1;
1987}
1988
Ajit Khaparded053de92010-09-03 06:23:30 +00001989void be_detect_dump_ue(struct be_adapter *adapter)
Ajit Khaparde7c185272010-07-29 06:16:33 +00001990{
1991 u32 ue_status_lo, ue_status_hi, ue_status_lo_mask, ue_status_hi_mask;
1992 u32 i;
1993
1994 pci_read_config_dword(adapter->pdev,
1995 PCICFG_UE_STATUS_LOW, &ue_status_lo);
1996 pci_read_config_dword(adapter->pdev,
1997 PCICFG_UE_STATUS_HIGH, &ue_status_hi);
1998 pci_read_config_dword(adapter->pdev,
1999 PCICFG_UE_STATUS_LOW_MASK, &ue_status_lo_mask);
2000 pci_read_config_dword(adapter->pdev,
2001 PCICFG_UE_STATUS_HI_MASK, &ue_status_hi_mask);
2002
2003 ue_status_lo = (ue_status_lo & (~ue_status_lo_mask));
2004 ue_status_hi = (ue_status_hi & (~ue_status_hi_mask));
2005
Ajit Khaparded053de92010-09-03 06:23:30 +00002006 if (ue_status_lo || ue_status_hi) {
2007 adapter->ue_detected = true;
Ajit Khaparde7acc2082011-02-11 13:38:17 +00002008 adapter->eeh_err = true;
Ajit Khaparded053de92010-09-03 06:23:30 +00002009 dev_err(&adapter->pdev->dev, "UE Detected!!\n");
2010 }
2011
Ajit Khaparde7c185272010-07-29 06:16:33 +00002012 if (ue_status_lo) {
2013 for (i = 0; ue_status_lo; ue_status_lo >>= 1, i++) {
2014 if (ue_status_lo & 1)
2015 dev_err(&adapter->pdev->dev,
2016 "UE: %s bit set\n", ue_status_low_desc[i]);
2017 }
2018 }
2019 if (ue_status_hi) {
2020 for (i = 0; ue_status_hi; ue_status_hi >>= 1, i++) {
2021 if (ue_status_hi & 1)
2022 dev_err(&adapter->pdev->dev,
2023 "UE: %s bit set\n", ue_status_hi_desc[i]);
2024 }
2025 }
2026
2027}
2028
Sathya Perlaea1dae12009-03-19 23:56:20 -07002029static void be_worker(struct work_struct *work)
2030{
2031 struct be_adapter *adapter =
2032 container_of(work, struct be_adapter, work.work);
Sathya Perla3abcded2010-10-03 22:12:27 -07002033 struct be_rx_obj *rxo;
Sathya Perla3c8def92011-06-12 20:01:58 +00002034 struct be_tx_obj *txo;
Sathya Perla3abcded2010-10-03 22:12:27 -07002035 int i;
Sathya Perlaea1dae12009-03-19 23:56:20 -07002036
Sathya Perla16da8252011-03-21 20:49:27 +00002037 if (!adapter->ue_detected && !lancer_chip(adapter))
2038 be_detect_dump_ue(adapter);
2039
Somnath Koturf203af72010-10-25 23:01:03 +00002040 /* when interrupts are not yet enabled, just reap any pending
2041 * mcc completions */
2042 if (!netif_running(adapter->netdev)) {
2043 int mcc_compl, status = 0;
2044
2045 mcc_compl = be_process_mcc(adapter, &status);
2046
2047 if (mcc_compl) {
2048 struct be_mcc_obj *mcc_obj = &adapter->mcc_obj;
2049 be_cq_notify(adapter, mcc_obj->cq.id, false, mcc_compl);
2050 }
Ajit Khaparde9b037f32011-02-11 13:38:29 +00002051
Somnath Koturf203af72010-10-25 23:01:03 +00002052 goto reschedule;
2053 }
2054
Selvin Xavier005d5692011-05-16 07:36:35 +00002055 if (!adapter->stats_cmd_sent) {
2056 if (lancer_chip(adapter))
2057 lancer_cmd_get_pport_stats(adapter,
2058 &adapter->stats_cmd);
2059 else
2060 be_cmd_get_stats(adapter, &adapter->stats_cmd);
2061 }
Sathya Perla3c8def92011-06-12 20:01:58 +00002062
2063 for_all_tx_queues(adapter, txo, i)
2064 be_tx_rate_update(txo);
Sathya Perla4097f662009-03-24 16:40:13 -07002065
Sathya Perla3abcded2010-10-03 22:12:27 -07002066 for_all_rx_queues(adapter, rxo, i) {
2067 be_rx_rate_update(rxo);
2068 be_rx_eqd_update(adapter, rxo);
2069
2070 if (rxo->rx_post_starved) {
2071 rxo->rx_post_starved = false;
Eric Dumazet1829b082011-03-01 05:48:12 +00002072 be_post_rx_frags(rxo, GFP_KERNEL);
Sathya Perla3abcded2010-10-03 22:12:27 -07002073 }
Sathya Perlaea1dae12009-03-19 23:56:20 -07002074 }
2075
Somnath Koturf203af72010-10-25 23:01:03 +00002076reschedule:
Ivan Vecerae74fbd032011-04-21 00:20:04 +00002077 adapter->work_counter++;
Sathya Perlaea1dae12009-03-19 23:56:20 -07002078 schedule_delayed_work(&adapter->work, msecs_to_jiffies(1000));
2079}
2080
Sathya Perla8d56ff12009-11-22 22:02:26 +00002081static void be_msix_disable(struct be_adapter *adapter)
2082{
Sathya Perlaac6a0c42011-03-21 20:49:25 +00002083 if (msix_enabled(adapter)) {
Sathya Perla8d56ff12009-11-22 22:02:26 +00002084 pci_disable_msix(adapter->pdev);
Sathya Perlaac6a0c42011-03-21 20:49:25 +00002085 adapter->num_msix_vec = 0;
Sathya Perla3abcded2010-10-03 22:12:27 -07002086 }
2087}
2088
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002089static void be_msix_enable(struct be_adapter *adapter)
2090{
Sathya Perla3abcded2010-10-03 22:12:27 -07002091#define BE_MIN_MSIX_VECTORS (1 + 1) /* Rx + Tx */
Sathya Perlaac6a0c42011-03-21 20:49:25 +00002092 int i, status, num_vec;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002093
Sathya Perlaac6a0c42011-03-21 20:49:25 +00002094 num_vec = be_num_rxqs_want(adapter) + 1;
Sathya Perla3abcded2010-10-03 22:12:27 -07002095
Sathya Perlaac6a0c42011-03-21 20:49:25 +00002096 for (i = 0; i < num_vec; i++)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002097 adapter->msix_entries[i].entry = i;
2098
Sathya Perlaac6a0c42011-03-21 20:49:25 +00002099 status = pci_enable_msix(adapter->pdev, adapter->msix_entries, num_vec);
Sathya Perla3abcded2010-10-03 22:12:27 -07002100 if (status == 0) {
2101 goto done;
2102 } else if (status >= BE_MIN_MSIX_VECTORS) {
Sathya Perlaac6a0c42011-03-21 20:49:25 +00002103 num_vec = status;
Sathya Perla3abcded2010-10-03 22:12:27 -07002104 if (pci_enable_msix(adapter->pdev, adapter->msix_entries,
Sathya Perlaac6a0c42011-03-21 20:49:25 +00002105 num_vec) == 0)
Sathya Perla3abcded2010-10-03 22:12:27 -07002106 goto done;
Sathya Perla3abcded2010-10-03 22:12:27 -07002107 }
2108 return;
2109done:
Sathya Perlaac6a0c42011-03-21 20:49:25 +00002110 adapter->num_msix_vec = num_vec;
2111 return;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002112}
2113
Sarveshwar Bandiba343c72010-03-31 02:56:12 +00002114static void be_sriov_enable(struct be_adapter *adapter)
2115{
Sarveshwar Bandi344dbf12010-07-09 01:43:55 +00002116 be_check_sriov_fn_type(adapter);
Ajit Khaparde6dedec82010-07-29 06:15:32 +00002117#ifdef CONFIG_PCI_IOV
Sarveshwar Bandiba343c72010-03-31 02:56:12 +00002118 if (be_physfn(adapter) && num_vfs) {
Ajit Khaparde81be8f02011-04-06 18:08:17 +00002119 int status, pos;
2120 u16 nvfs;
2121
2122 pos = pci_find_ext_capability(adapter->pdev,
2123 PCI_EXT_CAP_ID_SRIOV);
2124 pci_read_config_word(adapter->pdev,
2125 pos + PCI_SRIOV_TOTAL_VF, &nvfs);
2126
2127 if (num_vfs > nvfs) {
2128 dev_info(&adapter->pdev->dev,
2129 "Device supports %d VFs and not %d\n",
2130 nvfs, num_vfs);
2131 num_vfs = nvfs;
2132 }
Ajit Khaparde6dedec82010-07-29 06:15:32 +00002133
Sarveshwar Bandiba343c72010-03-31 02:56:12 +00002134 status = pci_enable_sriov(adapter->pdev, num_vfs);
2135 adapter->sriov_enabled = status ? false : true;
2136 }
2137#endif
Sarveshwar Bandiba343c72010-03-31 02:56:12 +00002138}
2139
2140static void be_sriov_disable(struct be_adapter *adapter)
2141{
2142#ifdef CONFIG_PCI_IOV
2143 if (adapter->sriov_enabled) {
2144 pci_disable_sriov(adapter->pdev);
2145 adapter->sriov_enabled = false;
2146 }
2147#endif
2148}
2149
Sathya Perlafe6d2a32010-11-21 23:25:50 +00002150static inline int be_msix_vec_get(struct be_adapter *adapter,
2151 struct be_eq_obj *eq_obj)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002152{
Padmanabh Ratnakarecd62102011-04-03 01:54:11 +00002153 return adapter->msix_entries[eq_obj->eq_idx].vector;
Sathya Perlab628bde2009-08-17 00:58:26 +00002154}
2155
2156static int be_request_irq(struct be_adapter *adapter,
2157 struct be_eq_obj *eq_obj,
Sathya Perla3abcded2010-10-03 22:12:27 -07002158 void *handler, char *desc, void *context)
Sathya Perlab628bde2009-08-17 00:58:26 +00002159{
2160 struct net_device *netdev = adapter->netdev;
2161 int vec;
2162
2163 sprintf(eq_obj->desc, "%s-%s", netdev->name, desc);
Sathya Perlafe6d2a32010-11-21 23:25:50 +00002164 vec = be_msix_vec_get(adapter, eq_obj);
Sathya Perla3abcded2010-10-03 22:12:27 -07002165 return request_irq(vec, handler, 0, eq_obj->desc, context);
Sathya Perlab628bde2009-08-17 00:58:26 +00002166}
2167
Sathya Perla3abcded2010-10-03 22:12:27 -07002168static void be_free_irq(struct be_adapter *adapter, struct be_eq_obj *eq_obj,
2169 void *context)
Sathya Perlab628bde2009-08-17 00:58:26 +00002170{
Sathya Perlafe6d2a32010-11-21 23:25:50 +00002171 int vec = be_msix_vec_get(adapter, eq_obj);
Sathya Perla3abcded2010-10-03 22:12:27 -07002172 free_irq(vec, context);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002173}
2174
2175static int be_msix_register(struct be_adapter *adapter)
2176{
Sathya Perla3abcded2010-10-03 22:12:27 -07002177 struct be_rx_obj *rxo;
2178 int status, i;
2179 char qname[10];
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002180
Sathya Perla3abcded2010-10-03 22:12:27 -07002181 status = be_request_irq(adapter, &adapter->tx_eq, be_msix_tx_mcc, "tx",
2182 adapter);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002183 if (status)
2184 goto err;
2185
Sathya Perla3abcded2010-10-03 22:12:27 -07002186 for_all_rx_queues(adapter, rxo, i) {
2187 sprintf(qname, "rxq%d", i);
2188 status = be_request_irq(adapter, &rxo->rx_eq, be_msix_rx,
2189 qname, rxo);
2190 if (status)
2191 goto err_msix;
2192 }
Sathya Perlab628bde2009-08-17 00:58:26 +00002193
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002194 return 0;
Sathya Perlab628bde2009-08-17 00:58:26 +00002195
Sathya Perla3abcded2010-10-03 22:12:27 -07002196err_msix:
2197 be_free_irq(adapter, &adapter->tx_eq, adapter);
2198
2199 for (i--, rxo = &adapter->rx_obj[i]; i >= 0; i--, rxo--)
2200 be_free_irq(adapter, &rxo->rx_eq, rxo);
2201
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002202err:
2203 dev_warn(&adapter->pdev->dev,
2204 "MSIX Request IRQ failed - err %d\n", status);
Sathya Perlaac6a0c42011-03-21 20:49:25 +00002205 be_msix_disable(adapter);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002206 return status;
2207}
2208
2209static int be_irq_register(struct be_adapter *adapter)
2210{
2211 struct net_device *netdev = adapter->netdev;
2212 int status;
2213
Sathya Perlaac6a0c42011-03-21 20:49:25 +00002214 if (msix_enabled(adapter)) {
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002215 status = be_msix_register(adapter);
2216 if (status == 0)
2217 goto done;
Sarveshwar Bandiba343c72010-03-31 02:56:12 +00002218 /* INTx is not supported for VF */
2219 if (!be_physfn(adapter))
2220 return status;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002221 }
2222
2223 /* INTx */
2224 netdev->irq = adapter->pdev->irq;
2225 status = request_irq(netdev->irq, be_intx, IRQF_SHARED, netdev->name,
2226 adapter);
2227 if (status) {
2228 dev_err(&adapter->pdev->dev,
2229 "INTx request IRQ failed - err %d\n", status);
2230 return status;
2231 }
2232done:
2233 adapter->isr_registered = true;
2234 return 0;
2235}
2236
2237static void be_irq_unregister(struct be_adapter *adapter)
2238{
2239 struct net_device *netdev = adapter->netdev;
Sathya Perla3abcded2010-10-03 22:12:27 -07002240 struct be_rx_obj *rxo;
2241 int i;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002242
2243 if (!adapter->isr_registered)
2244 return;
2245
2246 /* INTx */
Sathya Perlaac6a0c42011-03-21 20:49:25 +00002247 if (!msix_enabled(adapter)) {
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002248 free_irq(netdev->irq, adapter);
2249 goto done;
2250 }
2251
2252 /* MSIx */
Sathya Perla3abcded2010-10-03 22:12:27 -07002253 be_free_irq(adapter, &adapter->tx_eq, adapter);
2254
2255 for_all_rx_queues(adapter, rxo, i)
2256 be_free_irq(adapter, &rxo->rx_eq, rxo);
2257
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002258done:
2259 adapter->isr_registered = false;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002260}
2261
Sathya Perla482c9e72011-06-29 23:33:17 +00002262static void be_rx_queues_clear(struct be_adapter *adapter)
2263{
2264 struct be_queue_info *q;
2265 struct be_rx_obj *rxo;
2266 int i;
2267
2268 for_all_rx_queues(adapter, rxo, i) {
2269 q = &rxo->q;
2270 if (q->created) {
2271 be_cmd_rxq_destroy(adapter, q);
2272 /* After the rxq is invalidated, wait for a grace time
2273 * of 1ms for all dma to end and the flush compl to
2274 * arrive
2275 */
2276 mdelay(1);
2277 be_rx_q_clean(adapter, rxo);
2278 }
2279
2280 /* Clear any residual events */
2281 q = &rxo->rx_eq.q;
2282 if (q->created)
2283 be_eq_clean(adapter, &rxo->rx_eq);
2284 }
2285}
2286
Sathya Perla889cd4b2010-05-30 23:33:45 +00002287static int be_close(struct net_device *netdev)
2288{
2289 struct be_adapter *adapter = netdev_priv(netdev);
Sathya Perla3abcded2010-10-03 22:12:27 -07002290 struct be_rx_obj *rxo;
Sathya Perla3c8def92011-06-12 20:01:58 +00002291 struct be_tx_obj *txo;
Sathya Perla889cd4b2010-05-30 23:33:45 +00002292 struct be_eq_obj *tx_eq = &adapter->tx_eq;
Sathya Perla3abcded2010-10-03 22:12:27 -07002293 int vec, i;
Sathya Perla889cd4b2010-05-30 23:33:45 +00002294
Sathya Perla889cd4b2010-05-30 23:33:45 +00002295 be_async_mcc_disable(adapter);
2296
Sathya Perla889cd4b2010-05-30 23:33:45 +00002297 netif_carrier_off(netdev);
2298 adapter->link_up = false;
2299
Sathya Perlafe6d2a32010-11-21 23:25:50 +00002300 if (!lancer_chip(adapter))
2301 be_intr_set(adapter, false);
Sathya Perla889cd4b2010-05-30 23:33:45 +00002302
Padmanabh Ratnakar63fcb272011-03-07 03:09:17 +00002303 for_all_rx_queues(adapter, rxo, i)
2304 napi_disable(&rxo->rx_eq.napi);
2305
2306 napi_disable(&tx_eq->napi);
2307
2308 if (lancer_chip(adapter)) {
Padmanabh Ratnakar63fcb272011-03-07 03:09:17 +00002309 be_cq_notify(adapter, adapter->mcc_obj.cq.id, false, 0);
2310 for_all_rx_queues(adapter, rxo, i)
2311 be_cq_notify(adapter, rxo->cq.id, false, 0);
Sathya Perla3c8def92011-06-12 20:01:58 +00002312 for_all_tx_queues(adapter, txo, i)
2313 be_cq_notify(adapter, txo->cq.id, false, 0);
Padmanabh Ratnakar63fcb272011-03-07 03:09:17 +00002314 }
2315
Sathya Perlaac6a0c42011-03-21 20:49:25 +00002316 if (msix_enabled(adapter)) {
Sathya Perlafe6d2a32010-11-21 23:25:50 +00002317 vec = be_msix_vec_get(adapter, tx_eq);
Sathya Perla889cd4b2010-05-30 23:33:45 +00002318 synchronize_irq(vec);
Sathya Perla3abcded2010-10-03 22:12:27 -07002319
2320 for_all_rx_queues(adapter, rxo, i) {
Sathya Perlafe6d2a32010-11-21 23:25:50 +00002321 vec = be_msix_vec_get(adapter, &rxo->rx_eq);
Sathya Perla3abcded2010-10-03 22:12:27 -07002322 synchronize_irq(vec);
2323 }
Sathya Perla889cd4b2010-05-30 23:33:45 +00002324 } else {
2325 synchronize_irq(netdev->irq);
2326 }
2327 be_irq_unregister(adapter);
2328
Sathya Perla889cd4b2010-05-30 23:33:45 +00002329 /* Wait for all pending tx completions to arrive so that
2330 * all tx skbs are freed.
2331 */
Sathya Perla3c8def92011-06-12 20:01:58 +00002332 for_all_tx_queues(adapter, txo, i)
2333 be_tx_compl_clean(adapter, txo);
Sathya Perla889cd4b2010-05-30 23:33:45 +00002334
Sathya Perla482c9e72011-06-29 23:33:17 +00002335 be_rx_queues_clear(adapter);
2336 return 0;
2337}
2338
2339static int be_rx_queues_setup(struct be_adapter *adapter)
2340{
2341 struct be_rx_obj *rxo;
2342 int rc, i;
2343 u8 rsstable[MAX_RSS_QS];
2344
2345 for_all_rx_queues(adapter, rxo, i) {
2346 rc = be_cmd_rxq_create(adapter, &rxo->q, rxo->cq.id,
2347 rx_frag_size, BE_MAX_JUMBO_FRAME_SIZE,
2348 adapter->if_handle,
2349 (i > 0) ? 1 : 0/* rss enable */, &rxo->rss_id);
2350 if (rc)
2351 return rc;
2352 }
2353
2354 if (be_multi_rxq(adapter)) {
2355 for_all_rss_queues(adapter, rxo, i)
2356 rsstable[i] = rxo->rss_id;
2357
2358 rc = be_cmd_rss_config(adapter, rsstable,
2359 adapter->num_rx_qs - 1);
2360 if (rc)
2361 return rc;
2362 }
2363
2364 /* First time posting */
2365 for_all_rx_queues(adapter, rxo, i) {
2366 be_post_rx_frags(rxo, GFP_KERNEL);
2367 napi_enable(&rxo->rx_eq.napi);
2368 }
Sathya Perla889cd4b2010-05-30 23:33:45 +00002369 return 0;
2370}
2371
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002372static int be_open(struct net_device *netdev)
2373{
2374 struct be_adapter *adapter = netdev_priv(netdev);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002375 struct be_eq_obj *tx_eq = &adapter->tx_eq;
Sathya Perla3abcded2010-10-03 22:12:27 -07002376 struct be_rx_obj *rxo;
Sathya Perlaa8f447bd2009-06-18 00:10:27 +00002377 bool link_up;
Sathya Perla3abcded2010-10-03 22:12:27 -07002378 int status, i;
Sarveshwar Bandi0388f252009-10-28 04:15:20 -07002379 u8 mac_speed;
2380 u16 link_speed;
Sathya Perla5fb379e2009-06-18 00:02:59 +00002381
Sathya Perla482c9e72011-06-29 23:33:17 +00002382 status = be_rx_queues_setup(adapter);
2383 if (status)
2384 goto err;
2385
Sathya Perla5fb379e2009-06-18 00:02:59 +00002386 napi_enable(&tx_eq->napi);
2387
2388 be_irq_register(adapter);
2389
Sathya Perlafe6d2a32010-11-21 23:25:50 +00002390 if (!lancer_chip(adapter))
2391 be_intr_set(adapter, true);
Sathya Perla5fb379e2009-06-18 00:02:59 +00002392
2393 /* The evt queues are created in unarmed state; arm them */
Sathya Perla3abcded2010-10-03 22:12:27 -07002394 for_all_rx_queues(adapter, rxo, i) {
2395 be_eq_notify(adapter, rxo->rx_eq.q.id, true, false, 0);
2396 be_cq_notify(adapter, rxo->cq.id, true, 0);
2397 }
Sathya Perla8788fdc2009-07-27 22:52:03 +00002398 be_eq_notify(adapter, tx_eq->q.id, true, false, 0);
Sathya Perla5fb379e2009-06-18 00:02:59 +00002399
Sathya Perla7a1e9b22010-02-17 01:35:11 +00002400 /* Now that interrupts are on we can process async mcc */
2401 be_async_mcc_enable(adapter);
2402
Sarveshwar Bandi0388f252009-10-28 04:15:20 -07002403 status = be_cmd_link_status_query(adapter, &link_up, &mac_speed,
Ajit Khaparde187e8752011-04-19 12:11:46 +00002404 &link_speed, 0);
Sathya Perlaa8f447bd2009-06-18 00:10:27 +00002405 if (status)
Sathya Perla889cd4b2010-05-30 23:33:45 +00002406 goto err;
Sathya Perlaa8f447bd2009-06-18 00:10:27 +00002407 be_link_status_update(adapter, link_up);
Sathya Perla5fb379e2009-06-18 00:02:59 +00002408
Sarveshwar Bandiba343c72010-03-31 02:56:12 +00002409 if (be_physfn(adapter)) {
Ajit Khaparde1da87b72010-07-23 01:51:22 +00002410 status = be_vid_config(adapter, false, 0);
Sathya Perla889cd4b2010-05-30 23:33:45 +00002411 if (status)
2412 goto err;
2413
Sarveshwar Bandiba343c72010-03-31 02:56:12 +00002414 status = be_cmd_set_flow_control(adapter,
2415 adapter->tx_fc, adapter->rx_fc);
2416 if (status)
Sathya Perla889cd4b2010-05-30 23:33:45 +00002417 goto err;
Sarveshwar Bandiba343c72010-03-31 02:56:12 +00002418 }
Ajit Khaparde4f2aa892009-11-06 02:07:32 +00002419
Sathya Perla889cd4b2010-05-30 23:33:45 +00002420 return 0;
2421err:
2422 be_close(adapter->netdev);
2423 return -EIO;
Sathya Perla5fb379e2009-06-18 00:02:59 +00002424}
2425
static int be_setup_wol(struct be_adapter *adapter, bool enable)
{
	struct be_dma_mem cmd;
	int status = 0;
	u8 mac[ETH_ALEN];

	memset(mac, 0, ETH_ALEN);

	cmd.size = sizeof(struct be_cmd_req_acpi_wol_magic_config);
	cmd.va = dma_alloc_coherent(&adapter->pdev->dev, cmd.size, &cmd.dma,
				    GFP_KERNEL);
	if (cmd.va == NULL)
		return -1;
	memset(cmd.va, 0, cmd.size);

	if (enable) {
		status = pci_write_config_dword(adapter->pdev,
			PCICFG_PM_CONTROL_OFFSET, PCICFG_PM_CONTROL_MASK);
		if (status) {
			dev_err(&adapter->pdev->dev,
				"Could not enable Wake-on-LAN\n");
			dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va,
					  cmd.dma);
			return status;
		}
		status = be_cmd_enable_magic_wol(adapter,
				adapter->netdev->dev_addr, &cmd);
		pci_enable_wake(adapter->pdev, PCI_D3hot, 1);
		pci_enable_wake(adapter->pdev, PCI_D3cold, 1);
	} else {
		status = be_cmd_enable_magic_wol(adapter, mac, &cmd);
		pci_enable_wake(adapter->pdev, PCI_D3hot, 0);
		pci_enable_wake(adapter->pdev, PCI_D3cold, 0);
	}

	dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va, cmd.dma);
	return status;
}

/*
 * Generate a seed MAC address from the PF MAC address using jhash.
 * MAC addresses for the VFs are assigned incrementally starting from the
 * seed. These addresses are programmed in the ASIC by the PF and the VF
 * driver queries for the MAC address during its probe.
 */
static inline int be_vf_eth_addr_config(struct be_adapter *adapter)
{
	u32 vf = 0;
	int status = 0;
	u8 mac[ETH_ALEN];

	be_vf_eth_addr_generate(adapter, mac);

	for (vf = 0; vf < num_vfs; vf++) {
		status = be_cmd_pmac_add(adapter, mac,
					 adapter->vf_cfg[vf].vf_if_handle,
					 &adapter->vf_cfg[vf].vf_pmac_id,
					 vf + 1);
		if (status)
			dev_err(&adapter->pdev->dev,
				"MAC address add failed for VF %d\n", vf);
		else
			memcpy(adapter->vf_cfg[vf].vf_mac_addr, mac, ETH_ALEN);

		mac[5] += 1;
	}
	return status;
}

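/* Delete the MAC filters the PF programmed on behalf of the VFs. */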
static inline void be_vf_eth_addr_rem(struct be_adapter *adapter)
{
	u32 vf;

	for (vf = 0; vf < num_vfs; vf++) {
		if (adapter->vf_cfg[vf].vf_pmac_id != BE_INVALID_PMAC_ID)
			be_cmd_pmac_del(adapter,
					adapter->vf_cfg[vf].vf_if_handle,
					adapter->vf_cfg[vf].vf_pmac_id, vf + 1);
	}
}

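/* Create the PF interface (and one interface per VF when SR-IOV is on),
 * then bring up the TX, RX and MCC queues. On failure, everything created
 * so far is torn down in reverse order.
 */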
static int be_setup(struct be_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	u32 cap_flags, en_flags, vf = 0;
	int status;
	u8 mac[ETH_ALEN];

	cap_flags = en_flags = BE_IF_FLAGS_UNTAGGED |
				BE_IF_FLAGS_BROADCAST |
				BE_IF_FLAGS_MULTICAST;

	if (be_physfn(adapter)) {
		cap_flags |= BE_IF_FLAGS_MCAST_PROMISCUOUS |
				BE_IF_FLAGS_PROMISCUOUS |
				BE_IF_FLAGS_PASS_L3L4_ERRORS;
		en_flags |= BE_IF_FLAGS_PASS_L3L4_ERRORS;

		if (adapter->function_caps & BE_FUNCTION_CAPS_RSS) {
			cap_flags |= BE_IF_FLAGS_RSS;
			en_flags |= BE_IF_FLAGS_RSS;
		}
	}

	status = be_cmd_if_create(adapter, cap_flags, en_flags,
			netdev->dev_addr, false/* pmac_invalid */,
			&adapter->if_handle, &adapter->pmac_id, 0);
	if (status != 0)
		goto do_none;

	if (be_physfn(adapter)) {
		if (adapter->sriov_enabled) {
			while (vf < num_vfs) {
				cap_flags = en_flags = BE_IF_FLAGS_UNTAGGED |
							BE_IF_FLAGS_BROADCAST;
				status = be_cmd_if_create(adapter, cap_flags,
					en_flags, mac, true,
					&adapter->vf_cfg[vf].vf_if_handle,
					NULL, vf + 1);
				if (status) {
					dev_err(&adapter->pdev->dev,
					"Interface Create failed for VF %d\n",
					vf);
					goto if_destroy;
				}
				adapter->vf_cfg[vf].vf_pmac_id =
							BE_INVALID_PMAC_ID;
				vf++;
			}
		}
	} else {
		status = be_cmd_mac_addr_query(adapter, mac,
			MAC_ADDRESS_TYPE_NETWORK, false, adapter->if_handle);
		if (!status) {
			memcpy(adapter->netdev->dev_addr, mac, ETH_ALEN);
			memcpy(adapter->netdev->perm_addr, mac, ETH_ALEN);
		}
	}

	status = be_tx_queues_create(adapter);
	if (status != 0)
		goto if_destroy;

	status = be_rx_queues_create(adapter);
	if (status != 0)
		goto tx_qs_destroy;

	/* Allow all priorities by default. A GRP5 evt may modify this */
	adapter->vlan_prio_bmap = 0xff;

	status = be_mcc_queues_create(adapter);
	if (status != 0)
		goto rx_qs_destroy;

	adapter->link_speed = -1;

	return 0;

rx_qs_destroy:
	be_rx_queues_destroy(adapter);
tx_qs_destroy:
	be_tx_queues_destroy(adapter);
if_destroy:
	if (be_physfn(adapter) && adapter->sriov_enabled)
		for (vf = 0; vf < num_vfs; vf++)
			if (adapter->vf_cfg[vf].vf_if_handle)
				be_cmd_if_destroy(adapter,
					adapter->vf_cfg[vf].vf_if_handle,
					vf + 1);
	be_cmd_if_destroy(adapter, adapter->if_handle, 0);
do_none:
	return status;
}

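/* Undo be_setup(): destroy the queues and interfaces, and tell the
 * firmware we are done issuing commands.
 */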
static int be_clear(struct be_adapter *adapter)
{
	int vf;

	if (be_physfn(adapter) && adapter->sriov_enabled)
		be_vf_eth_addr_rem(adapter);

	be_mcc_queues_destroy(adapter);
	be_rx_queues_destroy(adapter);
	be_tx_queues_destroy(adapter);
	adapter->eq_next_idx = 0;

	if (be_physfn(adapter) && adapter->sriov_enabled)
		for (vf = 0; vf < num_vfs; vf++)
			if (adapter->vf_cfg[vf].vf_if_handle)
				be_cmd_if_destroy(adapter,
					adapter->vf_cfg[vf].vf_if_handle,
					vf + 1);

	be_cmd_if_destroy(adapter, adapter->if_handle, 0);

	/* tell fw we're done with firing cmds */
	be_cmd_fw_clean(adapter);
	return 0;
}

#define FW_FILE_HDR_SIGN 	"ServerEngines Corp. "
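/* Compare the CRC stored at the end of the redboot region in flash with
 * the CRC of the new image; redboot is flashed only when they differ.
 */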
static bool be_flash_redboot(struct be_adapter *adapter,
			const u8 *p, u32 img_start, int image_size,
			int hdr_size)
{
	u32 crc_offset;
	u8 flashed_crc[4];
	int status;

	crc_offset = hdr_size + img_start + image_size - 4;

	p += crc_offset;

	status = be_cmd_get_flash_crc(adapter, flashed_crc,
			(image_size - 4));
	if (status) {
		dev_err(&adapter->pdev->dev,
		"could not get crc from flash, not flashing redboot\n");
		return false;
	}

	/* update redboot only if crc does not match */
	if (!memcmp(flashed_crc, p, 4))
		return false;
	else
		return true;
}

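/* Walk the per-generation component table and write each section of the
 * UFI image to flash in 32KB chunks; the last chunk of a section is sent
 * with FLASHROM_OPER_FLASH to commit it, earlier ones with
 * FLASHROM_OPER_SAVE.
 */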
static int be_flash_data(struct be_adapter *adapter,
			const struct firmware *fw,
			struct be_dma_mem *flash_cmd, int num_of_images)
{
	int status = 0, i, filehdr_size = 0;
	u32 total_bytes = 0, flash_op;
	int num_bytes;
	const u8 *p = fw->data;
	struct be_cmd_write_flashrom *req = flash_cmd->va;
	const struct flash_comp *pflashcomp;
	int num_comp;

	static const struct flash_comp gen3_flash_types[9] = {
		{ FLASH_iSCSI_PRIMARY_IMAGE_START_g3, IMG_TYPE_ISCSI_ACTIVE,
			FLASH_IMAGE_MAX_SIZE_g3},
		{ FLASH_REDBOOT_START_g3, IMG_TYPE_REDBOOT,
			FLASH_REDBOOT_IMAGE_MAX_SIZE_g3},
		{ FLASH_iSCSI_BIOS_START_g3, IMG_TYPE_BIOS,
			FLASH_BIOS_IMAGE_MAX_SIZE_g3},
		{ FLASH_PXE_BIOS_START_g3, IMG_TYPE_PXE_BIOS,
			FLASH_BIOS_IMAGE_MAX_SIZE_g3},
		{ FLASH_FCoE_BIOS_START_g3, IMG_TYPE_FCOE_BIOS,
			FLASH_BIOS_IMAGE_MAX_SIZE_g3},
		{ FLASH_iSCSI_BACKUP_IMAGE_START_g3, IMG_TYPE_ISCSI_BACKUP,
			FLASH_IMAGE_MAX_SIZE_g3},
		{ FLASH_FCoE_PRIMARY_IMAGE_START_g3, IMG_TYPE_FCOE_FW_ACTIVE,
			FLASH_IMAGE_MAX_SIZE_g3},
		{ FLASH_FCoE_BACKUP_IMAGE_START_g3, IMG_TYPE_FCOE_FW_BACKUP,
			FLASH_IMAGE_MAX_SIZE_g3},
		{ FLASH_NCSI_START_g3, IMG_TYPE_NCSI_FW,
			FLASH_NCSI_IMAGE_MAX_SIZE_g3}
	};
	static const struct flash_comp gen2_flash_types[8] = {
		{ FLASH_iSCSI_PRIMARY_IMAGE_START_g2, IMG_TYPE_ISCSI_ACTIVE,
			FLASH_IMAGE_MAX_SIZE_g2},
		{ FLASH_REDBOOT_START_g2, IMG_TYPE_REDBOOT,
			FLASH_REDBOOT_IMAGE_MAX_SIZE_g2},
		{ FLASH_iSCSI_BIOS_START_g2, IMG_TYPE_BIOS,
			FLASH_BIOS_IMAGE_MAX_SIZE_g2},
		{ FLASH_PXE_BIOS_START_g2, IMG_TYPE_PXE_BIOS,
			FLASH_BIOS_IMAGE_MAX_SIZE_g2},
		{ FLASH_FCoE_BIOS_START_g2, IMG_TYPE_FCOE_BIOS,
			FLASH_BIOS_IMAGE_MAX_SIZE_g2},
		{ FLASH_iSCSI_BACKUP_IMAGE_START_g2, IMG_TYPE_ISCSI_BACKUP,
			FLASH_IMAGE_MAX_SIZE_g2},
		{ FLASH_FCoE_PRIMARY_IMAGE_START_g2, IMG_TYPE_FCOE_FW_ACTIVE,
			FLASH_IMAGE_MAX_SIZE_g2},
		{ FLASH_FCoE_BACKUP_IMAGE_START_g2, IMG_TYPE_FCOE_FW_BACKUP,
			FLASH_IMAGE_MAX_SIZE_g2}
	};

	if (adapter->generation == BE_GEN3) {
		pflashcomp = gen3_flash_types;
		filehdr_size = sizeof(struct flash_file_hdr_g3);
		num_comp = ARRAY_SIZE(gen3_flash_types);
	} else {
		pflashcomp = gen2_flash_types;
		filehdr_size = sizeof(struct flash_file_hdr_g2);
		num_comp = ARRAY_SIZE(gen2_flash_types);
	}
	for (i = 0; i < num_comp; i++) {
		if ((pflashcomp[i].optype == IMG_TYPE_NCSI_FW) &&
				memcmp(adapter->fw_ver, "3.102.148.0", 11) < 0)
			continue;
		if ((pflashcomp[i].optype == IMG_TYPE_REDBOOT) &&
			(!be_flash_redboot(adapter, fw->data,
			pflashcomp[i].offset, pflashcomp[i].size, filehdr_size +
			(num_of_images * sizeof(struct image_hdr)))))
			continue;
		p = fw->data;
		p += filehdr_size + pflashcomp[i].offset
			+ (num_of_images * sizeof(struct image_hdr));
		if (p + pflashcomp[i].size > fw->data + fw->size)
			return -1;
		total_bytes = pflashcomp[i].size;
		while (total_bytes) {
			if (total_bytes > 32*1024)
				num_bytes = 32*1024;
			else
				num_bytes = total_bytes;
			total_bytes -= num_bytes;

			if (!total_bytes)
				flash_op = FLASHROM_OPER_FLASH;
			else
				flash_op = FLASHROM_OPER_SAVE;
			memcpy(req->params.data_buf, p, num_bytes);
			p += num_bytes;
			status = be_cmd_write_flashrom(adapter, flash_cmd,
				pflashcomp[i].optype, flash_op, num_bytes);
			if (status) {
				dev_err(&adapter->pdev->dev,
					"cmd to write to flash rom failed.\n");
				return -1;
			}
		}
	}
	return 0;
}

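/* Derive the UFI image generation from the first character of the build
 * string in the flash file header.
 */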
static int get_ufigen_type(struct flash_file_hdr_g2 *fhdr)
{
	if (fhdr == NULL)
		return 0;
	if (fhdr->build[0] == '3')
		return BE_GEN3;
	else if (fhdr->build[0] == '2')
		return BE_GEN2;
	else
		return 0;
}

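/* Lancer downloads firmware as a single object: the image is copied to
 * the adapter in 32KB chunks via write_object cmds, then a zero-length
 * write at the final offset commits it.
 */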
static int lancer_fw_download(struct be_adapter *adapter,
				const struct firmware *fw)
{
#define LANCER_FW_DOWNLOAD_CHUNK	(32 * 1024)
#define LANCER_FW_DOWNLOAD_LOCATION	"/prg"
	struct be_dma_mem flash_cmd;
	const u8 *data_ptr = NULL;
	u8 *dest_image_ptr = NULL;
	size_t image_size = 0;
	u32 chunk_size = 0;
	u32 data_written = 0;
	u32 offset = 0;
	int status = 0;
	u8 add_status = 0;

	if (!IS_ALIGNED(fw->size, sizeof(u32))) {
		dev_err(&adapter->pdev->dev,
			"FW Image not properly aligned. "
			"Length must be 4 byte aligned.\n");
		status = -EINVAL;
		goto lancer_fw_exit;
	}

	flash_cmd.size = sizeof(struct lancer_cmd_req_write_object)
				+ LANCER_FW_DOWNLOAD_CHUNK;
	flash_cmd.va = dma_alloc_coherent(&adapter->pdev->dev, flash_cmd.size,
						&flash_cmd.dma, GFP_KERNEL);
	if (!flash_cmd.va) {
		status = -ENOMEM;
		dev_err(&adapter->pdev->dev,
			"Memory allocation failure while flashing\n");
		goto lancer_fw_exit;
	}

	dest_image_ptr = flash_cmd.va +
				sizeof(struct lancer_cmd_req_write_object);
	image_size = fw->size;
	data_ptr = fw->data;

	while (image_size) {
		chunk_size = min_t(u32, image_size, LANCER_FW_DOWNLOAD_CHUNK);

		/* Copy the image chunk content. */
		memcpy(dest_image_ptr, data_ptr, chunk_size);

		status = lancer_cmd_write_object(adapter, &flash_cmd,
				chunk_size, offset, LANCER_FW_DOWNLOAD_LOCATION,
				&data_written, &add_status);

		if (status)
			break;

		offset += data_written;
		data_ptr += data_written;
		image_size -= data_written;
	}

	if (!status) {
		/* Commit the FW written */
		status = lancer_cmd_write_object(adapter, &flash_cmd,
					0, offset, LANCER_FW_DOWNLOAD_LOCATION,
					&data_written, &add_status);
	}

	dma_free_coherent(&adapter->pdev->dev, flash_cmd.size, flash_cmd.va,
				flash_cmd.dma);
	if (status) {
		dev_err(&adapter->pdev->dev,
			"Firmware load error. "
			"Status code: 0x%x Additional Status: 0x%x\n",
			status, add_status);
		goto lancer_fw_exit;
	}

	dev_info(&adapter->pdev->dev, "Firmware flashed successfully\n");
lancer_fw_exit:
	return status;
}

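/* BE2/BE3 firmware download: check that the UFI generation matches the
 * adapter generation, then flash each image component via be_flash_data().
 */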
static int be_fw_download(struct be_adapter *adapter, const struct firmware *fw)
{
	struct flash_file_hdr_g2 *fhdr;
	struct flash_file_hdr_g3 *fhdr3;
	struct image_hdr *img_hdr_ptr = NULL;
	struct be_dma_mem flash_cmd;
	const u8 *p;
	int status = 0, i = 0, num_imgs = 0;

	p = fw->data;
	fhdr = (struct flash_file_hdr_g2 *) p;

	flash_cmd.size = sizeof(struct be_cmd_write_flashrom) + 32*1024;
	flash_cmd.va = dma_alloc_coherent(&adapter->pdev->dev, flash_cmd.size,
					  &flash_cmd.dma, GFP_KERNEL);
	if (!flash_cmd.va) {
		status = -ENOMEM;
		dev_err(&adapter->pdev->dev,
			"Memory allocation failure while flashing\n");
		goto be_fw_exit;
	}

	if ((adapter->generation == BE_GEN3) &&
			(get_ufigen_type(fhdr) == BE_GEN3)) {
		fhdr3 = (struct flash_file_hdr_g3 *) fw->data;
		num_imgs = le32_to_cpu(fhdr3->num_imgs);
		for (i = 0; i < num_imgs; i++) {
			img_hdr_ptr = (struct image_hdr *) (fw->data +
					(sizeof(struct flash_file_hdr_g3) +
					 i * sizeof(struct image_hdr)));
			if (le32_to_cpu(img_hdr_ptr->imageid) == 1)
				status = be_flash_data(adapter, fw, &flash_cmd,
							num_imgs);
		}
	} else if ((adapter->generation == BE_GEN2) &&
			(get_ufigen_type(fhdr) == BE_GEN2)) {
		status = be_flash_data(adapter, fw, &flash_cmd, 0);
	} else {
		dev_err(&adapter->pdev->dev,
			"UFI and Interface are not compatible for flashing\n");
		status = -1;
	}

	dma_free_coherent(&adapter->pdev->dev, flash_cmd.size, flash_cmd.va,
			  flash_cmd.dma);
	if (status) {
		dev_err(&adapter->pdev->dev, "Firmware load error\n");
		goto be_fw_exit;
	}

	dev_info(&adapter->pdev->dev, "Firmware flashed successfully\n");

be_fw_exit:
	return status;
}

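/* Entry point for ethtool flashing: fetch the image via request_firmware()
 * and dispatch to the Lancer or BE download path.
 */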
int be_load_fw(struct be_adapter *adapter, u8 *fw_file)
{
	const struct firmware *fw;
	int status;

	if (!netif_running(adapter->netdev)) {
		dev_err(&adapter->pdev->dev,
			"Firmware load not allowed (interface is down)\n");
		return -1;
	}

	status = request_firmware(&fw, fw_file, &adapter->pdev->dev);
	if (status)
		goto fw_exit;

	dev_info(&adapter->pdev->dev, "Flashing firmware file %s\n", fw_file);

	if (lancer_chip(adapter))
		status = lancer_fw_download(adapter, fw);
	else
		status = be_fw_download(adapter, fw);

fw_exit:
	release_firmware(fw);
	return status;
}

static struct net_device_ops be_netdev_ops = {
	.ndo_open		= be_open,
	.ndo_stop		= be_close,
	.ndo_start_xmit		= be_xmit,
	.ndo_set_rx_mode	= be_set_multicast_list,
	.ndo_set_mac_address	= be_mac_addr_set,
	.ndo_change_mtu		= be_change_mtu,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_vlan_rx_add_vid	= be_vlan_add_vid,
	.ndo_vlan_rx_kill_vid	= be_vlan_rem_vid,
	.ndo_set_vf_mac		= be_set_vf_mac,
	.ndo_set_vf_vlan	= be_set_vf_vlan,
	.ndo_set_vf_tx_rate	= be_set_vf_tx_rate,
	.ndo_get_vf_config	= be_get_vf_config
};

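/* Advertise offload features, set flow-control and GSO defaults, install
 * the netdev/ethtool ops and register the NAPI contexts.
 */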
static void be_netdev_init(struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_rx_obj *rxo;
	int i;

	netdev->hw_features |= NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6 |
		NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_RXCSUM |
		NETIF_F_HW_VLAN_TX;
	if (be_multi_rxq(adapter))
		netdev->hw_features |= NETIF_F_RXHASH;

	netdev->features |= netdev->hw_features |
		NETIF_F_HW_VLAN_RX | NETIF_F_HW_VLAN_FILTER;

	netdev->vlan_features |= NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6 |
		NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;

	netdev->flags |= IFF_MULTICAST;

	/* Default settings for Rx and Tx flow control */
	adapter->rx_fc = true;
	adapter->tx_fc = true;

	netif_set_gso_max_size(netdev, 65535);

	BE_SET_NETDEV_OPS(netdev, &be_netdev_ops);

	SET_ETHTOOL_OPS(netdev, &be_ethtool_ops);

	for_all_rx_queues(adapter, rxo, i)
		netif_napi_add(netdev, &rxo->rx_eq.napi, be_poll_rx,
			BE_NAPI_WEIGHT);

	netif_napi_add(netdev, &adapter->tx_eq.napi, be_poll_tx_mcc,
		BE_NAPI_WEIGHT);
}

static void be_unmap_pci_bars(struct be_adapter *adapter)
{
	if (adapter->csr)
		iounmap(adapter->csr);
	if (adapter->db)
		iounmap(adapter->db);
	if (adapter->pcicfg && be_physfn(adapter))
		iounmap(adapter->pcicfg);
}

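/* Map the CSR, doorbell and PCI-config BARs. Which BAR holds what differs
 * between Lancer, BE2 and BE3, and between the PF and a VF.
 */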
static int be_map_pci_bars(struct be_adapter *adapter)
{
	u8 __iomem *addr;
	int pcicfg_reg, db_reg;

	if (lancer_chip(adapter)) {
		addr = ioremap_nocache(pci_resource_start(adapter->pdev, 0),
			pci_resource_len(adapter->pdev, 0));
		if (addr == NULL)
			return -ENOMEM;
		adapter->db = addr;
		return 0;
	}

	if (be_physfn(adapter)) {
		addr = ioremap_nocache(pci_resource_start(adapter->pdev, 2),
				pci_resource_len(adapter->pdev, 2));
		if (addr == NULL)
			return -ENOMEM;
		adapter->csr = addr;
	}

	if (adapter->generation == BE_GEN2) {
		pcicfg_reg = 1;
		db_reg = 4;
	} else {
		pcicfg_reg = 0;
		if (be_physfn(adapter))
			db_reg = 4;
		else
			db_reg = 0;
	}
	addr = ioremap_nocache(pci_resource_start(adapter->pdev, db_reg),
				pci_resource_len(adapter->pdev, db_reg));
	if (addr == NULL)
		goto pci_map_err;
	adapter->db = addr;

	if (be_physfn(adapter)) {
		addr = ioremap_nocache(
				pci_resource_start(adapter->pdev, pcicfg_reg),
				pci_resource_len(adapter->pdev, pcicfg_reg));
		if (addr == NULL)
			goto pci_map_err;
		adapter->pcicfg = addr;
	} else
		adapter->pcicfg = adapter->db + SRIOV_VF_PCICFG_OFFSET;

	return 0;
pci_map_err:
	be_unmap_pci_bars(adapter);
	return -ENOMEM;
}

static void be_ctrl_cleanup(struct be_adapter *adapter)
{
	struct be_dma_mem *mem = &adapter->mbox_mem_alloced;

	be_unmap_pci_bars(adapter);

	if (mem->va)
		dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
				  mem->dma);

	mem = &adapter->mc_cmd_mem;
	if (mem->va)
		dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
				  mem->dma);
}

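/* Map the BARs and allocate the DMA memory used for the mailbox and
 * multicast cmds; also initialize the locks that serialize mbox/MCC use.
 */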
static int be_ctrl_init(struct be_adapter *adapter)
{
	struct be_dma_mem *mbox_mem_alloc = &adapter->mbox_mem_alloced;
	struct be_dma_mem *mbox_mem_align = &adapter->mbox_mem;
	struct be_dma_mem *mc_cmd_mem = &adapter->mc_cmd_mem;
	int status;

	status = be_map_pci_bars(adapter);
	if (status)
		goto done;

	mbox_mem_alloc->size = sizeof(struct be_mcc_mailbox) + 16;
	mbox_mem_alloc->va = dma_alloc_coherent(&adapter->pdev->dev,
						mbox_mem_alloc->size,
						&mbox_mem_alloc->dma,
						GFP_KERNEL);
	if (!mbox_mem_alloc->va) {
		status = -ENOMEM;
		goto unmap_pci_bars;
	}

	mbox_mem_align->size = sizeof(struct be_mcc_mailbox);
	mbox_mem_align->va = PTR_ALIGN(mbox_mem_alloc->va, 16);
	mbox_mem_align->dma = PTR_ALIGN(mbox_mem_alloc->dma, 16);
	memset(mbox_mem_align->va, 0, sizeof(struct be_mcc_mailbox));

	mc_cmd_mem->size = sizeof(struct be_cmd_req_mcast_mac_config);
	mc_cmd_mem->va = dma_alloc_coherent(&adapter->pdev->dev,
					    mc_cmd_mem->size, &mc_cmd_mem->dma,
					    GFP_KERNEL);
	if (mc_cmd_mem->va == NULL) {
		status = -ENOMEM;
		goto free_mbox;
	}
	memset(mc_cmd_mem->va, 0, mc_cmd_mem->size);

	mutex_init(&adapter->mbox_lock);
	spin_lock_init(&adapter->mcc_lock);
	spin_lock_init(&adapter->mcc_cq_lock);

	init_completion(&adapter->flash_compl);
	pci_save_state(adapter->pdev);
	return 0;

free_mbox:
	dma_free_coherent(&adapter->pdev->dev, mbox_mem_alloc->size,
			  mbox_mem_alloc->va, mbox_mem_alloc->dma);

unmap_pci_bars:
	be_unmap_pci_bars(adapter);

done:
	return status;
}

static void be_stats_cleanup(struct be_adapter *adapter)
{
	struct be_dma_mem *cmd = &adapter->stats_cmd;

	if (cmd->va)
		dma_free_coherent(&adapter->pdev->dev, cmd->size,
				  cmd->va, cmd->dma);
}

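/* Size the stats DMA buffer for the cmd version this chip speaks: v0 for
 * BE2, the pport stats request for Lancer, v1 otherwise.
 */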
static int be_stats_init(struct be_adapter *adapter)
{
	struct be_dma_mem *cmd = &adapter->stats_cmd;

	if (adapter->generation == BE_GEN2) {
		cmd->size = sizeof(struct be_cmd_req_get_stats_v0);
	} else {
		if (lancer_chip(adapter))
			cmd->size = sizeof(struct lancer_cmd_req_pport_stats);
		else
			cmd->size = sizeof(struct be_cmd_req_get_stats_v1);
	}
	cmd->va = dma_alloc_coherent(&adapter->pdev->dev, cmd->size, &cmd->dma,
				     GFP_KERNEL);
	if (cmd->va == NULL)
		return -1;
	memset(cmd->va, 0, cmd->size);
	return 0;
}

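/* Tear down in roughly the reverse order of be_probe(). */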
static void __devexit be_remove(struct pci_dev *pdev)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);

	if (!adapter)
		return;

	cancel_delayed_work_sync(&adapter->work);

	unregister_netdev(adapter->netdev);

	be_clear(adapter);

	be_stats_cleanup(adapter);

	be_ctrl_cleanup(adapter);

	kfree(adapter->vf_cfg);
	be_sriov_disable(adapter);

	be_msix_disable(adapter);

	pci_set_drvdata(pdev, NULL);
	pci_release_regions(pdev);
	pci_disable_device(pdev);

	free_netdev(adapter->netdev);
}

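/* Query the firmware version, function mode/caps, permanent MAC address
 * and controller attributes, and decide how many TX queues to use.
 */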
static int be_get_config(struct be_adapter *adapter)
{
	int status;
	u8 mac[ETH_ALEN];

	status = be_cmd_get_fw_ver(adapter, adapter->fw_ver);
	if (status)
		return status;

	status = be_cmd_query_fw_cfg(adapter, &adapter->port_num,
			&adapter->function_mode, &adapter->function_caps);
	if (status)
		return status;

	memset(mac, 0, ETH_ALEN);

	/* A default permanent address is given to each VF for Lancer */
	if (be_physfn(adapter) || lancer_chip(adapter)) {
		status = be_cmd_mac_addr_query(adapter, mac,
			MAC_ADDRESS_TYPE_NETWORK, true /* permanent */, 0);

		if (status)
			return status;

		if (!is_valid_ether_addr(mac))
			return -EADDRNOTAVAIL;

		memcpy(adapter->netdev->dev_addr, mac, ETH_ALEN);
		memcpy(adapter->netdev->perm_addr, mac, ETH_ALEN);
	}

	if (adapter->function_mode & 0x400)
		adapter->max_vlans = BE_NUM_VLANS_SUPPORTED/4;
	else
		adapter->max_vlans = BE_NUM_VLANS_SUPPORTED;

	status = be_cmd_get_cntl_attributes(adapter);
	if (status)
		return status;

	be_cmd_check_native_mode(adapter);

	if ((num_vfs && adapter->sriov_enabled) ||
		(adapter->function_mode & 0x400) ||
		lancer_chip(adapter) || !be_physfn(adapter)) {
		adapter->num_tx_qs = 1;
		netif_set_real_num_tx_queues(adapter->netdev,
			adapter->num_tx_qs);
	} else {
		adapter->num_tx_qs = MAX_TX_QS;
	}

	return 0;
}

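/* Determine the adapter generation from the PCI device id; for the Lancer
 * ids, also validate the SLI interface register and record the SLI family.
 */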
static int be_dev_family_check(struct be_adapter *adapter)
{
	struct pci_dev *pdev = adapter->pdev;
	u32 sli_intf = 0, if_type;

	switch (pdev->device) {
	case BE_DEVICE_ID1:
	case OC_DEVICE_ID1:
		adapter->generation = BE_GEN2;
		break;
	case BE_DEVICE_ID2:
	case OC_DEVICE_ID2:
		adapter->generation = BE_GEN3;
		break;
	case OC_DEVICE_ID3:
	case OC_DEVICE_ID4:
		pci_read_config_dword(pdev, SLI_INTF_REG_OFFSET, &sli_intf);
		if_type = (sli_intf & SLI_INTF_IF_TYPE_MASK) >>
						SLI_INTF_IF_TYPE_SHIFT;

		if (((sli_intf & SLI_INTF_VALID_MASK) != SLI_INTF_VALID) ||
			if_type != 0x02) {
			dev_err(&pdev->dev, "SLI_INTF reg val is not valid\n");
			return -EINVAL;
		}
		adapter->sli_family = ((sli_intf & SLI_INTF_FAMILY_MASK) >>
					 SLI_INTF_FAMILY_SHIFT);
		adapter->generation = BE_GEN3;
		break;
	default:
		adapter->generation = 0;
	}
	return 0;
}

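/* Poll the SLIPORT status register until the firmware reports ready, for
 * up to ~10 seconds (500 iterations x 20ms).
 */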
static int lancer_wait_ready(struct be_adapter *adapter)
{
#define SLIPORT_READY_TIMEOUT 500
	u32 sliport_status;
	int status = 0, i;

	for (i = 0; i < SLIPORT_READY_TIMEOUT; i++) {
		sliport_status = ioread32(adapter->db + SLIPORT_STATUS_OFFSET);
		if (sliport_status & SLIPORT_STATUS_RDY_MASK)
			break;

		msleep(20);
	}

	if (i == SLIPORT_READY_TIMEOUT)
		status = -1;

	return status;
}

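/* If the port comes up flagging an error that requires a reset, trigger
 * a port reset via SLIPORT_CONTROL and wait for the port to recover.
 */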
static int lancer_test_and_set_rdy_state(struct be_adapter *adapter)
{
	int status;
	u32 sliport_status, err, reset_needed;

	status = lancer_wait_ready(adapter);
	if (!status) {
		sliport_status = ioread32(adapter->db + SLIPORT_STATUS_OFFSET);
		err = sliport_status & SLIPORT_STATUS_ERR_MASK;
		reset_needed = sliport_status & SLIPORT_STATUS_RN_MASK;
		if (err && reset_needed) {
			iowrite32(SLI_PORT_CONTROL_IP_MASK,
				  adapter->db + SLIPORT_CONTROL_OFFSET);

			/* check if the adapter has corrected the error */
			status = lancer_wait_ready(adapter);
			sliport_status = ioread32(adapter->db +
						  SLIPORT_STATUS_OFFSET);
			sliport_status &= (SLIPORT_STATUS_ERR_MASK |
						SLIPORT_STATUS_RN_MASK);
			if (status || sliport_status)
				status = -1;
		} else if (err || reset_needed) {
			status = -1;
		}
	}
	return status;
}

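/* PCI probe: enable the device, map the BARs, bring the function in sync
 * with firmware, allocate resources and register the netdev. Error paths
 * unwind in the reverse order of setup.
 */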
static int __devinit be_probe(struct pci_dev *pdev,
			const struct pci_device_id *pdev_id)
{
	int status = 0;
	struct be_adapter *adapter;
	struct net_device *netdev;

	status = pci_enable_device(pdev);
	if (status)
		goto do_none;

	status = pci_request_regions(pdev, DRV_NAME);
	if (status)
		goto disable_dev;
	pci_set_master(pdev);

	netdev = alloc_etherdev_mq(sizeof(struct be_adapter), MAX_TX_QS);
	if (netdev == NULL) {
		status = -ENOMEM;
		goto rel_reg;
	}
	adapter = netdev_priv(netdev);
	adapter->pdev = pdev;
	pci_set_drvdata(pdev, adapter);

	status = be_dev_family_check(adapter);
	if (status)
		goto free_netdev;

	adapter->netdev = netdev;
	SET_NETDEV_DEV(netdev, &pdev->dev);

	status = dma_set_mask(&pdev->dev, DMA_BIT_MASK(64));
	if (!status) {
		netdev->features |= NETIF_F_HIGHDMA;
	} else {
		status = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
		if (status) {
			dev_err(&pdev->dev, "Could not set PCI DMA Mask\n");
			goto free_netdev;
		}
	}

	be_sriov_enable(adapter);
	if (adapter->sriov_enabled) {
		adapter->vf_cfg = kcalloc(num_vfs,
			sizeof(struct be_vf_cfg), GFP_KERNEL);

		if (!adapter->vf_cfg)
			goto free_netdev;
	}

	status = be_ctrl_init(adapter);
	if (status)
		goto free_vf_cfg;

	if (lancer_chip(adapter)) {
		status = lancer_test_and_set_rdy_state(adapter);
		if (status) {
			dev_err(&pdev->dev, "Adapter in non-recoverable error\n");
			goto ctrl_clean;
		}
	}

	/* sync up with fw's ready state */
	if (be_physfn(adapter)) {
		status = be_cmd_POST(adapter);
		if (status)
			goto ctrl_clean;
	}

	/* tell fw we're ready to fire cmds */
	status = be_cmd_fw_init(adapter);
	if (status)
		goto ctrl_clean;

	status = be_cmd_reset_function(adapter);
	if (status)
		goto ctrl_clean;

	status = be_stats_init(adapter);
	if (status)
		goto ctrl_clean;

	status = be_get_config(adapter);
	if (status)
		goto stats_clean;

	/* The INTR bit may be set in the card when probed by a kdump kernel
	 * after a crash.
	 */
	if (!lancer_chip(adapter))
		be_intr_set(adapter, false);

	be_msix_enable(adapter);

	INIT_DELAYED_WORK(&adapter->work, be_worker);

	status = be_setup(adapter);
	if (status)
		goto msix_disable;

	be_netdev_init(netdev);
	status = register_netdev(netdev);
	if (status != 0)
		goto unsetup;
	netif_carrier_off(netdev);

	if (be_physfn(adapter) && adapter->sriov_enabled) {
		u8 mac_speed;
		bool link_up;
		u16 vf, lnk_speed;

		if (!lancer_chip(adapter)) {
			status = be_vf_eth_addr_config(adapter);
			if (status)
				goto unreg_netdev;
		}

		for (vf = 0; vf < num_vfs; vf++) {
			status = be_cmd_link_status_query(adapter, &link_up,
					&mac_speed, &lnk_speed, vf + 1);
			if (!status)
				adapter->vf_cfg[vf].vf_tx_rate = lnk_speed * 10;
			else
				goto unreg_netdev;
		}
	}

	dev_info(&pdev->dev, "%s port %d\n", nic_name(pdev), adapter->port_num);

	schedule_delayed_work(&adapter->work, msecs_to_jiffies(100));
	return 0;

unreg_netdev:
	unregister_netdev(netdev);
unsetup:
	be_clear(adapter);
msix_disable:
	be_msix_disable(adapter);
stats_clean:
	be_stats_cleanup(adapter);
ctrl_clean:
	be_ctrl_cleanup(adapter);
free_vf_cfg:
	kfree(adapter->vf_cfg);
free_netdev:
	be_sriov_disable(adapter);
	free_netdev(netdev);
	pci_set_drvdata(pdev, NULL);
rel_reg:
	pci_release_regions(pdev);
disable_dev:
	pci_disable_device(pdev);
do_none:
	dev_err(&pdev->dev, "%s initialization failed\n", nic_name(pdev));
	return status;
}

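/* PM suspend: arm wake-on-LAN if requested, quiesce the interface, release
 * the queues and MSI-X vectors, and put the device into the target state.
 */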
static int be_suspend(struct pci_dev *pdev, pm_message_t state)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);
	struct net_device *netdev = adapter->netdev;

	cancel_delayed_work_sync(&adapter->work);
	if (adapter->wol)
		be_setup_wol(adapter, true);

	netif_device_detach(netdev);
	if (netif_running(netdev)) {
		rtnl_lock();
		be_close(netdev);
		rtnl_unlock();
	}
	be_cmd_get_flow_control(adapter, &adapter->tx_fc, &adapter->rx_fc);
	be_clear(adapter);

	be_msix_disable(adapter);
	pci_save_state(pdev);
	pci_disable_device(pdev);
	pci_set_power_state(pdev, pci_choose_state(pdev, state));
	return 0;
}

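/* PM resume: re-enable the device, re-sync with firmware and rebuild the
 * queues before reattaching the netdev.
 */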
static int be_resume(struct pci_dev *pdev)
{
	int status = 0;
	struct be_adapter *adapter = pci_get_drvdata(pdev);
	struct net_device *netdev = adapter->netdev;

	netif_device_detach(netdev);

	status = pci_enable_device(pdev);
	if (status)
		return status;

	pci_set_power_state(pdev, 0);
	pci_restore_state(pdev);

	be_msix_enable(adapter);
	/* tell fw we're ready to fire cmds */
	status = be_cmd_fw_init(adapter);
	if (status)
		return status;

	be_setup(adapter);
	if (netif_running(netdev)) {
		rtnl_lock();
		be_open(netdev);
		rtnl_unlock();
	}
	netif_device_attach(netdev);

	if (adapter->wol)
		be_setup_wol(adapter, false);

	schedule_delayed_work(&adapter->work, msecs_to_jiffies(100));
	return 0;
}

/*
 * An FLR will stop BE from DMAing any data.
 */
static void be_shutdown(struct pci_dev *pdev)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);

	if (!adapter)
		return;

	cancel_delayed_work_sync(&adapter->work);

	netif_device_detach(adapter->netdev);

	if (adapter->wol)
		be_setup_wol(adapter, true);

	be_cmd_reset_function(adapter);

	pci_disable_device(pdev);
}

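/* EEH: on an uncorrectable PCI error, detach and release resources; the
 * slot is then reset and, if the card POSTs, traffic is restored in
 * be_eeh_resume().
 */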
static pci_ers_result_t be_eeh_err_detected(struct pci_dev *pdev,
				pci_channel_state_t state)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);
	struct net_device *netdev = adapter->netdev;

	dev_err(&adapter->pdev->dev, "EEH error detected\n");

	adapter->eeh_err = true;

	netif_device_detach(netdev);

	if (netif_running(netdev)) {
		rtnl_lock();
		be_close(netdev);
		rtnl_unlock();
	}
	be_clear(adapter);

	if (state == pci_channel_io_perm_failure)
		return PCI_ERS_RESULT_DISCONNECT;

	pci_disable_device(pdev);

	return PCI_ERS_RESULT_NEED_RESET;
}

static pci_ers_result_t be_eeh_reset(struct pci_dev *pdev)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);
	int status;

	dev_info(&adapter->pdev->dev, "EEH reset\n");
	adapter->eeh_err = false;

	status = pci_enable_device(pdev);
	if (status)
		return PCI_ERS_RESULT_DISCONNECT;

	pci_set_master(pdev);
	pci_set_power_state(pdev, 0);
	pci_restore_state(pdev);

	/* Check if card is ok and fw is ready */
	status = be_cmd_POST(adapter);
	if (status)
		return PCI_ERS_RESULT_DISCONNECT;

	return PCI_ERS_RESULT_RECOVERED;
}

static void be_eeh_resume(struct pci_dev *pdev)
{
	int status = 0;
	struct be_adapter *adapter = pci_get_drvdata(pdev);
	struct net_device *netdev = adapter->netdev;

	dev_info(&adapter->pdev->dev, "EEH resume\n");

	pci_save_state(pdev);

	/* tell fw we're ready to fire cmds */
	status = be_cmd_fw_init(adapter);
	if (status)
		goto err;

	status = be_setup(adapter);
	if (status)
		goto err;

	if (netif_running(netdev)) {
		status = be_open(netdev);
		if (status)
			goto err;
	}
	netif_device_attach(netdev);
	return;
err:
	dev_err(&adapter->pdev->dev, "EEH resume failed\n");
}

static struct pci_error_handlers be_eeh_handlers = {
	.error_detected = be_eeh_err_detected,
	.slot_reset = be_eeh_reset,
	.resume = be_eeh_resume,
};

static struct pci_driver be_driver = {
	.name = DRV_NAME,
	.id_table = be_dev_ids,
	.probe = be_probe,
	.remove = be_remove,
	.suspend = be_suspend,
	.resume = be_resume,
	.shutdown = be_shutdown,
	.err_handler = &be_eeh_handlers
};

static int __init be_init_module(void)
{
	if (rx_frag_size != 8192 && rx_frag_size != 4096 &&
	    rx_frag_size != 2048) {
		printk(KERN_WARNING DRV_NAME
			" : Module param rx_frag_size must be 2048/4096/8192."
			" Using 2048\n");
		rx_frag_size = 2048;
	}

	return pci_register_driver(&be_driver);
}
module_init(be_init_module);

static void __exit be_exit_module(void)
{
	pci_unregister_driver(&be_driver);
}
module_exit(be_exit_module);