/*
 * Copyright (C) 2005 - 2011 Emulex
 * All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version 2
 * as published by the Free Software Foundation. The full GNU General
 * Public License is included in this distribution in the file called COPYING.
 *
 * Contact Information:
 * linux-drivers@emulex.com
 *
 * Emulex
 * 3333 Susan Street
 * Costa Mesa, CA 92626
 */

#include <linux/prefetch.h>
#include "be.h"
#include "be_cmds.h"
#include <asm/div64.h>

MODULE_VERSION(DRV_VER);
MODULE_DESCRIPTION(DRV_DESC " " DRV_VER);
MODULE_AUTHOR("ServerEngines Corporation");
MODULE_LICENSE("GPL");

static ushort rx_frag_size = 2048;
static unsigned int num_vfs;
module_param(rx_frag_size, ushort, S_IRUGO);
module_param(num_vfs, uint, S_IRUGO);
MODULE_PARM_DESC(rx_frag_size, "Size of a fragment that holds rcvd data.");
MODULE_PARM_DESC(num_vfs, "Number of PCI VFs to initialize");

static DEFINE_PCI_DEVICE_TABLE(be_dev_ids) = {
	{ PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID1) },
	{ PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID2) },
	{ PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID1) },
	{ PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID2) },
	{ PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID3) },
	{ PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID4) },
	{ 0 }
};
MODULE_DEVICE_TABLE(pci, be_dev_ids);
/* UE Status Low CSR */
static const char * const ue_status_low_desc[] = {
	"CEV",
	"CTX",
	"DBUF",
	"ERX",
	"Host",
	"MPU",
	"NDMA",
	"PTC ",
	"RDMA ",
	"RXF ",
	"RXIPS ",
	"RXULP0 ",
	"RXULP1 ",
	"RXULP2 ",
	"TIM ",
	"TPOST ",
	"TPRE ",
	"TXIPS ",
	"TXULP0 ",
	"TXULP1 ",
	"UC ",
	"WDMA ",
	"TXULP2 ",
	"HOST1 ",
	"P0_OB_LINK ",
	"P1_OB_LINK ",
	"HOST_GPIO ",
	"MBOX ",
	"AXGMAC0",
	"AXGMAC1",
	"JTAG",
	"MPU_INTPEND"
};
/* UE Status High CSR */
static const char * const ue_status_hi_desc[] = {
	"LPCMEMHOST",
	"MGMT_MAC",
	"PCS0ONLINE",
	"MPU_IRAM",
	"PCS1ONLINE",
	"PCTL0",
	"PCTL1",
	"PMEM",
	"RR",
	"TXPB",
	"RXPP",
	"XAUI",
	"TXP",
	"ARM",
	"IPC",
	"HOST2",
	"HOST3",
	"HOST4",
	"HOST5",
	"HOST6",
	"HOST7",
	"HOST8",
	"HOST9",
	"NETC",
	"Unknown",
	"Unknown",
	"Unknown",
	"Unknown",
	"Unknown",
	"Unknown",
	"Unknown",
	"Unknown"
};

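/* Free the DMA-coherent memory backing a queue's ring, if it was allocated */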
static void be_queue_free(struct be_adapter *adapter, struct be_queue_info *q)
{
	struct be_dma_mem *mem = &q->dma_mem;

	if (mem->va)
		dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
				  mem->dma);
}

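/* Allocate and zero a DMA-coherent ring of 'len' entries of 'entry_size'
 * bytes each for a queue; returns -1 on allocation failure.
 */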
static int be_queue_alloc(struct be_adapter *adapter, struct be_queue_info *q,
		u16 len, u16 entry_size)
{
	struct be_dma_mem *mem = &q->dma_mem;

	memset(q, 0, sizeof(*q));
	q->len = len;
	q->entry_size = entry_size;
	mem->size = len * entry_size;
	mem->va = dma_alloc_coherent(&adapter->pdev->dev, mem->size, &mem->dma,
				     GFP_KERNEL);
	if (!mem->va)
		return -1;
	memset(mem->va, 0, mem->size);
	return 0;
}

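/* Enable or disable the host interrupt bit in the PCI config space
 * MEMBAR control register; the write is skipped while an EEH error
 * is being handled or when the bit is already in the requested state.
 */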
static void be_intr_set(struct be_adapter *adapter, bool enable)
{
	u8 __iomem *addr = adapter->pcicfg + PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET;
	u32 reg = ioread32(addr);
	u32 enabled = reg & MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;

	if (adapter->eeh_err)
		return;

	if (!enabled && enable)
		reg |= MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
	else if (enabled && !enable)
		reg &= ~MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
	else
		return;

	iowrite32(reg, addr);
}

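/* RX/TX queue doorbells: each encodes a ring id and the count of newly
 * posted entries into one doorbell write. The wmb() orders the descriptor
 * writes in memory before the doorbell write is issued to the device.
 */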
static void be_rxq_notify(struct be_adapter *adapter, u16 qid, u16 posted)
{
	u32 val = 0;
	val |= qid & DB_RQ_RING_ID_MASK;
	val |= posted << DB_RQ_NUM_POSTED_SHIFT;

	wmb();
	iowrite32(val, adapter->db + DB_RQ_OFFSET);
}

static void be_txq_notify(struct be_adapter *adapter, u16 qid, u16 posted)
{
	u32 val = 0;
	val |= qid & DB_TXULP_RING_ID_MASK;
	val |= (posted & DB_TXULP_NUM_POSTED_MASK) << DB_TXULP_NUM_POSTED_SHIFT;

	wmb();
	iowrite32(val, adapter->db + DB_TXULP1_OFFSET);
}

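/* EQ/CQ doorbells: tell the device how many entries were consumed
 * (num_popped); 'arm' re-arms the queue's interrupt, and for EQs
 * 'clear_int' additionally clears the interrupt line.
 */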
static void be_eq_notify(struct be_adapter *adapter, u16 qid,
		bool arm, bool clear_int, u16 num_popped)
{
	u32 val = 0;
	val |= qid & DB_EQ_RING_ID_MASK;
	val |= ((qid & DB_EQ_RING_ID_EXT_MASK) <<
			DB_EQ_RING_ID_EXT_MASK_SHIFT);

	if (adapter->eeh_err)
		return;

	if (arm)
		val |= 1 << DB_EQ_REARM_SHIFT;
	if (clear_int)
		val |= 1 << DB_EQ_CLR_SHIFT;
	val |= 1 << DB_EQ_EVNT_SHIFT;
	val |= num_popped << DB_EQ_NUM_POPPED_SHIFT;
	iowrite32(val, adapter->db + DB_EQ_OFFSET);
}

void be_cq_notify(struct be_adapter *adapter, u16 qid, bool arm, u16 num_popped)
{
	u32 val = 0;
	val |= qid & DB_CQ_RING_ID_MASK;
	val |= ((qid & DB_CQ_RING_ID_EXT_MASK) <<
			DB_CQ_RING_ID_EXT_MASK_SHIFT);

	if (adapter->eeh_err)
		return;

	if (arm)
		val |= 1 << DB_CQ_REARM_SHIFT;
	val |= num_popped << DB_CQ_NUM_POPPED_SHIFT;
	iowrite32(val, adapter->db + DB_CQ_OFFSET);
}

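/* ndo_set_mac_address handler: on the PF, replace the programmed MAC by
 * deleting the old pmac-id and adding the new address; VFs only update the
 * netdev copy since their MAC is programmed in h/w by the corresponding PF.
 */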
static int be_mac_addr_set(struct net_device *netdev, void *p)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct sockaddr *addr = p;
	int status = 0;

	if (!is_valid_ether_addr(addr->sa_data))
		return -EADDRNOTAVAIL;

	/* MAC addr configuration will be done in hardware for VFs
	 * by their corresponding PFs. Just copy to netdev addr here
	 */
	if (!be_physfn(adapter))
		goto netdev_addr;

	status = be_cmd_pmac_del(adapter, adapter->if_handle,
				 adapter->pmac_id, 0);
	if (status)
		return status;

	status = be_cmd_pmac_add(adapter, (u8 *)addr->sa_data,
				 adapter->if_handle, &adapter->pmac_id, 0);
netdev_addr:
	if (!status)
		memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);

	return status;
}

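/* The populate_*_stats() helpers below copy the f/w stats from the
 * version-specific layout returned by the adapter (v0 for BE2, v1 for BE3,
 * pport stats for Lancer) into the common be_drv_stats format.
 */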
static void populate_be2_stats(struct be_adapter *adapter)
{
	struct be_drv_stats *drvs = &adapter->drv_stats;
	struct be_pmem_stats *pmem_sts = be_pmem_stats_from_cmd(adapter);
	struct be_port_rxf_stats_v0 *port_stats =
		be_port_rxf_stats_from_cmd(adapter);
	struct be_rxf_stats_v0 *rxf_stats =
		be_rxf_stats_from_cmd(adapter);

	drvs->rx_pause_frames = port_stats->rx_pause_frames;
	drvs->rx_crc_errors = port_stats->rx_crc_errors;
	drvs->rx_control_frames = port_stats->rx_control_frames;
	drvs->rx_in_range_errors = port_stats->rx_in_range_errors;
	drvs->rx_frame_too_long = port_stats->rx_frame_too_long;
	drvs->rx_dropped_runt = port_stats->rx_dropped_runt;
	drvs->rx_ip_checksum_errs = port_stats->rx_ip_checksum_errs;
	drvs->rx_tcp_checksum_errs = port_stats->rx_tcp_checksum_errs;
	drvs->rx_udp_checksum_errs = port_stats->rx_udp_checksum_errs;
	drvs->rxpp_fifo_overflow_drop = port_stats->rx_fifo_overflow;
	drvs->rx_dropped_tcp_length = port_stats->rx_dropped_tcp_length;
	drvs->rx_dropped_too_small = port_stats->rx_dropped_too_small;
	drvs->rx_dropped_too_short = port_stats->rx_dropped_too_short;
	drvs->rx_out_range_errors = port_stats->rx_out_range_errors;
	drvs->rx_input_fifo_overflow_drop =
		port_stats->rx_input_fifo_overflow;
	drvs->rx_dropped_header_too_small =
		port_stats->rx_dropped_header_too_small;
	drvs->rx_address_match_errors = port_stats->rx_address_match_errors;
	drvs->rx_alignment_symbol_errors =
		port_stats->rx_alignment_symbol_errors;

	drvs->tx_pauseframes = port_stats->tx_pauseframes;
	drvs->tx_controlframes = port_stats->tx_controlframes;

	if (adapter->port_num)
		drvs->jabber_events = rxf_stats->port1_jabber_events;
	else
		drvs->jabber_events = rxf_stats->port0_jabber_events;
	drvs->rx_drops_no_pbuf = rxf_stats->rx_drops_no_pbuf;
	drvs->rx_drops_no_txpb = rxf_stats->rx_drops_no_txpb;
	drvs->rx_drops_no_erx_descr = rxf_stats->rx_drops_no_erx_descr;
	drvs->rx_drops_invalid_ring = rxf_stats->rx_drops_invalid_ring;
	drvs->forwarded_packets = rxf_stats->forwarded_packets;
	drvs->rx_drops_mtu = rxf_stats->rx_drops_mtu;
	drvs->rx_drops_no_tpre_descr = rxf_stats->rx_drops_no_tpre_descr;
	drvs->rx_drops_too_many_frags = rxf_stats->rx_drops_too_many_frags;
	adapter->drv_stats.eth_red_drops = pmem_sts->eth_red_drops;
}

static void populate_be3_stats(struct be_adapter *adapter)
{
	struct be_drv_stats *drvs = &adapter->drv_stats;
	struct be_pmem_stats *pmem_sts = be_pmem_stats_from_cmd(adapter);
	struct be_rxf_stats_v1 *rxf_stats =
		be_rxf_stats_from_cmd(adapter);
	struct be_port_rxf_stats_v1 *port_stats =
		be_port_rxf_stats_from_cmd(adapter);

	drvs->rx_priority_pause_frames = 0;
	drvs->pmem_fifo_overflow_drop = 0;
	drvs->rx_pause_frames = port_stats->rx_pause_frames;
	drvs->rx_crc_errors = port_stats->rx_crc_errors;
	drvs->rx_control_frames = port_stats->rx_control_frames;
	drvs->rx_in_range_errors = port_stats->rx_in_range_errors;
	drvs->rx_frame_too_long = port_stats->rx_frame_too_long;
	drvs->rx_dropped_runt = port_stats->rx_dropped_runt;
	drvs->rx_ip_checksum_errs = port_stats->rx_ip_checksum_errs;
	drvs->rx_tcp_checksum_errs = port_stats->rx_tcp_checksum_errs;
	drvs->rx_udp_checksum_errs = port_stats->rx_udp_checksum_errs;
	drvs->rx_dropped_tcp_length = port_stats->rx_dropped_tcp_length;
	drvs->rx_dropped_too_small = port_stats->rx_dropped_too_small;
	drvs->rx_dropped_too_short = port_stats->rx_dropped_too_short;
	drvs->rx_out_range_errors = port_stats->rx_out_range_errors;
	drvs->rx_dropped_header_too_small =
		port_stats->rx_dropped_header_too_small;
	drvs->rx_input_fifo_overflow_drop =
		port_stats->rx_input_fifo_overflow_drop;
	drvs->rx_address_match_errors = port_stats->rx_address_match_errors;
	drvs->rx_alignment_symbol_errors =
		port_stats->rx_alignment_symbol_errors;
	drvs->rxpp_fifo_overflow_drop = port_stats->rxpp_fifo_overflow_drop;
	drvs->tx_pauseframes = port_stats->tx_pauseframes;
	drvs->tx_controlframes = port_stats->tx_controlframes;
	drvs->jabber_events = port_stats->jabber_events;
	drvs->rx_drops_no_pbuf = rxf_stats->rx_drops_no_pbuf;
	drvs->rx_drops_no_txpb = rxf_stats->rx_drops_no_txpb;
	drvs->rx_drops_no_erx_descr = rxf_stats->rx_drops_no_erx_descr;
	drvs->rx_drops_invalid_ring = rxf_stats->rx_drops_invalid_ring;
	drvs->forwarded_packets = rxf_stats->forwarded_packets;
	drvs->rx_drops_mtu = rxf_stats->rx_drops_mtu;
	drvs->rx_drops_no_tpre_descr = rxf_stats->rx_drops_no_tpre_descr;
	drvs->rx_drops_too_many_frags = rxf_stats->rx_drops_too_many_frags;
	adapter->drv_stats.eth_red_drops = pmem_sts->eth_red_drops;
}

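/* Lancer reports most counters as 64-bit values split into hi/lo 32-bit
 * halves; make_64bit_val() recombines each pair. Counters that Lancer
 * does not report are zeroed.
 */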
static void populate_lancer_stats(struct be_adapter *adapter)
{
	struct be_drv_stats *drvs = &adapter->drv_stats;
	struct lancer_cmd_pport_stats *pport_stats =
		pport_stats_from_cmd(adapter);

	drvs->rx_priority_pause_frames = 0;
	drvs->pmem_fifo_overflow_drop = 0;
	drvs->rx_pause_frames =
		make_64bit_val(pport_stats->rx_pause_frames_hi,
			       pport_stats->rx_pause_frames_lo);
	drvs->rx_crc_errors = make_64bit_val(pport_stats->rx_crc_errors_hi,
					     pport_stats->rx_crc_errors_lo);
	drvs->rx_control_frames =
		make_64bit_val(pport_stats->rx_control_frames_hi,
			       pport_stats->rx_control_frames_lo);
	drvs->rx_in_range_errors = pport_stats->rx_in_range_errors;
	drvs->rx_frame_too_long =
		make_64bit_val(pport_stats->rx_frames_too_long_hi,
			       pport_stats->rx_frames_too_long_lo);
	drvs->rx_dropped_runt = pport_stats->rx_dropped_runt;
	drvs->rx_ip_checksum_errs = pport_stats->rx_ip_checksum_errors;
	drvs->rx_tcp_checksum_errs = pport_stats->rx_tcp_checksum_errors;
	drvs->rx_udp_checksum_errs = pport_stats->rx_udp_checksum_errors;
	drvs->rx_dropped_tcp_length =
		pport_stats->rx_dropped_invalid_tcp_length;
	drvs->rx_dropped_too_small = pport_stats->rx_dropped_too_small;
	drvs->rx_dropped_too_short = pport_stats->rx_dropped_too_short;
	drvs->rx_out_range_errors = pport_stats->rx_out_of_range_errors;
	drvs->rx_dropped_header_too_small =
		pport_stats->rx_dropped_header_too_small;
	drvs->rx_input_fifo_overflow_drop = pport_stats->rx_fifo_overflow;
	drvs->rx_address_match_errors = pport_stats->rx_address_match_errors;
	drvs->rx_alignment_symbol_errors =
		make_64bit_val(pport_stats->rx_symbol_errors_hi,
			       pport_stats->rx_symbol_errors_lo);
	drvs->rxpp_fifo_overflow_drop = pport_stats->rx_fifo_overflow;
	drvs->tx_pauseframes = make_64bit_val(pport_stats->tx_pause_frames_hi,
					      pport_stats->tx_pause_frames_lo);
	drvs->tx_controlframes =
		make_64bit_val(pport_stats->tx_control_frames_hi,
			       pport_stats->tx_control_frames_lo);
	drvs->jabber_events = pport_stats->rx_jabbers;
	drvs->rx_drops_no_pbuf = 0;
	drvs->rx_drops_no_txpb = 0;
	drvs->rx_drops_no_erx_descr = 0;
	drvs->rx_drops_invalid_ring = pport_stats->rx_drops_invalid_queue;
	drvs->forwarded_packets = make_64bit_val(pport_stats->num_forwards_hi,
						 pport_stats->num_forwards_lo);
	drvs->rx_drops_mtu = make_64bit_val(pport_stats->rx_drops_mtu_hi,
					    pport_stats->rx_drops_mtu_lo);
	drvs->rx_drops_no_tpre_descr = 0;
	drvs->rx_drops_too_many_frags =
		make_64bit_val(pport_stats->rx_drops_too_many_frags_hi,
			       pport_stats->rx_drops_too_many_frags_lo);
}

void be_parse_stats(struct be_adapter *adapter)
{
	if (adapter->generation == BE_GEN3) {
		if (lancer_chip(adapter))
			populate_lancer_stats(adapter);
		else
			populate_be3_stats(adapter);
	} else {
		populate_be2_stats(adapter);
	}
}

void netdev_stats_update(struct be_adapter *adapter)
{
	struct be_drv_stats *drvs = &adapter->drv_stats;
	struct net_device_stats *dev_stats = &adapter->netdev->stats;
	struct be_rx_obj *rxo;
	struct be_tx_obj *txo;
	unsigned long pkts = 0, bytes = 0, mcast = 0, drops = 0;
	int i;

	for_all_rx_queues(adapter, rxo, i) {
		pkts += rx_stats(rxo)->rx_pkts;
		bytes += rx_stats(rxo)->rx_bytes;
		mcast += rx_stats(rxo)->rx_mcast_pkts;
		/* no space in linux buffers: best possible approximation */
		if (adapter->generation == BE_GEN3) {
			if (!(lancer_chip(adapter))) {
				struct be_erx_stats_v1 *erx =
					be_erx_stats_from_cmd(adapter);
				drops += erx->rx_drops_no_fragments[rxo->q.id];
			}
		} else {
			struct be_erx_stats_v0 *erx =
				be_erx_stats_from_cmd(adapter);
			drops += erx->rx_drops_no_fragments[rxo->q.id];
		}
	}
	dev_stats->rx_packets = pkts;
	dev_stats->rx_bytes = bytes;
	dev_stats->multicast = mcast;
	dev_stats->rx_dropped = drops;

	pkts = bytes = 0;
	for_all_tx_queues(adapter, txo, i) {
		pkts += tx_stats(txo)->be_tx_pkts;
		bytes += tx_stats(txo)->be_tx_bytes;
	}
	dev_stats->tx_packets = pkts;
	dev_stats->tx_bytes = bytes;

	/* bad pkts received */
	dev_stats->rx_errors = drvs->rx_crc_errors +
		drvs->rx_alignment_symbol_errors +
		drvs->rx_in_range_errors +
		drvs->rx_out_range_errors +
		drvs->rx_frame_too_long +
		drvs->rx_dropped_too_small +
		drvs->rx_dropped_too_short +
		drvs->rx_dropped_header_too_small +
		drvs->rx_dropped_tcp_length +
		drvs->rx_dropped_runt +
		drvs->rx_tcp_checksum_errs +
		drvs->rx_ip_checksum_errs +
		drvs->rx_udp_checksum_errs;

	/* detailed rx errors */
	dev_stats->rx_length_errors = drvs->rx_in_range_errors +
		drvs->rx_out_range_errors +
		drvs->rx_frame_too_long;

	dev_stats->rx_crc_errors = drvs->rx_crc_errors;

	/* frame alignment errors */
	dev_stats->rx_frame_errors = drvs->rx_alignment_symbol_errors;

	/* receiver fifo overrun */
	/* drops_no_pbuf is not per i/f, it's per BE card */
	dev_stats->rx_fifo_errors = drvs->rxpp_fifo_overflow_drop +
		drvs->rx_input_fifo_overflow_drop +
		drvs->rx_drops_no_pbuf;
}

void be_link_status_update(struct be_adapter *adapter, bool link_up)
{
	struct net_device *netdev = adapter->netdev;

	/* If link came up or went down */
	if (adapter->link_up != link_up) {
		adapter->link_speed = -1;
		if (link_up) {
			netif_carrier_on(netdev);
			printk(KERN_INFO "%s: Link up\n", netdev->name);
		} else {
			netif_carrier_off(netdev);
			printk(KERN_INFO "%s: Link down\n", netdev->name);
		}
		adapter->link_up = link_up;
	}
}

/* Update the EQ delay in BE based on the RX frags consumed / sec */
static void be_rx_eqd_update(struct be_adapter *adapter, struct be_rx_obj *rxo)
{
	struct be_eq_obj *rx_eq = &rxo->rx_eq;
	struct be_rx_stats *stats = &rxo->stats;
	ulong now = jiffies;
	u32 eqd;

	if (!rx_eq->enable_aic)
		return;

	/* Wrapped around */
	if (time_before(now, stats->rx_fps_jiffies)) {
		stats->rx_fps_jiffies = now;
		return;
	}

	/* Update once a second */
	if ((now - stats->rx_fps_jiffies) < HZ)
		return;

	stats->rx_fps = (stats->rx_frags - stats->prev_rx_frags) /
			((now - stats->rx_fps_jiffies) / HZ);

	stats->rx_fps_jiffies = now;
	stats->prev_rx_frags = stats->rx_frags;
	eqd = stats->rx_fps / 110000;
	eqd = eqd << 3;
	if (eqd > rx_eq->max_eqd)
		eqd = rx_eq->max_eqd;
	if (eqd < rx_eq->min_eqd)
		eqd = rx_eq->min_eqd;
	if (eqd < 10)
		eqd = 0;
	if (eqd != rx_eq->cur_eqd)
		be_cmd_modify_eqd(adapter, rx_eq->q.id, eqd);

	rx_eq->cur_eqd = eqd;
}

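/* Convert a byte count accumulated over 'ticks' jiffies into Mbits/sec.
 * Illustrative arithmetic: 2.5e9 bytes over 2*HZ ticks -> 1.25e9 bytes/sec
 * -> shifted left by 3 = 1e10 bits/sec -> / 1e6 = 10000 Mbits/sec.
 */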
static u32 be_calc_rate(u64 bytes, unsigned long ticks)
{
	u64 rate = bytes;

	do_div(rate, ticks / HZ);
	rate <<= 3;			/* bytes/sec -> bits/sec */
	do_div(rate, 1000000ul);	/* bits/sec -> Mbits/sec */

	return rate;
}

static void be_tx_rate_update(struct be_tx_obj *txo)
{
	struct be_tx_stats *stats = tx_stats(txo);
	ulong now = jiffies;

	/* Wrapped around? */
	if (time_before(now, stats->be_tx_jiffies)) {
		stats->be_tx_jiffies = now;
		return;
	}

	/* Update tx rate once in two seconds */
	if ((now - stats->be_tx_jiffies) > 2 * HZ) {
		stats->be_tx_rate = be_calc_rate(stats->be_tx_bytes
						 - stats->be_tx_bytes_prev,
						 now - stats->be_tx_jiffies);
		stats->be_tx_jiffies = now;
		stats->be_tx_bytes_prev = stats->be_tx_bytes;
	}
}

static void be_tx_stats_update(struct be_tx_obj *txo,
			u32 wrb_cnt, u32 copied, u32 gso_segs, bool stopped)
{
	struct be_tx_stats *stats = tx_stats(txo);

	stats->be_tx_reqs++;
	stats->be_tx_wrbs += wrb_cnt;
	stats->be_tx_bytes += copied;
	stats->be_tx_pkts += (gso_segs ? gso_segs : 1);
	if (stopped)
		stats->be_tx_stops++;
}

/* Determine number of WRB entries needed to xmit data in an skb */
static u32 wrb_cnt_for_skb(struct be_adapter *adapter, struct sk_buff *skb,
			   bool *dummy)
{
	int cnt = (skb->len > skb->data_len);

	cnt += skb_shinfo(skb)->nr_frags;

	/* to account for hdr wrb */
	cnt++;
	if (lancer_chip(adapter) || !(cnt & 1)) {
		*dummy = false;
	} else {
		/* add a dummy to make it an even num */
		cnt++;
		*dummy = true;
	}
	BUG_ON(cnt > BE_MAX_TX_FRAG_COUNT);
	return cnt;
}

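/* Fill a work request buffer (WRB) with one fragment's DMA address and
 * length; the 64-bit address is split into hi/lo halves for the HW.
 */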
static inline void wrb_fill(struct be_eth_wrb *wrb, u64 addr, int len)
{
	wrb->frag_pa_hi = upper_32_bits(addr);
	wrb->frag_pa_lo = addr & 0xFFFFFFFF;
	wrb->frag_len = len & ETH_WRB_FRAG_LEN_MASK;
}

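/* Build the header WRB that precedes the fragment WRBs: sets LSO/checksum
 * offload flags, the VLAN tag (remapping its priority if it is not in the
 * available priority bitmap), the total WRB count and the payload length.
 */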
static void wrb_fill_hdr(struct be_adapter *adapter, struct be_eth_hdr_wrb *hdr,
		struct sk_buff *skb, u32 wrb_cnt, u32 len)
{
	u8 vlan_prio = 0;
	u16 vlan_tag = 0;

	memset(hdr, 0, sizeof(*hdr));

	AMAP_SET_BITS(struct amap_eth_hdr_wrb, crc, hdr, 1);

	if (skb_is_gso(skb)) {
		AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso, hdr, 1);
		AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso_mss,
			hdr, skb_shinfo(skb)->gso_size);
		if (skb_is_gso_v6(skb) && !lancer_chip(adapter))
			AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso6, hdr, 1);
		if (lancer_chip(adapter) && adapter->sli_family ==
				LANCER_A0_SLI_FAMILY) {
			AMAP_SET_BITS(struct amap_eth_hdr_wrb, ipcs, hdr, 1);
			if (is_tcp_pkt(skb))
				AMAP_SET_BITS(struct amap_eth_hdr_wrb,
						tcpcs, hdr, 1);
			else if (is_udp_pkt(skb))
				AMAP_SET_BITS(struct amap_eth_hdr_wrb,
						udpcs, hdr, 1);
		}
	} else if (skb->ip_summed == CHECKSUM_PARTIAL) {
		if (is_tcp_pkt(skb))
			AMAP_SET_BITS(struct amap_eth_hdr_wrb, tcpcs, hdr, 1);
		else if (is_udp_pkt(skb))
			AMAP_SET_BITS(struct amap_eth_hdr_wrb, udpcs, hdr, 1);
	}

	if (adapter->vlan_grp && vlan_tx_tag_present(skb)) {
		AMAP_SET_BITS(struct amap_eth_hdr_wrb, vlan, hdr, 1);
		vlan_tag = vlan_tx_tag_get(skb);
		vlan_prio = (vlan_tag & VLAN_PRIO_MASK) >> VLAN_PRIO_SHIFT;
		/* If vlan priority provided by OS is NOT in available bmap */
		if (!(adapter->vlan_prio_bmap & (1 << vlan_prio)))
			vlan_tag = (vlan_tag & ~VLAN_PRIO_MASK) |
					adapter->recommended_prio;
		AMAP_SET_BITS(struct amap_eth_hdr_wrb, vlan_tag, hdr, vlan_tag);
	}

	AMAP_SET_BITS(struct amap_eth_hdr_wrb, event, hdr, 1);
	AMAP_SET_BITS(struct amap_eth_hdr_wrb, complete, hdr, 1);
	AMAP_SET_BITS(struct amap_eth_hdr_wrb, num_wrb, hdr, wrb_cnt);
	AMAP_SET_BITS(struct amap_eth_hdr_wrb, len, hdr, len);
}

static void unmap_tx_frag(struct device *dev, struct be_eth_wrb *wrb,
		bool unmap_single)
{
	dma_addr_t dma;

	be_dws_le_to_cpu(wrb, sizeof(*wrb));

	dma = (u64)wrb->frag_pa_hi << 32 | (u64)wrb->frag_pa_lo;
	if (wrb->frag_len) {
		if (unmap_single)
			dma_unmap_single(dev, dma, wrb->frag_len,
					 DMA_TO_DEVICE);
		else
			dma_unmap_page(dev, dma, wrb->frag_len, DMA_TO_DEVICE);
	}
}

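/* DMA-map the skb head and each fragment, posting one WRB per mapping and
 * an optional trailing dummy WRB, then write the header WRB describing the
 * request. On a mapping error, unwind all WRBs mapped so far and return 0.
 */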
static int make_tx_wrbs(struct be_adapter *adapter, struct be_queue_info *txq,
		struct sk_buff *skb, u32 wrb_cnt, bool dummy_wrb)
{
	dma_addr_t busaddr;
	int i, copied = 0;
	struct device *dev = &adapter->pdev->dev;
	struct sk_buff *first_skb = skb;
	struct be_eth_wrb *wrb;
	struct be_eth_hdr_wrb *hdr;
	bool map_single = false;
	u16 map_head;

	hdr = queue_head_node(txq);
	queue_head_inc(txq);
	map_head = txq->head;

	if (skb->len > skb->data_len) {
		int len = skb_headlen(skb);
		busaddr = dma_map_single(dev, skb->data, len, DMA_TO_DEVICE);
		if (dma_mapping_error(dev, busaddr))
			goto dma_err;
		map_single = true;
		wrb = queue_head_node(txq);
		wrb_fill(wrb, busaddr, len);
		be_dws_cpu_to_le(wrb, sizeof(*wrb));
		queue_head_inc(txq);
		copied += len;
	}

	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		struct skb_frag_struct *frag =
			&skb_shinfo(skb)->frags[i];
		busaddr = dma_map_page(dev, frag->page, frag->page_offset,
				       frag->size, DMA_TO_DEVICE);
		if (dma_mapping_error(dev, busaddr))
			goto dma_err;
		wrb = queue_head_node(txq);
		wrb_fill(wrb, busaddr, frag->size);
		be_dws_cpu_to_le(wrb, sizeof(*wrb));
		queue_head_inc(txq);
		copied += frag->size;
	}

	if (dummy_wrb) {
		wrb = queue_head_node(txq);
		wrb_fill(wrb, 0, 0);
		be_dws_cpu_to_le(wrb, sizeof(*wrb));
		queue_head_inc(txq);
	}

	wrb_fill_hdr(adapter, hdr, first_skb, wrb_cnt, copied);
	be_dws_cpu_to_le(hdr, sizeof(*hdr));

	return copied;
dma_err:
	txq->head = map_head;
	while (copied) {
		wrb = queue_head_node(txq);
		unmap_tx_frag(dev, wrb, map_single);
		map_single = false;
		copied -= wrb->frag_len;
		queue_head_inc(txq);
	}
	return 0;
}

static netdev_tx_t be_xmit(struct sk_buff *skb,
			struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_tx_obj *txo = &adapter->tx_obj[skb_get_queue_mapping(skb)];
	struct be_queue_info *txq = &txo->q;
	u32 wrb_cnt = 0, copied = 0;
	u32 start = txq->head;
	bool dummy_wrb, stopped = false;

	wrb_cnt = wrb_cnt_for_skb(adapter, skb, &dummy_wrb);

	copied = make_tx_wrbs(adapter, txq, skb, wrb_cnt, dummy_wrb);
	if (copied) {
		/* record the sent skb in the sent_skb table */
		BUG_ON(txo->sent_skb_list[start]);
		txo->sent_skb_list[start] = skb;

		/* Ensure txq has space for the next skb; Else stop the queue
		 * *BEFORE* ringing the tx doorbell, so that we serialize the
		 * tx compls of the current transmit which'll wake up the queue
		 */
		atomic_add(wrb_cnt, &txq->used);
		if ((BE_MAX_TX_FRAG_COUNT + atomic_read(&txq->used)) >=
								txq->len) {
			netif_stop_subqueue(netdev, skb_get_queue_mapping(skb));
			stopped = true;
		}

		be_txq_notify(adapter, txq->id, wrb_cnt);

		be_tx_stats_update(txo, wrb_cnt, copied,
				skb_shinfo(skb)->gso_segs, stopped);
	} else {
		txq->head = start;
		dev_kfree_skb_any(skb);
	}
	return NETDEV_TX_OK;
}

static int be_change_mtu(struct net_device *netdev, int new_mtu)
{
	struct be_adapter *adapter = netdev_priv(netdev);

	if (new_mtu < BE_MIN_MTU ||
			new_mtu > (BE_MAX_JUMBO_FRAME_SIZE -
					(ETH_HLEN + ETH_FCS_LEN))) {
		dev_info(&adapter->pdev->dev,
			"MTU must be between %d and %d bytes\n",
			BE_MIN_MTU,
			(BE_MAX_JUMBO_FRAME_SIZE - (ETH_HLEN + ETH_FCS_LEN)));
		return -EINVAL;
	}
	dev_info(&adapter->pdev->dev, "MTU changed from %d to %d bytes\n",
			netdev->mtu, new_mtu);
	netdev->mtu = new_mtu;
	return 0;
}

/*
 * A max of 64 (BE_NUM_VLANS_SUPPORTED) vlans can be configured in BE.
 * If the user configures more, place BE in vlan promiscuous mode.
 */
static int be_vid_config(struct be_adapter *adapter, bool vf, u32 vf_num)
{
	u16 vtag[BE_NUM_VLANS_SUPPORTED];
	u16 ntags = 0, i;
	int status = 0;
	u32 if_handle;

	if (vf) {
		if_handle = adapter->vf_cfg[vf_num].vf_if_handle;
		vtag[0] = cpu_to_le16(adapter->vf_cfg[vf_num].vf_vlan_tag);
		status = be_cmd_vlan_config(adapter, if_handle, vtag, 1, 1, 0);
	}

	if (adapter->vlans_added <= adapter->max_vlans) {
		/* Construct VLAN Table to give to HW */
		for (i = 0; i < VLAN_N_VID; i++) {
			if (adapter->vlan_tag[i]) {
				vtag[ntags] = cpu_to_le16(i);
				ntags++;
			}
		}
		status = be_cmd_vlan_config(adapter, adapter->if_handle,
					    vtag, ntags, 1, 0);
	} else {
		status = be_cmd_vlan_config(adapter, adapter->if_handle,
					    NULL, 0, 1, 1);
	}

	return status;
}

static void be_vlan_register(struct net_device *netdev, struct vlan_group *grp)
{
	struct be_adapter *adapter = netdev_priv(netdev);

	adapter->vlan_grp = grp;
}

static void be_vlan_add_vid(struct net_device *netdev, u16 vid)
{
	struct be_adapter *adapter = netdev_priv(netdev);

	adapter->vlans_added++;
	if (!be_physfn(adapter))
		return;

	adapter->vlan_tag[vid] = 1;
	if (adapter->vlans_added <= (adapter->max_vlans + 1))
		be_vid_config(adapter, false, 0);
}

static void be_vlan_rem_vid(struct net_device *netdev, u16 vid)
{
	struct be_adapter *adapter = netdev_priv(netdev);

	adapter->vlans_added--;
	vlan_group_set_device(adapter->vlan_grp, vid, NULL);

	if (!be_physfn(adapter))
		return;

	adapter->vlan_tag[vid] = 0;
	if (adapter->vlans_added <= adapter->max_vlans)
		be_vid_config(adapter, false, 0);
}

static void be_set_multicast_list(struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);

	if (netdev->flags & IFF_PROMISC) {
		be_cmd_promiscuous_config(adapter, true);
		adapter->promiscuous = true;
		goto done;
	}

	/* BE was previously in promiscuous mode; disable it */
	if (adapter->promiscuous) {
		adapter->promiscuous = false;
		be_cmd_promiscuous_config(adapter, false);
	}

	/* Enable multicast promisc if num configured exceeds what we support */
	if (netdev->flags & IFF_ALLMULTI ||
	    netdev_mc_count(netdev) > BE_MAX_MC) {
		be_cmd_multicast_set(adapter, adapter->if_handle, NULL,
				&adapter->mc_cmd_mem);
		goto done;
	}

	be_cmd_multicast_set(adapter, adapter->if_handle, netdev,
		&adapter->mc_cmd_mem);
done:
	return;
}

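/* ndo_set_vf_* handlers: let the PF configure a VF's MAC address, VLAN tag
 * and TX rate; each requires SR-IOV to be enabled and a valid VF index.
 */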
static int be_set_vf_mac(struct net_device *netdev, int vf, u8 *mac)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	int status;

	if (!adapter->sriov_enabled)
		return -EPERM;

	if (!is_valid_ether_addr(mac) || (vf >= num_vfs))
		return -EINVAL;

	if (adapter->vf_cfg[vf].vf_pmac_id != BE_INVALID_PMAC_ID)
		status = be_cmd_pmac_del(adapter,
					adapter->vf_cfg[vf].vf_if_handle,
					adapter->vf_cfg[vf].vf_pmac_id, vf + 1);

	status = be_cmd_pmac_add(adapter, mac,
				adapter->vf_cfg[vf].vf_if_handle,
				&adapter->vf_cfg[vf].vf_pmac_id, vf + 1);

	if (status)
		dev_err(&adapter->pdev->dev, "MAC %pM set on VF %d Failed\n",
			mac, vf);
	else
		memcpy(adapter->vf_cfg[vf].vf_mac_addr, mac, ETH_ALEN);

	return status;
}

static int be_get_vf_config(struct net_device *netdev, int vf,
			struct ifla_vf_info *vi)
{
	struct be_adapter *adapter = netdev_priv(netdev);

	if (!adapter->sriov_enabled)
		return -EPERM;

	if (vf >= num_vfs)
		return -EINVAL;

	vi->vf = vf;
	vi->tx_rate = adapter->vf_cfg[vf].vf_tx_rate;
	vi->vlan = adapter->vf_cfg[vf].vf_vlan_tag;
	vi->qos = 0;
	memcpy(&vi->mac, adapter->vf_cfg[vf].vf_mac_addr, ETH_ALEN);

	return 0;
}

static int be_set_vf_vlan(struct net_device *netdev,
			int vf, u16 vlan, u8 qos)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	int status = 0;

	if (!adapter->sriov_enabled)
		return -EPERM;

	if ((vf >= num_vfs) || (vlan > 4095))
		return -EINVAL;

	if (vlan) {
		adapter->vf_cfg[vf].vf_vlan_tag = vlan;
		adapter->vlans_added++;
	} else {
		adapter->vf_cfg[vf].vf_vlan_tag = 0;
		adapter->vlans_added--;
	}

	status = be_vid_config(adapter, true, vf);

	if (status)
		dev_info(&adapter->pdev->dev,
				"VLAN %d config on VF %d failed\n", vlan, vf);
	return status;
}

static int be_set_vf_tx_rate(struct net_device *netdev,
			int vf, int rate)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	int status = 0;

	if (!adapter->sriov_enabled)
		return -EPERM;

	if ((vf >= num_vfs) || (rate < 0))
		return -EINVAL;

	if (rate > 10000)
		rate = 10000;

	adapter->vf_cfg[vf].vf_tx_rate = rate;
	status = be_cmd_set_qos(adapter, rate / 10, vf + 1);

	if (status)
		dev_info(&adapter->pdev->dev,
				"tx rate %d on VF %d failed\n", rate, vf);
	return status;
}

static void be_rx_rate_update(struct be_rx_obj *rxo)
{
	struct be_rx_stats *stats = &rxo->stats;
	ulong now = jiffies;

	/* Wrapped around */
	if (time_before(now, stats->rx_jiffies)) {
		stats->rx_jiffies = now;
		return;
	}

	/* Update the rate once in two seconds */
	if ((now - stats->rx_jiffies) < 2 * HZ)
		return;

	stats->rx_rate = be_calc_rate(stats->rx_bytes - stats->rx_bytes_prev,
				now - stats->rx_jiffies);
	stats->rx_jiffies = now;
	stats->rx_bytes_prev = stats->rx_bytes;
}

static void be_rx_stats_update(struct be_rx_obj *rxo,
		struct be_rx_compl_info *rxcp)
{
	struct be_rx_stats *stats = &rxo->stats;

	stats->rx_compl++;
	stats->rx_frags += rxcp->num_rcvd;
	stats->rx_bytes += rxcp->pkt_size;
	stats->rx_pkts++;
	if (rxcp->pkt_type == BE_MULTICAST_PACKET)
		stats->rx_mcast_pkts++;
	if (rxcp->err)
		stats->rxcp_err++;
}

static inline bool csum_passed(struct be_rx_compl_info *rxcp)
{
	/* L4 checksum is not reliable for non TCP/UDP packets.
	 * Also ignore ipcksm for ipv6 pkts */
	return (rxcp->tcpf || rxcp->udpf) && rxcp->l4_csum &&
				(rxcp->ip_csum || rxcp->ipv6);
}

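/* Return the page_info for the given RX frag index; DMA-unmap the backing
 * page when its last user consumes it and decrement the queue's used count.
 */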
static struct be_rx_page_info *
get_rx_page_info(struct be_adapter *adapter,
		struct be_rx_obj *rxo,
		u16 frag_idx)
{
	struct be_rx_page_info *rx_page_info;
	struct be_queue_info *rxq = &rxo->q;

	rx_page_info = &rxo->page_info_tbl[frag_idx];
	BUG_ON(!rx_page_info->page);

	if (rx_page_info->last_page_user) {
		dma_unmap_page(&adapter->pdev->dev,
			       dma_unmap_addr(rx_page_info, bus),
			       adapter->big_page_size, DMA_FROM_DEVICE);
		rx_page_info->last_page_user = false;
	}

	atomic_dec(&rxq->used);
	return rx_page_info;
}

/* Throw away the data in the Rx completion */
static void be_rx_compl_discard(struct be_adapter *adapter,
		struct be_rx_obj *rxo,
		struct be_rx_compl_info *rxcp)
{
	struct be_queue_info *rxq = &rxo->q;
	struct be_rx_page_info *page_info;
	u16 i, num_rcvd = rxcp->num_rcvd;

	for (i = 0; i < num_rcvd; i++) {
		page_info = get_rx_page_info(adapter, rxo, rxcp->rxq_idx);
		put_page(page_info->page);
		memset(page_info, 0, sizeof(*page_info));
		index_inc(&rxcp->rxq_idx, rxq->len);
	}
}

/*
 * skb_fill_rx_data forms a complete skb for an ether frame
 * indicated by rxcp.
 */
static void skb_fill_rx_data(struct be_adapter *adapter, struct be_rx_obj *rxo,
			struct sk_buff *skb, struct be_rx_compl_info *rxcp)
{
	struct be_queue_info *rxq = &rxo->q;
	struct be_rx_page_info *page_info;
	u16 i, j;
	u16 hdr_len, curr_frag_len, remaining;
	u8 *start;

	page_info = get_rx_page_info(adapter, rxo, rxcp->rxq_idx);
	start = page_address(page_info->page) + page_info->page_offset;
	prefetch(start);

	/* Copy data in the first descriptor of this completion */
	curr_frag_len = min(rxcp->pkt_size, rx_frag_size);

	/* Copy the header portion into skb_data */
	hdr_len = min(BE_HDR_LEN, curr_frag_len);
	memcpy(skb->data, start, hdr_len);
	skb->len = curr_frag_len;
	if (curr_frag_len <= BE_HDR_LEN) { /* tiny packet */
		/* Complete packet has now been moved to data */
		put_page(page_info->page);
		skb->data_len = 0;
		skb->tail += curr_frag_len;
	} else {
		skb_shinfo(skb)->nr_frags = 1;
		skb_shinfo(skb)->frags[0].page = page_info->page;
		skb_shinfo(skb)->frags[0].page_offset =
					page_info->page_offset + hdr_len;
		skb_shinfo(skb)->frags[0].size = curr_frag_len - hdr_len;
		skb->data_len = curr_frag_len - hdr_len;
		skb->tail += hdr_len;
	}
	page_info->page = NULL;

	if (rxcp->pkt_size <= rx_frag_size) {
		BUG_ON(rxcp->num_rcvd != 1);
		return;
	}

	/* More frags present for this completion */
	index_inc(&rxcp->rxq_idx, rxq->len);
	remaining = rxcp->pkt_size - curr_frag_len;
	for (i = 1, j = 0; i < rxcp->num_rcvd; i++) {
		page_info = get_rx_page_info(adapter, rxo, rxcp->rxq_idx);
		curr_frag_len = min(remaining, rx_frag_size);

		/* Coalesce all frags from the same physical page in one slot */
		if (page_info->page_offset == 0) {
			/* Fresh page */
			j++;
			skb_shinfo(skb)->frags[j].page = page_info->page;
			skb_shinfo(skb)->frags[j].page_offset =
							page_info->page_offset;
			skb_shinfo(skb)->frags[j].size = 0;
			skb_shinfo(skb)->nr_frags++;
		} else {
			put_page(page_info->page);
		}

		skb_shinfo(skb)->frags[j].size += curr_frag_len;
		skb->len += curr_frag_len;
		skb->data_len += curr_frag_len;

		remaining -= curr_frag_len;
		index_inc(&rxcp->rxq_idx, rxq->len);
		page_info->page = NULL;
	}
	BUG_ON(j > MAX_SKB_FRAGS);
}

/* Process the RX completion indicated by rxcp when GRO is disabled */
static void be_rx_compl_process(struct be_adapter *adapter,
			struct be_rx_obj *rxo,
			struct be_rx_compl_info *rxcp)
{
	struct net_device *netdev = adapter->netdev;
	struct sk_buff *skb;

	skb = netdev_alloc_skb_ip_align(netdev, BE_HDR_LEN);
	if (unlikely(!skb)) {
		if (net_ratelimit())
			dev_warn(&adapter->pdev->dev, "skb alloc failed\n");
		be_rx_compl_discard(adapter, rxo, rxcp);
		return;
	}

	skb_fill_rx_data(adapter, rxo, skb, rxcp);

	if (likely((netdev->features & NETIF_F_RXCSUM) && csum_passed(rxcp)))
		skb->ip_summed = CHECKSUM_UNNECESSARY;
	else
		skb_checksum_none_assert(skb);

	skb->truesize = skb->len + sizeof(struct sk_buff);
	skb->protocol = eth_type_trans(skb, netdev);
	if (adapter->netdev->features & NETIF_F_RXHASH)
		skb->rxhash = rxcp->rss_hash;

	if (unlikely(rxcp->vlanf)) {
		if (!adapter->vlan_grp || adapter->vlans_added == 0) {
			kfree_skb(skb);
			return;
		}
		vlan_hwaccel_receive_skb(skb, adapter->vlan_grp,
					 rxcp->vlan_tag);
	} else {
		netif_receive_skb(skb);
	}
}

/* Process the RX completion indicated by rxcp when GRO is enabled */
static void be_rx_compl_process_gro(struct be_adapter *adapter,
		struct be_rx_obj *rxo,
		struct be_rx_compl_info *rxcp)
{
	struct be_rx_page_info *page_info;
	struct sk_buff *skb = NULL;
	struct be_queue_info *rxq = &rxo->q;
	struct be_eq_obj *eq_obj = &rxo->rx_eq;
	u16 remaining, curr_frag_len;
	u16 i, j;

	skb = napi_get_frags(&eq_obj->napi);
	if (!skb) {
		be_rx_compl_discard(adapter, rxo, rxcp);
		return;
	}

	remaining = rxcp->pkt_size;
	for (i = 0, j = -1; i < rxcp->num_rcvd; i++) {
		page_info = get_rx_page_info(adapter, rxo, rxcp->rxq_idx);

		curr_frag_len = min(remaining, rx_frag_size);

		/* Coalesce all frags from the same physical page in one slot */
		if (i == 0 || page_info->page_offset == 0) {
			/* First frag or Fresh page */
			j++;
			skb_shinfo(skb)->frags[j].page = page_info->page;
			skb_shinfo(skb)->frags[j].page_offset =
							page_info->page_offset;
			skb_shinfo(skb)->frags[j].size = 0;
		} else {
			put_page(page_info->page);
		}
		skb_shinfo(skb)->frags[j].size += curr_frag_len;

		remaining -= curr_frag_len;
		index_inc(&rxcp->rxq_idx, rxq->len);
		memset(page_info, 0, sizeof(*page_info));
	}
	BUG_ON(j > MAX_SKB_FRAGS);

	skb_shinfo(skb)->nr_frags = j + 1;
	skb->len = rxcp->pkt_size;
	skb->data_len = rxcp->pkt_size;
	skb->truesize += rxcp->pkt_size;
	skb->ip_summed = CHECKSUM_UNNECESSARY;
	if (adapter->netdev->features & NETIF_F_RXHASH)
		skb->rxhash = rxcp->rss_hash;

	if (likely(!rxcp->vlanf))
		napi_gro_frags(&eq_obj->napi);
	else
		vlan_gro_frags(&eq_obj->napi, adapter->vlan_grp,
			       rxcp->vlan_tag);
}

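/*
 * BE3 in native mode reports v1 RX completion descriptors; other chips and
 * modes use the v0 layout. Both layouts carry the same fields at different
 * bit positions, so each gets its own parser that fills the common
 * struct be_rx_compl_info.
 */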
static void be_parse_rx_compl_v1(struct be_adapter *adapter,
				struct be_eth_rx_compl *compl,
				struct be_rx_compl_info *rxcp)
{
	rxcp->pkt_size =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v1, pktsize, compl);
	rxcp->vlanf = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, vtp, compl);
	rxcp->err = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, err, compl);
	rxcp->tcpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, tcpf, compl);
	rxcp->udpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, udpf, compl);
	rxcp->ip_csum =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v1, ipcksm, compl);
	rxcp->l4_csum =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v1, l4_cksm, compl);
	rxcp->ipv6 =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v1, ip_version, compl);
	rxcp->rxq_idx =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v1, fragndx, compl);
	rxcp->num_rcvd =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v1, numfrags, compl);
	rxcp->pkt_type =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v1, cast_enc, compl);
	rxcp->rss_hash =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v1, rsshash, compl);
	if (rxcp->vlanf) {
		rxcp->vtm = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, vtm,
					  compl);
		rxcp->vlan_tag = AMAP_GET_BITS(struct amap_eth_rx_compl_v1,
					       vlan_tag, compl);
	}
}

static void be_parse_rx_compl_v0(struct be_adapter *adapter,
				struct be_eth_rx_compl *compl,
				struct be_rx_compl_info *rxcp)
{
	rxcp->pkt_size =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v0, pktsize, compl);
	rxcp->vlanf = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, vtp, compl);
	rxcp->err = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, err, compl);
	rxcp->tcpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, tcpf, compl);
	rxcp->udpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, udpf, compl);
	rxcp->ip_csum =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v0, ipcksm, compl);
	rxcp->l4_csum =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v0, l4_cksm, compl);
	rxcp->ipv6 =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v0, ip_version, compl);
	rxcp->rxq_idx =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v0, fragndx, compl);
	rxcp->num_rcvd =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v0, numfrags, compl);
	rxcp->pkt_type =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v0, cast_enc, compl);
	rxcp->rss_hash =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v0, rsshash, compl);
	if (rxcp->vlanf) {
		rxcp->vtm = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, vtm,
					  compl);
		rxcp->vlan_tag = AMAP_GET_BITS(struct amap_eth_rx_compl_v0,
					       vlan_tag, compl);
	}
}

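/*
 * RX completions are consumed by polling a valid bit that HW sets in the
 * CQ entry; the rmb() in the routine below ensures the rest of the entry
 * is read only after the valid bit has been seen set.
 */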
static struct be_rx_compl_info *be_rx_compl_get(struct be_rx_obj *rxo)
{
	struct be_eth_rx_compl *compl = queue_tail_node(&rxo->cq);
	struct be_rx_compl_info *rxcp = &rxo->rxcp;
	struct be_adapter *adapter = rxo->adapter;

	/* For checking the valid bit it is OK to use either definition as the
	 * valid bit is at the same position in both v0 and v1 Rx compl */
	if (compl->dw[offsetof(struct amap_eth_rx_compl_v1, valid) / 32] == 0)
		return NULL;

	rmb();
	be_dws_le_to_cpu(compl, sizeof(*compl));

	if (adapter->be3_native)
		be_parse_rx_compl_v1(adapter, compl, rxcp);
	else
		be_parse_rx_compl_v0(adapter, compl, rxcp);

	if (rxcp->vlanf) {
		/* vlanf could be wrongly set in some cards.
		 * ignore if vtm is not set */
		if ((adapter->function_mode & 0x400) && !rxcp->vtm)
			rxcp->vlanf = 0;

		if (!lancer_chip(adapter))
			rxcp->vlan_tag = swab16(rxcp->vlan_tag);

		if (((adapter->pvid & VLAN_VID_MASK) ==
		     (rxcp->vlan_tag & VLAN_VID_MASK)) &&
		    !adapter->vlan_tag[rxcp->vlan_tag])
			rxcp->vlanf = 0;
	}

	/* As the compl has been parsed, reset it; we won't touch it again */
	compl->dw[offsetof(struct amap_eth_rx_compl_v1, valid) / 32] = 0;

	queue_tail_inc(&rxo->cq);
	return rxcp;
}

static inline struct page *be_alloc_pages(u32 size, gfp_t gfp)
{
	u32 order = get_order(size);

	if (order > 0)
		gfp |= __GFP_COMP;
	return alloc_pages(gfp, order);
}

/*
 * Allocate a page, split it into fragments of size rx_frag_size and post
 * them as receive buffers to BE
 */
static void be_post_rx_frags(struct be_rx_obj *rxo, gfp_t gfp)
{
	struct be_adapter *adapter = rxo->adapter;
	struct be_rx_page_info *page_info_tbl = rxo->page_info_tbl;
	struct be_rx_page_info *page_info = NULL, *prev_page_info = NULL;
	struct be_queue_info *rxq = &rxo->q;
	struct page *pagep = NULL;
	struct be_eth_rx_d *rxd;
	u64 page_dmaaddr = 0, frag_dmaaddr;
	u32 posted, page_offset = 0;

	page_info = &rxo->page_info_tbl[rxq->head];
	for (posted = 0; posted < MAX_RX_POST && !page_info->page; posted++) {
		if (!pagep) {
			pagep = be_alloc_pages(adapter->big_page_size, gfp);
			if (unlikely(!pagep)) {
				rxo->stats.rx_post_fail++;
				break;
			}
			page_dmaaddr = dma_map_page(&adapter->pdev->dev, pagep,
						    0, adapter->big_page_size,
						    DMA_FROM_DEVICE);
			page_info->page_offset = 0;
		} else {
			get_page(pagep);
			page_info->page_offset = page_offset + rx_frag_size;
		}
		page_offset = page_info->page_offset;
		page_info->page = pagep;
		dma_unmap_addr_set(page_info, bus, page_dmaaddr);
		frag_dmaaddr = page_dmaaddr + page_info->page_offset;

		rxd = queue_head_node(rxq);
		rxd->fragpa_lo = cpu_to_le32(frag_dmaaddr & 0xFFFFFFFF);
		rxd->fragpa_hi = cpu_to_le32(upper_32_bits(frag_dmaaddr));

		/* Any space left in the current big page for another frag? */
		if ((page_offset + rx_frag_size + rx_frag_size) >
					adapter->big_page_size) {
			pagep = NULL;
			page_info->last_page_user = true;
		}

		prev_page_info = page_info;
		queue_head_inc(rxq);
		page_info = &page_info_tbl[rxq->head];
	}
	if (pagep)
		prev_page_info->last_page_user = true;

	if (posted) {
		atomic_add(posted, &rxq->used);
		be_rxq_notify(adapter, rxq->id, posted);
	} else if (atomic_read(&rxq->used) == 0) {
		/* Let be_worker replenish when memory is available */
		rxo->rx_post_starved = true;
	}
}

static struct be_eth_tx_compl *be_tx_compl_get(struct be_queue_info *tx_cq)
{
	struct be_eth_tx_compl *txcp = queue_tail_node(tx_cq);

	if (txcp->dw[offsetof(struct amap_eth_tx_compl, valid) / 32] == 0)
		return NULL;

	rmb();
	be_dws_le_to_cpu(txcp, sizeof(*txcp));

	txcp->dw[offsetof(struct amap_eth_tx_compl, valid) / 32] = 0;

	queue_tail_inc(tx_cq);
	return txcp;
}

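/*
 * Unmaps and frees the skb of a completed TX request and returns the
 * number of WRBs (header wrb + frag wrbs) reclaimed from the TX queue.
 */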
static u16 be_tx_compl_process(struct be_adapter *adapter,
		struct be_tx_obj *txo, u16 last_index)
{
	struct be_queue_info *txq = &txo->q;
	struct be_eth_wrb *wrb;
	struct sk_buff **sent_skbs = txo->sent_skb_list;
	struct sk_buff *sent_skb;
	u16 cur_index, num_wrbs = 1; /* account for hdr wrb */
	bool unmap_skb_hdr = true;

	sent_skb = sent_skbs[txq->tail];
	BUG_ON(!sent_skb);
	sent_skbs[txq->tail] = NULL;

	/* skip header wrb */
	queue_tail_inc(txq);

	do {
		cur_index = txq->tail;
		wrb = queue_tail_node(txq);
		unmap_tx_frag(&adapter->pdev->dev, wrb,
			      (unmap_skb_hdr && skb_headlen(sent_skb)));
		unmap_skb_hdr = false;

		num_wrbs++;
		queue_tail_inc(txq);
	} while (cur_index != last_index);

	kfree_skb(sent_skb);
	return num_wrbs;
}

static inline struct be_eq_entry *event_get(struct be_eq_obj *eq_obj)
{
	struct be_eq_entry *eqe = queue_tail_node(&eq_obj->q);

	if (!eqe->evt)
		return NULL;

	rmb();
	eqe->evt = le32_to_cpu(eqe->evt);
	queue_tail_inc(&eq_obj->q);
	return eqe;
}

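/*
 * Drain all pending entries of an event queue and tell the HW how many
 * were consumed; a spurious interrupt (no events) still re-arms the EQ.
 */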
static int event_handle(struct be_adapter *adapter,
			struct be_eq_obj *eq_obj,
			bool rearm)
{
	struct be_eq_entry *eqe;
	u16 num = 0;

	while ((eqe = event_get(eq_obj)) != NULL) {
		eqe->evt = 0;
		num++;
	}

	/* Deal with any spurious interrupts that come
	 * without events
	 */
	if (!num)
		rearm = true;

	be_eq_notify(adapter, eq_obj->q.id, rearm, true, num);
	if (num)
		napi_schedule(&eq_obj->napi);

	return num;
}

/* Just read and notify events without processing them.
 * Used at the time of destroying event queues */
static void be_eq_clean(struct be_adapter *adapter,
			struct be_eq_obj *eq_obj)
{
	struct be_eq_entry *eqe;
	u16 num = 0;

	while ((eqe = event_get(eq_obj)) != NULL) {
		eqe->evt = 0;
		num++;
	}

	if (num)
		be_eq_notify(adapter, eq_obj->q.id, false, true, num);
}

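/*
 * Flush an RX queue: discard any completions still pending, then free the
 * posted receive buffers that were never filled by the HW.
 */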
static void be_rx_q_clean(struct be_adapter *adapter, struct be_rx_obj *rxo)
{
	struct be_rx_page_info *page_info;
	struct be_queue_info *rxq = &rxo->q;
	struct be_queue_info *rx_cq = &rxo->cq;
	struct be_rx_compl_info *rxcp;
	u16 tail;

	/* First clean up any pending rx completions */
	while ((rxcp = be_rx_compl_get(rxo)) != NULL) {
		be_rx_compl_discard(adapter, rxo, rxcp);
		be_cq_notify(adapter, rx_cq->id, false, 1);
	}

	/* Then free the posted rx buffers that were not used */
	tail = (rxq->head + rxq->len - atomic_read(&rxq->used)) % rxq->len;
	for (; atomic_read(&rxq->used) > 0; index_inc(&tail, rxq->len)) {
		page_info = get_rx_page_info(adapter, rxo, tail);
		put_page(page_info->page);
		memset(page_info, 0, sizeof(*page_info));
	}
	BUG_ON(atomic_read(&rxq->used));
	rxq->tail = rxq->head = 0;
}

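/*
 * Reclaim all posted TX buffers: first by reaping completions for up to
 * 200ms, then by force-freeing whatever never completed.
 */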
static void be_tx_compl_clean(struct be_adapter *adapter,
				struct be_tx_obj *txo)
{
	struct be_queue_info *tx_cq = &txo->cq;
	struct be_queue_info *txq = &txo->q;
	struct be_eth_tx_compl *txcp;
	u16 end_idx, cmpl = 0, timeo = 0, num_wrbs = 0;
	struct sk_buff **sent_skbs = txo->sent_skb_list;
	struct sk_buff *sent_skb;
	bool dummy_wrb;

	/* Wait for a max of 200ms for all the tx-completions to arrive. */
	do {
		while ((txcp = be_tx_compl_get(tx_cq))) {
			end_idx = AMAP_GET_BITS(struct amap_eth_tx_compl,
						wrb_index, txcp);
			num_wrbs += be_tx_compl_process(adapter, txo, end_idx);
			cmpl++;
		}
		if (cmpl) {
			be_cq_notify(adapter, tx_cq->id, false, cmpl);
			atomic_sub(num_wrbs, &txq->used);
			cmpl = 0;
			num_wrbs = 0;
		}

		if (atomic_read(&txq->used) == 0 || ++timeo > 200)
			break;

		mdelay(1);
	} while (true);

	if (atomic_read(&txq->used))
		dev_err(&adapter->pdev->dev, "%d pending tx-completions\n",
			atomic_read(&txq->used));

	/* free posted tx for which compls will never arrive */
	while (atomic_read(&txq->used)) {
		sent_skb = sent_skbs[txq->tail];
		end_idx = txq->tail;
		index_adv(&end_idx,
			wrb_cnt_for_skb(adapter, sent_skb, &dummy_wrb) - 1,
			txq->len);
		num_wrbs = be_tx_compl_process(adapter, txo, end_idx);
		atomic_sub(num_wrbs, &txq->used);
	}
}

static void be_mcc_queues_destroy(struct be_adapter *adapter)
{
	struct be_queue_info *q;

	q = &adapter->mcc_obj.q;
	if (q->created)
		be_cmd_q_destroy(adapter, q, QTYPE_MCCQ);
	be_queue_free(adapter, q);

	q = &adapter->mcc_obj.cq;
	if (q->created)
		be_cmd_q_destroy(adapter, q, QTYPE_CQ);
	be_queue_free(adapter, q);
}

/* Must be called only after TX qs are created as MCC shares TX EQ */
static int be_mcc_queues_create(struct be_adapter *adapter)
{
	struct be_queue_info *q, *cq;

	/* Alloc MCC compl queue */
	cq = &adapter->mcc_obj.cq;
	if (be_queue_alloc(adapter, cq, MCC_CQ_LEN,
			sizeof(struct be_mcc_compl)))
		goto err;

	/* Ask BE to create MCC compl queue; share TX's eq */
	if (be_cmd_cq_create(adapter, cq, &adapter->tx_eq.q, false, true, 0))
		goto mcc_cq_free;

	/* Alloc MCC queue */
	q = &adapter->mcc_obj.q;
	if (be_queue_alloc(adapter, q, MCC_Q_LEN, sizeof(struct be_mcc_wrb)))
		goto mcc_cq_destroy;

	/* Ask BE to create MCC queue */
	if (be_cmd_mccq_create(adapter, q, cq))
		goto mcc_q_free;

	return 0;

mcc_q_free:
	be_queue_free(adapter, q);
mcc_cq_destroy:
	be_cmd_q_destroy(adapter, cq, QTYPE_CQ);
mcc_cq_free:
	be_queue_free(adapter, cq);
err:
	return -1;
}

static void be_tx_queues_destroy(struct be_adapter *adapter)
{
	struct be_queue_info *q;
	struct be_tx_obj *txo;
	u8 i;

	for_all_tx_queues(adapter, txo, i) {
		q = &txo->q;
		if (q->created)
			be_cmd_q_destroy(adapter, q, QTYPE_TXQ);
		be_queue_free(adapter, q);

		q = &txo->cq;
		if (q->created)
			be_cmd_q_destroy(adapter, q, QTYPE_CQ);
		be_queue_free(adapter, q);
	}

	/* Clear any residual events */
	be_eq_clean(adapter, &adapter->tx_eq);

	q = &adapter->tx_eq.q;
	if (q->created)
		be_cmd_q_destroy(adapter, q, QTYPE_EQ);
	be_queue_free(adapter, q);
}

/* One TX event queue is shared by all TX compl qs */
static int be_tx_queues_create(struct be_adapter *adapter)
{
	struct be_queue_info *eq, *q, *cq;
	struct be_tx_obj *txo;
	u8 i;

	adapter->tx_eq.max_eqd = 0;
	adapter->tx_eq.min_eqd = 0;
	adapter->tx_eq.cur_eqd = 96;
	adapter->tx_eq.enable_aic = false;

	eq = &adapter->tx_eq.q;
	if (be_queue_alloc(adapter, eq, EVNT_Q_LEN,
			   sizeof(struct be_eq_entry)))
		return -1;

	if (be_cmd_eq_create(adapter, eq, adapter->tx_eq.cur_eqd))
		goto err;
	adapter->tx_eq.eq_idx = adapter->eq_next_idx++;

	for_all_tx_queues(adapter, txo, i) {
		cq = &txo->cq;
		if (be_queue_alloc(adapter, cq, TX_CQ_LEN,
				   sizeof(struct be_eth_tx_compl)))
			goto err;

		if (be_cmd_cq_create(adapter, cq, eq, false, false, 3))
			goto err;

		q = &txo->q;
		if (be_queue_alloc(adapter, q, TX_Q_LEN,
				   sizeof(struct be_eth_wrb)))
			goto err;

		if (be_cmd_txq_create(adapter, q, cq))
			goto err;
	}
	return 0;

err:
	be_tx_queues_destroy(adapter);
	return -1;
}

static void be_rx_queues_destroy(struct be_adapter *adapter)
{
	struct be_queue_info *q;
	struct be_rx_obj *rxo;
	int i;

	for_all_rx_queues(adapter, rxo, i) {
		be_queue_free(adapter, &rxo->q);

		q = &rxo->cq;
		if (q->created)
			be_cmd_q_destroy(adapter, q, QTYPE_CQ);
		be_queue_free(adapter, q);

		q = &rxo->rx_eq.q;
		if (q->created)
			be_cmd_q_destroy(adapter, q, QTYPE_EQ);
		be_queue_free(adapter, q);
	}
}

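/*
 * Multiple RX queues (RSS) are used only when the function reports RSS
 * capability and neither SR-IOV nor the 0x400 function-mode bit is set.
 */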
static u32 be_num_rxqs_want(struct be_adapter *adapter)
{
	if ((adapter->function_caps & BE_FUNCTION_CAPS_RSS) &&
	    !adapter->sriov_enabled && !(adapter->function_mode & 0x400)) {
		return 1 + MAX_RSS_QS; /* one default non-RSS queue */
	} else {
		dev_warn(&adapter->pdev->dev,
			"No support for multiple RX queues\n");
		return 1;
	}
}

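/*
 * For each RX object allocate an event queue, a completion queue and the
 * RX descriptor ring; the ring itself is created in HW later, in be_open().
 */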
static int be_rx_queues_create(struct be_adapter *adapter)
{
	struct be_queue_info *eq, *q, *cq;
	struct be_rx_obj *rxo;
	int rc, i;

	adapter->num_rx_qs = min(be_num_rxqs_want(adapter),
				msix_enabled(adapter) ?
					adapter->num_msix_vec - 1 : 1);
	if (adapter->num_rx_qs != MAX_RX_QS)
		dev_warn(&adapter->pdev->dev,
			"Can create only %d RX queues", adapter->num_rx_qs);

	adapter->big_page_size = (1 << get_order(rx_frag_size)) * PAGE_SIZE;
	for_all_rx_queues(adapter, rxo, i) {
		rxo->adapter = adapter;
		rxo->rx_eq.max_eqd = BE_MAX_EQD;
		rxo->rx_eq.enable_aic = true;

		/* EQ */
		eq = &rxo->rx_eq.q;
		rc = be_queue_alloc(adapter, eq, EVNT_Q_LEN,
					sizeof(struct be_eq_entry));
		if (rc)
			goto err;

		rc = be_cmd_eq_create(adapter, eq, rxo->rx_eq.cur_eqd);
		if (rc)
			goto err;

		rxo->rx_eq.eq_idx = adapter->eq_next_idx++;

		/* CQ */
		cq = &rxo->cq;
		rc = be_queue_alloc(adapter, cq, RX_CQ_LEN,
				sizeof(struct be_eth_rx_compl));
		if (rc)
			goto err;

		rc = be_cmd_cq_create(adapter, cq, eq, false, false, 3);
		if (rc)
			goto err;

		/* Rx Q - will be created in be_open() */
		q = &rxo->q;
		rc = be_queue_alloc(adapter, q, RX_Q_LEN,
				sizeof(struct be_eth_rx_d));
		if (rc)
			goto err;
	}

	return 0;
err:
	be_rx_queues_destroy(adapter);
	return -1;
}

static bool event_peek(struct be_eq_obj *eq_obj)
{
	struct be_eq_entry *eqe = queue_tail_node(&eq_obj->q);

	return eqe->evt != 0;
}

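/*
 * INTx handler. On Lancer, pending work is found by peeking at the event
 * queues directly; on other chips the CEV ISR register is read and the
 * per-EQ bits are tested before handling events.
 */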
static irqreturn_t be_intx(int irq, void *dev)
{
	struct be_adapter *adapter = dev;
	struct be_rx_obj *rxo;
	int isr, i, tx = 0, rx = 0;

	if (lancer_chip(adapter)) {
		if (event_peek(&adapter->tx_eq))
			tx = event_handle(adapter, &adapter->tx_eq, false);
		for_all_rx_queues(adapter, rxo, i) {
			if (event_peek(&rxo->rx_eq))
				rx |= event_handle(adapter, &rxo->rx_eq, true);
		}

		if (!(tx || rx))
			return IRQ_NONE;

	} else {
		isr = ioread32(adapter->csr + CEV_ISR0_OFFSET +
			(adapter->tx_eq.q.id / 8) * CEV_ISR_SIZE);
		if (!isr)
			return IRQ_NONE;

		if ((1 << adapter->tx_eq.eq_idx & isr))
			event_handle(adapter, &adapter->tx_eq, false);

		for_all_rx_queues(adapter, rxo, i) {
			if ((1 << rxo->rx_eq.eq_idx & isr))
				event_handle(adapter, &rxo->rx_eq, true);
		}
	}

	return IRQ_HANDLED;
}

static irqreturn_t be_msix_rx(int irq, void *dev)
{
	struct be_rx_obj *rxo = dev;
	struct be_adapter *adapter = rxo->adapter;

	event_handle(adapter, &rxo->rx_eq, true);

	return IRQ_HANDLED;
}

static irqreturn_t be_msix_tx_mcc(int irq, void *dev)
{
	struct be_adapter *adapter = dev;

	event_handle(adapter, &adapter->tx_eq, false);

	return IRQ_HANDLED;
}

static inline bool do_gro(struct be_rx_compl_info *rxcp)
{
	return rxcp->tcpf && !rxcp->err;
}

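/*
 * NAPI poll for an RX queue: process up to 'budget' completions, refill
 * the ring when it runs low and re-arm the CQ only when all work is done.
 */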
static int be_poll_rx(struct napi_struct *napi, int budget)
{
	struct be_eq_obj *rx_eq = container_of(napi, struct be_eq_obj, napi);
	struct be_rx_obj *rxo = container_of(rx_eq, struct be_rx_obj, rx_eq);
	struct be_adapter *adapter = rxo->adapter;
	struct be_queue_info *rx_cq = &rxo->cq;
	struct be_rx_compl_info *rxcp;
	u32 work_done;

	rxo->stats.rx_polls++;
	for (work_done = 0; work_done < budget; work_done++) {
		rxcp = be_rx_compl_get(rxo);
		if (!rxcp)
			break;

		/* Ignore flush completions */
		if (rxcp->num_rcvd && rxcp->pkt_size) {
			if (do_gro(rxcp))
				be_rx_compl_process_gro(adapter, rxo, rxcp);
			else
				be_rx_compl_process(adapter, rxo, rxcp);
		} else if (rxcp->pkt_size == 0) {
			be_rx_compl_discard(adapter, rxo, rxcp);
		}

		be_rx_stats_update(rxo, rxcp);
	}

	/* Refill the queue */
	if (atomic_read(&rxo->q.used) < RX_FRAGS_REFILL_WM)
		be_post_rx_frags(rxo, GFP_ATOMIC);

	/* All consumed */
	if (work_done < budget) {
		napi_complete(napi);
		be_cq_notify(adapter, rx_cq->id, true, work_done);
	} else {
		/* More to be consumed; continue with interrupts disabled */
		be_cq_notify(adapter, rx_cq->id, false, work_done);
	}
	return work_done;
}

/* As TX and MCC share the same EQ, check for both TX and MCC completions.
 * For TX/MCC we don't honour budget; consume everything
 */
static int be_poll_tx_mcc(struct napi_struct *napi, int budget)
{
	struct be_eq_obj *tx_eq = container_of(napi, struct be_eq_obj, napi);
	struct be_adapter *adapter =
		container_of(tx_eq, struct be_adapter, tx_eq);
	struct be_tx_obj *txo;
	struct be_eth_tx_compl *txcp;
	int tx_compl, mcc_compl, status = 0;
	u8 i;
	u16 num_wrbs;

	for_all_tx_queues(adapter, txo, i) {
		tx_compl = 0;
		num_wrbs = 0;
		while ((txcp = be_tx_compl_get(&txo->cq))) {
			num_wrbs += be_tx_compl_process(adapter, txo,
				AMAP_GET_BITS(struct amap_eth_tx_compl,
					wrb_index, txcp));
			tx_compl++;
		}
		if (tx_compl) {
			be_cq_notify(adapter, txo->cq.id, true, tx_compl);

			atomic_sub(num_wrbs, &txo->q.used);

			/* As Tx wrbs have been freed up, wake up netdev queue
			 * if it was stopped due to lack of tx wrbs. */
			if (__netif_subqueue_stopped(adapter->netdev, i) &&
				atomic_read(&txo->q.used) < txo->q.len / 2) {
				netif_wake_subqueue(adapter->netdev, i);
			}

			adapter->drv_stats.be_tx_events++;
			txo->stats.be_tx_compl += tx_compl;
		}
	}

	mcc_compl = be_process_mcc(adapter, &status);

	if (mcc_compl) {
		struct be_mcc_obj *mcc_obj = &adapter->mcc_obj;
		be_cq_notify(adapter, mcc_obj->cq.id, true, mcc_compl);
	}

	napi_complete(napi);

	be_eq_notify(adapter, tx_eq->q.id, true, false, 0);
	return 1;
}

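/*
 * Read the unrecoverable-error (UE) status registers from PCI config
 * space and log the name of every block whose unmasked error bit is set.
 */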
void be_detect_dump_ue(struct be_adapter *adapter)
{
	u32 ue_status_lo, ue_status_hi, ue_status_lo_mask, ue_status_hi_mask;
	u32 i;

	pci_read_config_dword(adapter->pdev,
				PCICFG_UE_STATUS_LOW, &ue_status_lo);
	pci_read_config_dword(adapter->pdev,
				PCICFG_UE_STATUS_HIGH, &ue_status_hi);
	pci_read_config_dword(adapter->pdev,
				PCICFG_UE_STATUS_LOW_MASK, &ue_status_lo_mask);
	pci_read_config_dword(adapter->pdev,
				PCICFG_UE_STATUS_HI_MASK, &ue_status_hi_mask);

	ue_status_lo = (ue_status_lo & (~ue_status_lo_mask));
	ue_status_hi = (ue_status_hi & (~ue_status_hi_mask));

	if (ue_status_lo || ue_status_hi) {
		adapter->ue_detected = true;
		adapter->eeh_err = true;
		dev_err(&adapter->pdev->dev, "UE Detected!!\n");
	}

	if (ue_status_lo) {
		for (i = 0; ue_status_lo; ue_status_lo >>= 1, i++) {
			if (ue_status_lo & 1)
				dev_err(&adapter->pdev->dev,
				"UE: %s bit set\n", ue_status_low_desc[i]);
		}
	}
	if (ue_status_hi) {
		for (i = 0; ue_status_hi; ue_status_hi >>= 1, i++) {
			if (ue_status_hi & 1)
				dev_err(&adapter->pdev->dev,
				"UE: %s bit set\n", ue_status_hi_desc[i]);
		}
	}
}

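/* Periodic housekeeping: UE detection, stats refresh, rate/EQD updates
 * and replenishing of starved RX rings; reschedules itself every second.
 */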
static void be_worker(struct work_struct *work)
{
	struct be_adapter *adapter =
		container_of(work, struct be_adapter, work.work);
	struct be_rx_obj *rxo;
	struct be_tx_obj *txo;
	int i;

	if (!adapter->ue_detected && !lancer_chip(adapter))
		be_detect_dump_ue(adapter);

	/* when interrupts are not yet enabled, just reap any pending
	 * mcc completions */
	if (!netif_running(adapter->netdev)) {
		int mcc_compl, status = 0;

		mcc_compl = be_process_mcc(adapter, &status);

		if (mcc_compl) {
			struct be_mcc_obj *mcc_obj = &adapter->mcc_obj;
			be_cq_notify(adapter, mcc_obj->cq.id, false, mcc_compl);
		}

		goto reschedule;
	}

	if (!adapter->stats_cmd_sent) {
		if (lancer_chip(adapter))
			lancer_cmd_get_pport_stats(adapter,
						&adapter->stats_cmd);
		else
			be_cmd_get_stats(adapter, &adapter->stats_cmd);
	}

	for_all_tx_queues(adapter, txo, i)
		be_tx_rate_update(txo);

	for_all_rx_queues(adapter, rxo, i) {
		be_rx_rate_update(rxo);
		be_rx_eqd_update(adapter, rxo);

		if (rxo->rx_post_starved) {
			rxo->rx_post_starved = false;
			be_post_rx_frags(rxo, GFP_KERNEL);
		}
	}

reschedule:
	adapter->work_counter++;
	schedule_delayed_work(&adapter->work, msecs_to_jiffies(1000));
}

static void be_msix_disable(struct be_adapter *adapter)
{
	if (msix_enabled(adapter)) {
		pci_disable_msix(adapter->pdev);
		adapter->num_msix_vec = 0;
	}
}

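/*
 * Request one MSI-x vector per desired RX queue plus one shared by TX and
 * MCC; if that fails, retry with the smaller count the PCI core reports.
 */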
static void be_msix_enable(struct be_adapter *adapter)
{
#define BE_MIN_MSIX_VECTORS	(1 + 1) /* Rx + Tx */
	int i, status, num_vec;

	num_vec = be_num_rxqs_want(adapter) + 1;

	for (i = 0; i < num_vec; i++)
		adapter->msix_entries[i].entry = i;

	status = pci_enable_msix(adapter->pdev, adapter->msix_entries, num_vec);
	if (status == 0) {
		goto done;
	} else if (status >= BE_MIN_MSIX_VECTORS) {
		num_vec = status;
		if (pci_enable_msix(adapter->pdev, adapter->msix_entries,
				num_vec) == 0)
			goto done;
	}
	return;
done:
	adapter->num_msix_vec = num_vec;
	return;
}

static void be_sriov_enable(struct be_adapter *adapter)
{
	be_check_sriov_fn_type(adapter);
#ifdef CONFIG_PCI_IOV
	if (be_physfn(adapter) && num_vfs) {
		int status, pos;
		u16 nvfs;

		pos = pci_find_ext_capability(adapter->pdev,
						PCI_EXT_CAP_ID_SRIOV);
		pci_read_config_word(adapter->pdev,
				pos + PCI_SRIOV_TOTAL_VF, &nvfs);

		if (num_vfs > nvfs) {
			dev_info(&adapter->pdev->dev,
					"Device supports %d VFs and not %d\n",
					nvfs, num_vfs);
			num_vfs = nvfs;
		}

		status = pci_enable_sriov(adapter->pdev, num_vfs);
		adapter->sriov_enabled = status ? false : true;
	}
#endif
}

static void be_sriov_disable(struct be_adapter *adapter)
{
#ifdef CONFIG_PCI_IOV
	if (adapter->sriov_enabled) {
		pci_disable_sriov(adapter->pdev);
		adapter->sriov_enabled = false;
	}
#endif
}

static inline int be_msix_vec_get(struct be_adapter *adapter,
					struct be_eq_obj *eq_obj)
{
	return adapter->msix_entries[eq_obj->eq_idx].vector;
}

static int be_request_irq(struct be_adapter *adapter,
		struct be_eq_obj *eq_obj,
		void *handler, char *desc, void *context)
{
	struct net_device *netdev = adapter->netdev;
	int vec;

	sprintf(eq_obj->desc, "%s-%s", netdev->name, desc);
	vec = be_msix_vec_get(adapter, eq_obj);
	return request_irq(vec, handler, 0, eq_obj->desc, context);
}

static void be_free_irq(struct be_adapter *adapter, struct be_eq_obj *eq_obj,
			void *context)
{
	int vec = be_msix_vec_get(adapter, eq_obj);
	free_irq(vec, context);
}

static int be_msix_register(struct be_adapter *adapter)
{
	struct be_rx_obj *rxo;
	int status, i;
	char qname[10];

	status = be_request_irq(adapter, &adapter->tx_eq, be_msix_tx_mcc, "tx",
				adapter);
	if (status)
		goto err;

	for_all_rx_queues(adapter, rxo, i) {
		sprintf(qname, "rxq%d", i);
		status = be_request_irq(adapter, &rxo->rx_eq, be_msix_rx,
				qname, rxo);
		if (status)
			goto err_msix;
	}

	return 0;

err_msix:
	be_free_irq(adapter, &adapter->tx_eq, adapter);

	for (i--, rxo = &adapter->rx_obj[i]; i >= 0; i--, rxo--)
		be_free_irq(adapter, &rxo->rx_eq, rxo);

err:
	dev_warn(&adapter->pdev->dev,
		"MSIX Request IRQ failed - err %d\n", status);
	be_msix_disable(adapter);
	return status;
}

static int be_irq_register(struct be_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	int status;

	if (msix_enabled(adapter)) {
		status = be_msix_register(adapter);
		if (status == 0)
			goto done;
		/* INTx is not supported for VF */
		if (!be_physfn(adapter))
			return status;
	}

	/* INTx */
	netdev->irq = adapter->pdev->irq;
	status = request_irq(netdev->irq, be_intx, IRQF_SHARED, netdev->name,
			adapter);
	if (status) {
		dev_err(&adapter->pdev->dev,
			"INTx request IRQ failed - err %d\n", status);
		return status;
	}
done:
	adapter->isr_registered = true;
	return 0;
}

static void be_irq_unregister(struct be_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	struct be_rx_obj *rxo;
	int i;

	if (!adapter->isr_registered)
		return;

	/* INTx */
	if (!msix_enabled(adapter)) {
		free_irq(netdev->irq, adapter);
		goto done;
	}

	/* MSIx */
	be_free_irq(adapter, &adapter->tx_eq, adapter);

	for_all_rx_queues(adapter, rxo, i)
		be_free_irq(adapter, &rxo->rx_eq, rxo);

done:
	adapter->isr_registered = false;
}

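/*
 * Destroy the RX rings in HW, allowing a 1ms grace period for in-flight
 * DMA and flush completions, then drain any residual buffers and events.
 */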
static void be_rx_queues_clear(struct be_adapter *adapter)
{
	struct be_queue_info *q;
	struct be_rx_obj *rxo;
	int i;

	for_all_rx_queues(adapter, rxo, i) {
		q = &rxo->q;
		if (q->created) {
			be_cmd_rxq_destroy(adapter, q);
			/* After the rxq is invalidated, wait for a grace time
			 * of 1ms for all dma to end and the flush compl to
			 * arrive
			 */
			mdelay(1);
			be_rx_q_clean(adapter, rxo);
		}

		/* Clear any residual events */
		q = &rxo->rx_eq.q;
		if (q->created)
			be_eq_clean(adapter, &rxo->rx_eq);
	}
}

static int be_close(struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_rx_obj *rxo;
	struct be_tx_obj *txo;
	struct be_eq_obj *tx_eq = &adapter->tx_eq;
	int vec, i;

	be_async_mcc_disable(adapter);

	netif_carrier_off(netdev);
	adapter->link_up = false;

	if (!lancer_chip(adapter))
		be_intr_set(adapter, false);

	for_all_rx_queues(adapter, rxo, i)
		napi_disable(&rxo->rx_eq.napi);

	napi_disable(&tx_eq->napi);

	if (lancer_chip(adapter)) {
		be_cq_notify(adapter, adapter->mcc_obj.cq.id, false, 0);
		for_all_rx_queues(adapter, rxo, i)
			be_cq_notify(adapter, rxo->cq.id, false, 0);
		for_all_tx_queues(adapter, txo, i)
			be_cq_notify(adapter, txo->cq.id, false, 0);
	}

	if (msix_enabled(adapter)) {
		vec = be_msix_vec_get(adapter, tx_eq);
		synchronize_irq(vec);

		for_all_rx_queues(adapter, rxo, i) {
			vec = be_msix_vec_get(adapter, &rxo->rx_eq);
			synchronize_irq(vec);
		}
	} else {
		synchronize_irq(netdev->irq);
	}
	be_irq_unregister(adapter);

	/* Wait for all pending tx completions to arrive so that
	 * all tx skbs are freed.
	 */
	for_all_tx_queues(adapter, txo, i)
		be_tx_compl_clean(adapter, txo);

	be_rx_queues_clear(adapter);
	return 0;
}

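/*
 * Create the RX rings in HW (queue 0 is the default non-RSS ring), program
 * the RSS table when multiple rings exist and post the first batch of
 * receive buffers.
 */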
static int be_rx_queues_setup(struct be_adapter *adapter)
{
	struct be_rx_obj *rxo;
	int rc, i;
	u8 rsstable[MAX_RSS_QS];

	for_all_rx_queues(adapter, rxo, i) {
		rc = be_cmd_rxq_create(adapter, &rxo->q, rxo->cq.id,
			rx_frag_size, BE_MAX_JUMBO_FRAME_SIZE,
			adapter->if_handle,
			(i > 0) ? 1 : 0 /* rss enable */, &rxo->rss_id);
		if (rc)
			return rc;
	}

	if (be_multi_rxq(adapter)) {
		for_all_rss_queues(adapter, rxo, i)
			rsstable[i] = rxo->rss_id;

		rc = be_cmd_rss_config(adapter, rsstable,
			adapter->num_rx_qs - 1);
		if (rc)
			return rc;
	}

	/* First time posting */
	for_all_rx_queues(adapter, rxo, i) {
		be_post_rx_frags(rxo, GFP_KERNEL);
		napi_enable(&rxo->rx_eq.napi);
	}
	return 0;
}

static int be_open(struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_eq_obj *tx_eq = &adapter->tx_eq;
	struct be_rx_obj *rxo;
	bool link_up;
	int status, i;
	u8 mac_speed;
	u16 link_speed;

	status = be_rx_queues_setup(adapter);
	if (status)
		goto err;

	napi_enable(&tx_eq->napi);

	be_irq_register(adapter);

	if (!lancer_chip(adapter))
		be_intr_set(adapter, true);

	/* The evt queues are created in unarmed state; arm them */
	for_all_rx_queues(adapter, rxo, i) {
		be_eq_notify(adapter, rxo->rx_eq.q.id, true, false, 0);
		be_cq_notify(adapter, rxo->cq.id, true, 0);
	}
	be_eq_notify(adapter, tx_eq->q.id, true, false, 0);

	/* Now that interrupts are on we can process async mcc */
	be_async_mcc_enable(adapter);

	status = be_cmd_link_status_query(adapter, &link_up, &mac_speed,
			&link_speed, 0);
	if (status)
		goto err;
	be_link_status_update(adapter, link_up);

	if (be_physfn(adapter)) {
		status = be_vid_config(adapter, false, 0);
		if (status)
			goto err;

		status = be_cmd_set_flow_control(adapter,
				adapter->tx_fc, adapter->rx_fc);
		if (status)
			goto err;
	}

	return 0;
err:
	be_close(adapter->netdev);
	return -EIO;
}

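/*
 * Program (or clear) the magic-packet wake-up filter in the HW and set the
 * matching PCI wake state for D3hot/D3cold.
 */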
static int be_setup_wol(struct be_adapter *adapter, bool enable)
{
	struct be_dma_mem cmd;
	int status = 0;
	u8 mac[ETH_ALEN];

	memset(mac, 0, ETH_ALEN);

	cmd.size = sizeof(struct be_cmd_req_acpi_wol_magic_config);
	cmd.va = dma_alloc_coherent(&adapter->pdev->dev, cmd.size, &cmd.dma,
				    GFP_KERNEL);
	if (cmd.va == NULL)
		return -1;
	memset(cmd.va, 0, cmd.size);

	if (enable) {
		status = pci_write_config_dword(adapter->pdev,
			PCICFG_PM_CONTROL_OFFSET, PCICFG_PM_CONTROL_MASK);
		if (status) {
			dev_err(&adapter->pdev->dev,
				"Could not enable Wake-on-LAN\n");
			dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va,
					  cmd.dma);
			return status;
		}
		status = be_cmd_enable_magic_wol(adapter,
				adapter->netdev->dev_addr, &cmd);
		pci_enable_wake(adapter->pdev, PCI_D3hot, 1);
		pci_enable_wake(adapter->pdev, PCI_D3cold, 1);
	} else {
		status = be_cmd_enable_magic_wol(adapter, mac, &cmd);
		pci_enable_wake(adapter->pdev, PCI_D3hot, 0);
		pci_enable_wake(adapter->pdev, PCI_D3cold, 0);
	}

	dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va, cmd.dma);
	return status;
}

/*
 * Generate a seed MAC address from the PF MAC address using jhash.
 * MAC addresses for the VFs are assigned incrementally starting from the
 * seed. These addresses are programmed into the ASIC by the PF, and each
 * VF driver queries for its MAC address during probe.
 */
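/*
 * Illustrative only (the actual hash inputs live in
 * be_vf_eth_addr_generate(), which is not shown here): if the generated
 * seed were 02:ab:cd:ef:00:10, the loop below would assign
 * VF0 -> ...:10, VF1 -> ...:11, VF2 -> ...:12, and so on, by bumping
 * mac[5] after each pmac add.
 */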
static inline int be_vf_eth_addr_config(struct be_adapter *adapter)
{
        u32 vf = 0;
        int status = 0;
        u8 mac[ETH_ALEN];

        be_vf_eth_addr_generate(adapter, mac);

        for (vf = 0; vf < num_vfs; vf++) {
                status = be_cmd_pmac_add(adapter, mac,
                                adapter->vf_cfg[vf].vf_if_handle,
                                &adapter->vf_cfg[vf].vf_pmac_id,
                                vf + 1);
                if (status)
                        dev_err(&adapter->pdev->dev,
                                "Mac address add failed for VF %d\n", vf);
                else
                        memcpy(adapter->vf_cfg[vf].vf_mac_addr, mac, ETH_ALEN);

                mac[5] += 1;
        }
        return status;
}

static inline void be_vf_eth_addr_rem(struct be_adapter *adapter)
{
        u32 vf;

        for (vf = 0; vf < num_vfs; vf++) {
                if (adapter->vf_cfg[vf].vf_pmac_id != BE_INVALID_PMAC_ID)
                        be_cmd_pmac_del(adapter,
                                        adapter->vf_cfg[vf].vf_if_handle,
                                        adapter->vf_cfg[vf].vf_pmac_id, vf + 1);
        }
}

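/*
 * A rough map of be_setup(), hedged to what is visible in this file:
 * build the capability/enable flag sets (RSS and the promiscuous bits are
 * PF-only), create the PF interface, then one stripped-down interface per
 * VF when SR-IOV is on, and finally bring up the TX, RX and MCC queues.
 * Errors unwind in strict reverse order through the labels at the bottom.
 */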
static int be_setup(struct be_adapter *adapter)
{
        struct net_device *netdev = adapter->netdev;
        u32 cap_flags, en_flags, vf = 0;
        int status;
        u8 mac[ETH_ALEN];

        cap_flags = en_flags = BE_IF_FLAGS_UNTAGGED |
                                BE_IF_FLAGS_BROADCAST |
                                BE_IF_FLAGS_MULTICAST;

        if (be_physfn(adapter)) {
                cap_flags |= BE_IF_FLAGS_MCAST_PROMISCUOUS |
                                BE_IF_FLAGS_PROMISCUOUS |
                                BE_IF_FLAGS_PASS_L3L4_ERRORS;
                en_flags |= BE_IF_FLAGS_PASS_L3L4_ERRORS;

                if (adapter->function_caps & BE_FUNCTION_CAPS_RSS) {
                        cap_flags |= BE_IF_FLAGS_RSS;
                        en_flags |= BE_IF_FLAGS_RSS;
                }
        }

        status = be_cmd_if_create(adapter, cap_flags, en_flags,
                        netdev->dev_addr, false /* pmac_invalid */,
                        &adapter->if_handle, &adapter->pmac_id, 0);
        if (status != 0)
                goto do_none;

        if (be_physfn(adapter)) {
                if (adapter->sriov_enabled) {
                        while (vf < num_vfs) {
                                cap_flags = en_flags = BE_IF_FLAGS_UNTAGGED |
                                                        BE_IF_FLAGS_BROADCAST;
                                status = be_cmd_if_create(adapter, cap_flags,
                                        en_flags, mac, true,
                                        &adapter->vf_cfg[vf].vf_if_handle,
                                        NULL, vf + 1);
                                if (status) {
                                        dev_err(&adapter->pdev->dev,
                                        "Interface Create failed for VF %d\n",
                                        vf);
                                        goto if_destroy;
                                }
                                adapter->vf_cfg[vf].vf_pmac_id =
                                                BE_INVALID_PMAC_ID;
                                vf++;
                        }
                }
        } else {
                status = be_cmd_mac_addr_query(adapter, mac,
                        MAC_ADDRESS_TYPE_NETWORK, false, adapter->if_handle);
                if (!status) {
                        memcpy(adapter->netdev->dev_addr, mac, ETH_ALEN);
                        memcpy(adapter->netdev->perm_addr, mac, ETH_ALEN);
                }
        }

        status = be_tx_queues_create(adapter);
        if (status != 0)
                goto if_destroy;

        status = be_rx_queues_create(adapter);
        if (status != 0)
                goto tx_qs_destroy;

        /* Allow all priorities by default. A GRP5 evt may modify this */
        adapter->vlan_prio_bmap = 0xff;

        status = be_mcc_queues_create(adapter);
        if (status != 0)
                goto rx_qs_destroy;

        adapter->link_speed = -1;

        return 0;

rx_qs_destroy:
        be_rx_queues_destroy(adapter);
tx_qs_destroy:
        be_tx_queues_destroy(adapter);
if_destroy:
        if (be_physfn(adapter) && adapter->sriov_enabled)
                for (vf = 0; vf < num_vfs; vf++)
                        if (adapter->vf_cfg[vf].vf_if_handle)
                                be_cmd_if_destroy(adapter,
                                        adapter->vf_cfg[vf].vf_if_handle,
                                        vf + 1);
        be_cmd_if_destroy(adapter, adapter->if_handle, 0);
do_none:
        return status;
}

static int be_clear(struct be_adapter *adapter)
{
        int vf;

        if (be_physfn(adapter) && adapter->sriov_enabled)
                be_vf_eth_addr_rem(adapter);

        be_mcc_queues_destroy(adapter);
        be_rx_queues_destroy(adapter);
        be_tx_queues_destroy(adapter);
        adapter->eq_next_idx = 0;

        if (be_physfn(adapter) && adapter->sriov_enabled)
                for (vf = 0; vf < num_vfs; vf++)
                        if (adapter->vf_cfg[vf].vf_if_handle)
                                be_cmd_if_destroy(adapter,
                                        adapter->vf_cfg[vf].vf_if_handle,
                                        vf + 1);

        be_cmd_if_destroy(adapter, adapter->if_handle, 0);

        /* tell fw we're done with firing cmds */
        be_cmd_fw_clean(adapter);
        return 0;
}

#define FW_FILE_HDR_SIGN 	"ServerEngines Corp. "
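/*
 * Note on the redboot check below: the last four bytes of the redboot
 * region in the UFI image hold its CRC. be_flash_redboot() compares that
 * CRC against the one already on flash (fetched via be_cmd_get_flash_crc())
 * and skips reflashing the boot code when they match, so only a changed
 * redboot image is ever rewritten.
 */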
static bool be_flash_redboot(struct be_adapter *adapter,
                        const u8 *p, u32 img_start, int image_size,
                        int hdr_size)
{
        u32 crc_offset;
        u8 flashed_crc[4];
        int status;

        crc_offset = hdr_size + img_start + image_size - 4;

        p += crc_offset;

        status = be_cmd_get_flash_crc(adapter, flashed_crc,
                        (image_size - 4));
        if (status) {
                dev_err(&adapter->pdev->dev,
                "could not get crc from flash, not flashing redboot\n");
                return false;
        }

        /* update redboot only if crc does not match */
        if (!memcmp(flashed_crc, p, 4))
                return false;
        else
                return true;
}

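/*
 * be_flash_data() below walks a per-generation table of flash components
 * (firmware images, BIOS variants, redboot, NCSI), each entry giving the
 * component's offset in the UFI file and its maximum size, then streams
 * every component to the card in 32KB pieces: intermediate pieces use
 * FLASHROM_OPER_SAVE and the final piece switches to FLASHROM_OPER_FLASH,
 * which, as read from this code, is what commits the buffered data.
 */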
static int be_flash_data(struct be_adapter *adapter,
                        const struct firmware *fw,
                        struct be_dma_mem *flash_cmd, int num_of_images)
{
        int status = 0, i, filehdr_size = 0;
        u32 total_bytes = 0, flash_op;
        int num_bytes;
        const u8 *p = fw->data;
        struct be_cmd_write_flashrom *req = flash_cmd->va;
        const struct flash_comp *pflashcomp;
        int num_comp;

        static const struct flash_comp gen3_flash_types[9] = {
                { FLASH_iSCSI_PRIMARY_IMAGE_START_g3, IMG_TYPE_ISCSI_ACTIVE,
                        FLASH_IMAGE_MAX_SIZE_g3},
                { FLASH_REDBOOT_START_g3, IMG_TYPE_REDBOOT,
                        FLASH_REDBOOT_IMAGE_MAX_SIZE_g3},
                { FLASH_iSCSI_BIOS_START_g3, IMG_TYPE_BIOS,
                        FLASH_BIOS_IMAGE_MAX_SIZE_g3},
                { FLASH_PXE_BIOS_START_g3, IMG_TYPE_PXE_BIOS,
                        FLASH_BIOS_IMAGE_MAX_SIZE_g3},
                { FLASH_FCoE_BIOS_START_g3, IMG_TYPE_FCOE_BIOS,
                        FLASH_BIOS_IMAGE_MAX_SIZE_g3},
                { FLASH_iSCSI_BACKUP_IMAGE_START_g3, IMG_TYPE_ISCSI_BACKUP,
                        FLASH_IMAGE_MAX_SIZE_g3},
                { FLASH_FCoE_PRIMARY_IMAGE_START_g3, IMG_TYPE_FCOE_FW_ACTIVE,
                        FLASH_IMAGE_MAX_SIZE_g3},
                { FLASH_FCoE_BACKUP_IMAGE_START_g3, IMG_TYPE_FCOE_FW_BACKUP,
                        FLASH_IMAGE_MAX_SIZE_g3},
                { FLASH_NCSI_START_g3, IMG_TYPE_NCSI_FW,
                        FLASH_NCSI_IMAGE_MAX_SIZE_g3}
        };
        static const struct flash_comp gen2_flash_types[8] = {
                { FLASH_iSCSI_PRIMARY_IMAGE_START_g2, IMG_TYPE_ISCSI_ACTIVE,
                        FLASH_IMAGE_MAX_SIZE_g2},
                { FLASH_REDBOOT_START_g2, IMG_TYPE_REDBOOT,
                        FLASH_REDBOOT_IMAGE_MAX_SIZE_g2},
                { FLASH_iSCSI_BIOS_START_g2, IMG_TYPE_BIOS,
                        FLASH_BIOS_IMAGE_MAX_SIZE_g2},
                { FLASH_PXE_BIOS_START_g2, IMG_TYPE_PXE_BIOS,
                        FLASH_BIOS_IMAGE_MAX_SIZE_g2},
                { FLASH_FCoE_BIOS_START_g2, IMG_TYPE_FCOE_BIOS,
                        FLASH_BIOS_IMAGE_MAX_SIZE_g2},
                { FLASH_iSCSI_BACKUP_IMAGE_START_g2, IMG_TYPE_ISCSI_BACKUP,
                        FLASH_IMAGE_MAX_SIZE_g2},
                { FLASH_FCoE_PRIMARY_IMAGE_START_g2, IMG_TYPE_FCOE_FW_ACTIVE,
                        FLASH_IMAGE_MAX_SIZE_g2},
                { FLASH_FCoE_BACKUP_IMAGE_START_g2, IMG_TYPE_FCOE_FW_BACKUP,
                        FLASH_IMAGE_MAX_SIZE_g2}
        };

        if (adapter->generation == BE_GEN3) {
                pflashcomp = gen3_flash_types;
                filehdr_size = sizeof(struct flash_file_hdr_g3);
                num_comp = ARRAY_SIZE(gen3_flash_types);
        } else {
                pflashcomp = gen2_flash_types;
                filehdr_size = sizeof(struct flash_file_hdr_g2);
                num_comp = ARRAY_SIZE(gen2_flash_types);
        }
        for (i = 0; i < num_comp; i++) {
                if ((pflashcomp[i].optype == IMG_TYPE_NCSI_FW) &&
                        memcmp(adapter->fw_ver, "3.102.148.0", 11) < 0)
                        continue;
                if ((pflashcomp[i].optype == IMG_TYPE_REDBOOT) &&
                        (!be_flash_redboot(adapter, fw->data,
                        pflashcomp[i].offset, pflashcomp[i].size, filehdr_size +
                        (num_of_images * sizeof(struct image_hdr)))))
                        continue;
                p = fw->data;
                p += filehdr_size + pflashcomp[i].offset
                        + (num_of_images * sizeof(struct image_hdr));
                if (p + pflashcomp[i].size > fw->data + fw->size)
                        return -1;
                total_bytes = pflashcomp[i].size;
                while (total_bytes) {
                        if (total_bytes > 32*1024)
                                num_bytes = 32*1024;
                        else
                                num_bytes = total_bytes;
                        total_bytes -= num_bytes;

                        if (!total_bytes)
                                flash_op = FLASHROM_OPER_FLASH;
                        else
                                flash_op = FLASHROM_OPER_SAVE;
                        memcpy(req->params.data_buf, p, num_bytes);
                        p += num_bytes;
                        status = be_cmd_write_flashrom(adapter, flash_cmd,
                                pflashcomp[i].optype, flash_op, num_bytes);
                        if (status) {
                                dev_err(&adapter->pdev->dev,
                                        "cmd to write to flash rom failed.\n");
                                return -1;
                        }
                }
        }
        return 0;
}

static int get_ufigen_type(struct flash_file_hdr_g2 *fhdr)
{
        if (fhdr == NULL)
                return 0;
        if (fhdr->build[0] == '3')
                return BE_GEN3;
        else if (fhdr->build[0] == '2')
                return BE_GEN2;
        else
                return 0;
}

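/*
 * Lancer firmware download, as this code reads: the image is streamed to
 * the "/prg" object on the card in 32KB chunks via
 * lancer_cmd_write_object(), advancing the file offset by however many
 * bytes the firmware reports written, and a final zero-length write to the
 * same object commits the download. The up-front 4-byte size check exists
 * because the write interface requires a word-aligned image length.
 */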
static int lancer_fw_download(struct be_adapter *adapter,
                                const struct firmware *fw)
{
#define LANCER_FW_DOWNLOAD_CHUNK      (32 * 1024)
#define LANCER_FW_DOWNLOAD_LOCATION   "/prg"
        struct be_dma_mem flash_cmd;
        const u8 *data_ptr = NULL;
        u8 *dest_image_ptr = NULL;
        size_t image_size = 0;
        u32 chunk_size = 0;
        u32 data_written = 0;
        u32 offset = 0;
        int status = 0;
        u8 add_status = 0;

        if (!IS_ALIGNED(fw->size, sizeof(u32))) {
                dev_err(&adapter->pdev->dev,
                        "FW Image not properly aligned. "
                        "Length must be 4 byte aligned.\n");
                status = -EINVAL;
                goto lancer_fw_exit;
        }

        flash_cmd.size = sizeof(struct lancer_cmd_req_write_object)
                                + LANCER_FW_DOWNLOAD_CHUNK;
        flash_cmd.va = dma_alloc_coherent(&adapter->pdev->dev, flash_cmd.size,
                                                &flash_cmd.dma, GFP_KERNEL);
        if (!flash_cmd.va) {
                status = -ENOMEM;
                dev_err(&adapter->pdev->dev,
                        "Memory allocation failure while flashing\n");
                goto lancer_fw_exit;
        }

        dest_image_ptr = flash_cmd.va +
                                sizeof(struct lancer_cmd_req_write_object);
        image_size = fw->size;
        data_ptr = fw->data;

        while (image_size) {
                chunk_size = min_t(u32, image_size, LANCER_FW_DOWNLOAD_CHUNK);

                /* Copy the image chunk content. */
                memcpy(dest_image_ptr, data_ptr, chunk_size);

                status = lancer_cmd_write_object(adapter, &flash_cmd,
                                chunk_size, offset, LANCER_FW_DOWNLOAD_LOCATION,
                                &data_written, &add_status);

                if (status)
                        break;

                offset += data_written;
                data_ptr += data_written;
                image_size -= data_written;
        }

        if (!status) {
                /* Commit the FW written */
                status = lancer_cmd_write_object(adapter, &flash_cmd,
                                0, offset, LANCER_FW_DOWNLOAD_LOCATION,
                                &data_written, &add_status);
        }

        dma_free_coherent(&adapter->pdev->dev, flash_cmd.size, flash_cmd.va,
                                flash_cmd.dma);
        if (status) {
                dev_err(&adapter->pdev->dev,
                        "Firmware load error. "
                        "Status code: 0x%x Additional Status: 0x%x\n",
                        status, add_status);
                goto lancer_fw_exit;
        }

        dev_info(&adapter->pdev->dev, "Firmware flashed successfully\n");
lancer_fw_exit:
        return status;
}

static int be_fw_download(struct be_adapter *adapter, const struct firmware *fw)
{
        struct flash_file_hdr_g2 *fhdr;
        struct flash_file_hdr_g3 *fhdr3;
        struct image_hdr *img_hdr_ptr = NULL;
        struct be_dma_mem flash_cmd;
        const u8 *p;
        int status = 0, i = 0, num_imgs = 0;

        p = fw->data;
        fhdr = (struct flash_file_hdr_g2 *) p;

        flash_cmd.size = sizeof(struct be_cmd_write_flashrom) + 32*1024;
        flash_cmd.va = dma_alloc_coherent(&adapter->pdev->dev, flash_cmd.size,
                                          &flash_cmd.dma, GFP_KERNEL);
        if (!flash_cmd.va) {
                status = -ENOMEM;
                dev_err(&adapter->pdev->dev,
                        "Memory allocation failure while flashing\n");
                goto be_fw_exit;
        }

        if ((adapter->generation == BE_GEN3) &&
                        (get_ufigen_type(fhdr) == BE_GEN3)) {
                fhdr3 = (struct flash_file_hdr_g3 *) fw->data;
                num_imgs = le32_to_cpu(fhdr3->num_imgs);
                for (i = 0; i < num_imgs; i++) {
                        img_hdr_ptr = (struct image_hdr *) (fw->data +
                                        (sizeof(struct flash_file_hdr_g3) +
                                         i * sizeof(struct image_hdr)));
                        if (le32_to_cpu(img_hdr_ptr->imageid) == 1)
                                status = be_flash_data(adapter, fw, &flash_cmd,
                                                        num_imgs);
                }
        } else if ((adapter->generation == BE_GEN2) &&
                        (get_ufigen_type(fhdr) == BE_GEN2)) {
                status = be_flash_data(adapter, fw, &flash_cmd, 0);
        } else {
                dev_err(&adapter->pdev->dev,
                        "UFI and Interface are not compatible for flashing\n");
                status = -1;
        }

        dma_free_coherent(&adapter->pdev->dev, flash_cmd.size, flash_cmd.va,
                          flash_cmd.dma);
        if (status) {
                dev_err(&adapter->pdev->dev, "Firmware load error\n");
                goto be_fw_exit;
        }

        dev_info(&adapter->pdev->dev, "Firmware flashed successfully\n");

be_fw_exit:
        return status;
}

int be_load_fw(struct be_adapter *adapter, u8 *fw_file)
{
        const struct firmware *fw;
        int status;

        if (!netif_running(adapter->netdev)) {
                dev_err(&adapter->pdev->dev,
                        "Firmware load not allowed (interface is down)\n");
                return -1;
        }

        status = request_firmware(&fw, fw_file, &adapter->pdev->dev);
        if (status)
                goto fw_exit;

        dev_info(&adapter->pdev->dev, "Flashing firmware file %s\n", fw_file);

        if (lancer_chip(adapter))
                status = lancer_fw_download(adapter, fw);
        else
                status = be_fw_download(adapter, fw);

fw_exit:
        release_firmware(fw);
        return status;
}

static struct net_device_ops be_netdev_ops = {
        .ndo_open = be_open,
        .ndo_stop = be_close,
        .ndo_start_xmit = be_xmit,
        .ndo_set_rx_mode = be_set_multicast_list,
        .ndo_set_mac_address = be_mac_addr_set,
        .ndo_change_mtu = be_change_mtu,
        .ndo_validate_addr = eth_validate_addr,
        .ndo_vlan_rx_register = be_vlan_register,
        .ndo_vlan_rx_add_vid = be_vlan_add_vid,
        .ndo_vlan_rx_kill_vid = be_vlan_rem_vid,
        .ndo_set_vf_mac = be_set_vf_mac,
        .ndo_set_vf_vlan = be_set_vf_vlan,
        .ndo_set_vf_tx_rate = be_set_vf_tx_rate,
        .ndo_get_vf_config = be_get_vf_config
};

static void be_netdev_init(struct net_device *netdev)
{
        struct be_adapter *adapter = netdev_priv(netdev);
        struct be_rx_obj *rxo;
        int i;

        netdev->hw_features |= NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6 |
                NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_RXCSUM |
                NETIF_F_HW_VLAN_TX;
        if (be_multi_rxq(adapter))
                netdev->hw_features |= NETIF_F_RXHASH;

        netdev->features |= netdev->hw_features |
                NETIF_F_HW_VLAN_RX | NETIF_F_HW_VLAN_FILTER;

        netdev->vlan_features |= NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6 |
                NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;

        netdev->flags |= IFF_MULTICAST;

        /* Default settings for Rx and Tx flow control */
        adapter->rx_fc = true;
        adapter->tx_fc = true;

        netif_set_gso_max_size(netdev, 65535);

        BE_SET_NETDEV_OPS(netdev, &be_netdev_ops);

        SET_ETHTOOL_OPS(netdev, &be_ethtool_ops);

        for_all_rx_queues(adapter, rxo, i)
                netif_napi_add(netdev, &rxo->rx_eq.napi, be_poll_rx,
                                BE_NAPI_WEIGHT);

        netif_napi_add(netdev, &adapter->tx_eq.napi, be_poll_tx_mcc,
                BE_NAPI_WEIGHT);
}

static void be_unmap_pci_bars(struct be_adapter *adapter)
{
        if (adapter->csr)
                iounmap(adapter->csr);
        if (adapter->db)
                iounmap(adapter->db);
        if (adapter->pcicfg && be_physfn(adapter))
                iounmap(adapter->pcicfg);
}

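/*
 * BAR layout notes, hedged to what be_map_pci_bars() itself encodes:
 * Lancer exposes only a doorbell region in BAR 0; on BE2/BE3 the PF maps
 * the CSR space from BAR 2 plus a doorbell/pcicfg pair whose BAR indices
 * differ by generation (pcicfg in BAR 1 on GEN2, BAR 0 on GEN3). A VF has
 * no pcicfg BAR of its own and instead reaches it at a fixed offset
 * (SRIOV_VF_PCICFG_OFFSET) inside its doorbell mapping.
 */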
static int be_map_pci_bars(struct be_adapter *adapter)
{
        u8 __iomem *addr;
        int pcicfg_reg, db_reg;

        if (lancer_chip(adapter)) {
                addr = ioremap_nocache(pci_resource_start(adapter->pdev, 0),
                        pci_resource_len(adapter->pdev, 0));
                if (addr == NULL)
                        return -ENOMEM;
                adapter->db = addr;
                return 0;
        }

        if (be_physfn(adapter)) {
                addr = ioremap_nocache(pci_resource_start(adapter->pdev, 2),
                                pci_resource_len(adapter->pdev, 2));
                if (addr == NULL)
                        return -ENOMEM;
                adapter->csr = addr;
        }

        if (adapter->generation == BE_GEN2) {
                pcicfg_reg = 1;
                db_reg = 4;
        } else {
                pcicfg_reg = 0;
                if (be_physfn(adapter))
                        db_reg = 4;
                else
                        db_reg = 0;
        }
        addr = ioremap_nocache(pci_resource_start(adapter->pdev, db_reg),
                                pci_resource_len(adapter->pdev, db_reg));
        if (addr == NULL)
                goto pci_map_err;
        adapter->db = addr;

        if (be_physfn(adapter)) {
                addr = ioremap_nocache(
                                pci_resource_start(adapter->pdev, pcicfg_reg),
                                pci_resource_len(adapter->pdev, pcicfg_reg));
                if (addr == NULL)
                        goto pci_map_err;
                adapter->pcicfg = addr;
        } else
                adapter->pcicfg = adapter->db + SRIOV_VF_PCICFG_OFFSET;

        return 0;
pci_map_err:
        be_unmap_pci_bars(adapter);
        return -ENOMEM;
}

static void be_ctrl_cleanup(struct be_adapter *adapter)
{
        struct be_dma_mem *mem = &adapter->mbox_mem_alloced;

        be_unmap_pci_bars(adapter);

        if (mem->va)
                dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
                                  mem->dma);

        mem = &adapter->mc_cmd_mem;
        if (mem->va)
                dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
                                  mem->dma);
}

static int be_ctrl_init(struct be_adapter *adapter)
{
        struct be_dma_mem *mbox_mem_alloc = &adapter->mbox_mem_alloced;
        struct be_dma_mem *mbox_mem_align = &adapter->mbox_mem;
        struct be_dma_mem *mc_cmd_mem = &adapter->mc_cmd_mem;
        int status;

        status = be_map_pci_bars(adapter);
        if (status)
                goto done;

        mbox_mem_alloc->size = sizeof(struct be_mcc_mailbox) + 16;
        mbox_mem_alloc->va = dma_alloc_coherent(&adapter->pdev->dev,
                                                mbox_mem_alloc->size,
                                                &mbox_mem_alloc->dma,
                                                GFP_KERNEL);
        if (!mbox_mem_alloc->va) {
                status = -ENOMEM;
                goto unmap_pci_bars;
        }

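        /*
         * The mailbox must start on a 16-byte boundary; over-allocating by
         * 16 bytes above and PTR_ALIGNing both the CPU and DMA addresses
         * below gives that guarantee without special allocator support.
         */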
        mbox_mem_align->size = sizeof(struct be_mcc_mailbox);
        mbox_mem_align->va = PTR_ALIGN(mbox_mem_alloc->va, 16);
        mbox_mem_align->dma = PTR_ALIGN(mbox_mem_alloc->dma, 16);
        memset(mbox_mem_align->va, 0, sizeof(struct be_mcc_mailbox));

        mc_cmd_mem->size = sizeof(struct be_cmd_req_mcast_mac_config);
        mc_cmd_mem->va = dma_alloc_coherent(&adapter->pdev->dev,
                                            mc_cmd_mem->size, &mc_cmd_mem->dma,
                                            GFP_KERNEL);
        if (mc_cmd_mem->va == NULL) {
                status = -ENOMEM;
                goto free_mbox;
        }
        memset(mc_cmd_mem->va, 0, mc_cmd_mem->size);

        mutex_init(&adapter->mbox_lock);
        spin_lock_init(&adapter->mcc_lock);
        spin_lock_init(&adapter->mcc_cq_lock);

        init_completion(&adapter->flash_compl);
        pci_save_state(adapter->pdev);
        return 0;

free_mbox:
        dma_free_coherent(&adapter->pdev->dev, mbox_mem_alloc->size,
                          mbox_mem_alloc->va, mbox_mem_alloc->dma);

unmap_pci_bars:
        be_unmap_pci_bars(adapter);

done:
        return status;
}

static void be_stats_cleanup(struct be_adapter *adapter)
{
        struct be_dma_mem *cmd = &adapter->stats_cmd;

        if (cmd->va)
                dma_free_coherent(&adapter->pdev->dev, cmd->size,
                                  cmd->va, cmd->dma);
}

static int be_stats_init(struct be_adapter *adapter)
{
        struct be_dma_mem *cmd = &adapter->stats_cmd;

        if (adapter->generation == BE_GEN2) {
                cmd->size = sizeof(struct be_cmd_req_get_stats_v0);
        } else {
                if (lancer_chip(adapter))
                        cmd->size = sizeof(struct lancer_cmd_req_pport_stats);
                else
                        cmd->size = sizeof(struct be_cmd_req_get_stats_v1);
        }
        cmd->va = dma_alloc_coherent(&adapter->pdev->dev, cmd->size, &cmd->dma,
                                     GFP_KERNEL);
        if (cmd->va == NULL)
                return -1;
        memset(cmd->va, 0, cmd->size);
        return 0;
}

static void __devexit be_remove(struct pci_dev *pdev)
{
        struct be_adapter *adapter = pci_get_drvdata(pdev);

        if (!adapter)
                return;

        cancel_delayed_work_sync(&adapter->work);

        unregister_netdev(adapter->netdev);

        be_clear(adapter);

        be_stats_cleanup(adapter);

        be_ctrl_cleanup(adapter);

        kfree(adapter->vf_cfg);
        be_sriov_disable(adapter);

        be_msix_disable(adapter);

        pci_set_drvdata(pdev, NULL);
        pci_release_regions(pdev);
        pci_disable_device(pdev);

        free_netdev(adapter->netdev);
}

static int be_get_config(struct be_adapter *adapter)
{
        int status;
        u8 mac[ETH_ALEN];

        status = be_cmd_get_fw_ver(adapter, adapter->fw_ver);
        if (status)
                return status;

        status = be_cmd_query_fw_cfg(adapter, &adapter->port_num,
                        &adapter->function_mode, &adapter->function_caps);
        if (status)
                return status;

        memset(mac, 0, ETH_ALEN);

        /* A default permanent address is given to each VF for Lancer */
        if (be_physfn(adapter) || lancer_chip(adapter)) {
                status = be_cmd_mac_addr_query(adapter, mac,
                        MAC_ADDRESS_TYPE_NETWORK, true /* permanent */, 0);

                if (status)
                        return status;

                if (!is_valid_ether_addr(mac))
                        return -EADDRNOTAVAIL;

                memcpy(adapter->netdev->dev_addr, mac, ETH_ALEN);
                memcpy(adapter->netdev->perm_addr, mac, ETH_ALEN);
        }

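        /*
         * 0x400 in function_mode is an undocumented flag here; judging by
         * its use (the VLAN table is quartered and TX is limited to one
         * queue) it appears to mark a multi-channel/FLEX10-style mode, but
         * that reading is an assumption, not something this file states.
         */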
        if (adapter->function_mode & 0x400)
                adapter->max_vlans = BE_NUM_VLANS_SUPPORTED/4;
        else
                adapter->max_vlans = BE_NUM_VLANS_SUPPORTED;

        status = be_cmd_get_cntl_attributes(adapter);
        if (status)
                return status;

        be_cmd_check_native_mode(adapter);

        if ((num_vfs && adapter->sriov_enabled) ||
                (adapter->function_mode & 0x400) ||
                lancer_chip(adapter) || !be_physfn(adapter)) {
                adapter->num_tx_qs = 1;
                netif_set_real_num_tx_queues(adapter->netdev,
                        adapter->num_tx_qs);
        } else {
                adapter->num_tx_qs = MAX_TX_QS;
        }

        return 0;
}

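/*
 * Device-family detection below: BE2 vs BE3 falls out of the PCI device
 * id alone, while the Lancer-era ids (OC_DEVICE_ID3/ID4) are validated by
 * reading the SLI_INTF register and checking its valid bits and interface
 * type before the SLI family is extracted from the same register.
 */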
static int be_dev_family_check(struct be_adapter *adapter)
{
        struct pci_dev *pdev = adapter->pdev;
        u32 sli_intf = 0, if_type;

        switch (pdev->device) {
        case BE_DEVICE_ID1:
        case OC_DEVICE_ID1:
                adapter->generation = BE_GEN2;
                break;
        case BE_DEVICE_ID2:
        case OC_DEVICE_ID2:
                adapter->generation = BE_GEN3;
                break;
        case OC_DEVICE_ID3:
        case OC_DEVICE_ID4:
                pci_read_config_dword(pdev, SLI_INTF_REG_OFFSET, &sli_intf);
                if_type = (sli_intf & SLI_INTF_IF_TYPE_MASK) >>
                                                SLI_INTF_IF_TYPE_SHIFT;

                if (((sli_intf & SLI_INTF_VALID_MASK) != SLI_INTF_VALID) ||
                        if_type != 0x02) {
                        dev_err(&pdev->dev, "SLI_INTF reg val is not valid\n");
                        return -EINVAL;
                }
                adapter->sli_family = ((sli_intf & SLI_INTF_FAMILY_MASK) >>
                                         SLI_INTF_FAMILY_SHIFT);
                adapter->generation = BE_GEN3;
                break;
        default:
                adapter->generation = 0;
        }
        return 0;
}

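/*
 * lancer_wait_ready() polls SLIPORT_STATUS for the ready bit in 20ms
 * steps, up to SLIPORT_READY_TIMEOUT (500) iterations, i.e. roughly ten
 * seconds. lancer_test_and_set_rdy_state() then reads the error and
 * "reset needed" bits; when both are set it writes the initiate-physical-
 * reset bit into SLIPORT_CONTROL and waits for ready again, failing if
 * either error indication survives the reset.
 */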
static int lancer_wait_ready(struct be_adapter *adapter)
{
#define SLIPORT_READY_TIMEOUT 500
        u32 sliport_status;
        int status = 0, i;

        for (i = 0; i < SLIPORT_READY_TIMEOUT; i++) {
                sliport_status = ioread32(adapter->db + SLIPORT_STATUS_OFFSET);
                if (sliport_status & SLIPORT_STATUS_RDY_MASK)
                        break;

                msleep(20);
        }

        if (i == SLIPORT_READY_TIMEOUT)
                status = -1;

        return status;
}

static int lancer_test_and_set_rdy_state(struct be_adapter *adapter)
{
        int status;
        u32 sliport_status, err, reset_needed;

        status = lancer_wait_ready(adapter);
        if (!status) {
                sliport_status = ioread32(adapter->db + SLIPORT_STATUS_OFFSET);
                err = sliport_status & SLIPORT_STATUS_ERR_MASK;
                reset_needed = sliport_status & SLIPORT_STATUS_RN_MASK;
                if (err && reset_needed) {
                        iowrite32(SLI_PORT_CONTROL_IP_MASK,
                                  adapter->db + SLIPORT_CONTROL_OFFSET);

                        /* check adapter has corrected the error */
                        status = lancer_wait_ready(adapter);
                        sliport_status = ioread32(adapter->db +
                                                  SLIPORT_STATUS_OFFSET);
                        sliport_status &= (SLIPORT_STATUS_ERR_MASK |
                                                SLIPORT_STATUS_RN_MASK);
                        if (status || sliport_status)
                                status = -1;
                } else if (err || reset_needed) {
                        status = -1;
                }
        }
        return status;
}

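/*
 * Probe order matters in be_probe() below: map BARs and set up the mailbox
 * (be_ctrl_init) before any firmware command, bring Lancer to a ready
 * state, POST only on the PF, then fw_init/reset_function before stats,
 * config query, MSI-X and queue setup. The unwind labels at the end mirror
 * this sequence in reverse.
 */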
static int __devinit be_probe(struct pci_dev *pdev,
                        const struct pci_device_id *pdev_id)
{
        int status = 0;
        struct be_adapter *adapter;
        struct net_device *netdev;

        status = pci_enable_device(pdev);
        if (status)
                goto do_none;

        status = pci_request_regions(pdev, DRV_NAME);
        if (status)
                goto disable_dev;
        pci_set_master(pdev);

        netdev = alloc_etherdev_mq(sizeof(struct be_adapter), MAX_TX_QS);
        if (netdev == NULL) {
                status = -ENOMEM;
                goto rel_reg;
        }
        adapter = netdev_priv(netdev);
        adapter->pdev = pdev;
        pci_set_drvdata(pdev, adapter);

        status = be_dev_family_check(adapter);
        if (status)
                goto free_netdev;

        adapter->netdev = netdev;
        SET_NETDEV_DEV(netdev, &pdev->dev);

        status = dma_set_mask(&pdev->dev, DMA_BIT_MASK(64));
        if (!status) {
                netdev->features |= NETIF_F_HIGHDMA;
        } else {
                status = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
                if (status) {
                        dev_err(&pdev->dev, "Could not set PCI DMA Mask\n");
                        goto free_netdev;
                }
        }

        be_sriov_enable(adapter);
        if (adapter->sriov_enabled) {
                adapter->vf_cfg = kcalloc(num_vfs,
                        sizeof(struct be_vf_cfg), GFP_KERNEL);

                if (!adapter->vf_cfg)
                        goto free_netdev;
        }

        status = be_ctrl_init(adapter);
        if (status)
                goto free_vf_cfg;

        if (lancer_chip(adapter)) {
                status = lancer_test_and_set_rdy_state(adapter);
                if (status) {
                        dev_err(&pdev->dev, "Adapter in non-recoverable error\n");
                        goto ctrl_clean;
                }
        }

        /* sync up with fw's ready state */
        if (be_physfn(adapter)) {
                status = be_cmd_POST(adapter);
                if (status)
                        goto ctrl_clean;
        }

        /* tell fw we're ready to fire cmds */
        status = be_cmd_fw_init(adapter);
        if (status)
                goto ctrl_clean;

        status = be_cmd_reset_function(adapter);
        if (status)
                goto ctrl_clean;

        status = be_stats_init(adapter);
        if (status)
                goto ctrl_clean;

        status = be_get_config(adapter);
        if (status)
                goto stats_clean;

        /* The INTR bit may be set in the card when probed by a kdump kernel
         * after a crash.
         */
        if (!lancer_chip(adapter))
                be_intr_set(adapter, false);

        be_msix_enable(adapter);

        INIT_DELAYED_WORK(&adapter->work, be_worker);

        status = be_setup(adapter);
        if (status)
                goto msix_disable;

        be_netdev_init(netdev);
        status = register_netdev(netdev);
        if (status != 0)
                goto unsetup;
        netif_carrier_off(netdev);

        if (be_physfn(adapter) && adapter->sriov_enabled) {
                u8 mac_speed;
                bool link_up;
                u16 vf, lnk_speed;

                if (!lancer_chip(adapter)) {
                        status = be_vf_eth_addr_config(adapter);
                        if (status)
                                goto unreg_netdev;
                }

                for (vf = 0; vf < num_vfs; vf++) {
                        status = be_cmd_link_status_query(adapter, &link_up,
                                        &mac_speed, &lnk_speed, vf + 1);
                        if (!status)
                                adapter->vf_cfg[vf].vf_tx_rate = lnk_speed * 10;
                        else
                                goto unreg_netdev;
                }
        }

        dev_info(&pdev->dev, "%s port %d\n", nic_name(pdev), adapter->port_num);

        schedule_delayed_work(&adapter->work, msecs_to_jiffies(100));
        return 0;

unreg_netdev:
        unregister_netdev(netdev);
unsetup:
        be_clear(adapter);
msix_disable:
        be_msix_disable(adapter);
stats_clean:
        be_stats_cleanup(adapter);
ctrl_clean:
        be_ctrl_cleanup(adapter);
free_vf_cfg:
        kfree(adapter->vf_cfg);
free_netdev:
        be_sriov_disable(adapter);
        free_netdev(netdev);
        pci_set_drvdata(pdev, NULL);
rel_reg:
        pci_release_regions(pdev);
disable_dev:
        pci_disable_device(pdev);
do_none:
        dev_err(&pdev->dev, "%s initialization failed\n", nic_name(pdev));
        return status;
}

static int be_suspend(struct pci_dev *pdev, pm_message_t state)
{
        struct be_adapter *adapter = pci_get_drvdata(pdev);
        struct net_device *netdev = adapter->netdev;

        cancel_delayed_work_sync(&adapter->work);
        if (adapter->wol)
                be_setup_wol(adapter, true);

        netif_device_detach(netdev);
        if (netif_running(netdev)) {
                rtnl_lock();
                be_close(netdev);
                rtnl_unlock();
        }
        be_cmd_get_flow_control(adapter, &adapter->tx_fc, &adapter->rx_fc);
        be_clear(adapter);

        be_msix_disable(adapter);
        pci_save_state(pdev);
        pci_disable_device(pdev);
        pci_set_power_state(pdev, pci_choose_state(pdev, state));
        return 0;
}

static int be_resume(struct pci_dev *pdev)
{
        int status = 0;
        struct be_adapter *adapter = pci_get_drvdata(pdev);
        struct net_device *netdev = adapter->netdev;

        netif_device_detach(netdev);

        status = pci_enable_device(pdev);
        if (status)
                return status;

        pci_set_power_state(pdev, 0);
        pci_restore_state(pdev);

        be_msix_enable(adapter);
        /* tell fw we're ready to fire cmds */
        status = be_cmd_fw_init(adapter);
        if (status)
                return status;

        be_setup(adapter);
        if (netif_running(netdev)) {
                rtnl_lock();
                be_open(netdev);
                rtnl_unlock();
        }
        netif_device_attach(netdev);

        if (adapter->wol)
                be_setup_wol(adapter, false);

        schedule_delayed_work(&adapter->work, msecs_to_jiffies(100));
        return 0;
}

/*
 * An FLR will stop BE from DMAing any data.
 */
static void be_shutdown(struct pci_dev *pdev)
{
        struct be_adapter *adapter = pci_get_drvdata(pdev);

        if (!adapter)
                return;

        cancel_delayed_work_sync(&adapter->work);

        netif_device_detach(adapter->netdev);

        if (adapter->wol)
                be_setup_wol(adapter, true);

        be_cmd_reset_function(adapter);

        pci_disable_device(pdev);
}

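/*
 * EEH (PCI error) recovery, hedged to the generic three-phase model the
 * handlers below implement: error_detected tears down queues and decides
 * whether a slot reset can help; slot_reset re-enables the device and
 * POSTs the firmware; resume re-runs fw_init/be_setup and reopens the
 * netdev, so a recovered adapter comes back without a driver reload.
 */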
static pci_ers_result_t be_eeh_err_detected(struct pci_dev *pdev,
                                pci_channel_state_t state)
{
        struct be_adapter *adapter = pci_get_drvdata(pdev);
        struct net_device *netdev = adapter->netdev;

        dev_err(&adapter->pdev->dev, "EEH error detected\n");

        adapter->eeh_err = true;

        netif_device_detach(netdev);

        if (netif_running(netdev)) {
                rtnl_lock();
                be_close(netdev);
                rtnl_unlock();
        }
        be_clear(adapter);

        if (state == pci_channel_io_perm_failure)
                return PCI_ERS_RESULT_DISCONNECT;

        pci_disable_device(pdev);

        return PCI_ERS_RESULT_NEED_RESET;
}

static pci_ers_result_t be_eeh_reset(struct pci_dev *pdev)
{
        struct be_adapter *adapter = pci_get_drvdata(pdev);
        int status;

        dev_info(&adapter->pdev->dev, "EEH reset\n");
        adapter->eeh_err = false;

        status = pci_enable_device(pdev);
        if (status)
                return PCI_ERS_RESULT_DISCONNECT;

        pci_set_master(pdev);
        pci_set_power_state(pdev, 0);
        pci_restore_state(pdev);

        /* Check if card is ok and fw is ready */
        status = be_cmd_POST(adapter);
        if (status)
                return PCI_ERS_RESULT_DISCONNECT;

        return PCI_ERS_RESULT_RECOVERED;
}

static void be_eeh_resume(struct pci_dev *pdev)
{
        int status = 0;
        struct be_adapter *adapter = pci_get_drvdata(pdev);
        struct net_device *netdev = adapter->netdev;

        dev_info(&adapter->pdev->dev, "EEH resume\n");

        pci_save_state(pdev);

        /* tell fw we're ready to fire cmds */
        status = be_cmd_fw_init(adapter);
        if (status)
                goto err;

        status = be_setup(adapter);
        if (status)
                goto err;

        if (netif_running(netdev)) {
                status = be_open(netdev);
                if (status)
                        goto err;
        }
        netif_device_attach(netdev);
        return;
err:
        dev_err(&adapter->pdev->dev, "EEH resume failed\n");
}

static struct pci_error_handlers be_eeh_handlers = {
        .error_detected = be_eeh_err_detected,
        .slot_reset = be_eeh_reset,
        .resume = be_eeh_resume,
};

static struct pci_driver be_driver = {
        .name = DRV_NAME,
        .id_table = be_dev_ids,
        .probe = be_probe,
        .remove = be_remove,
        .suspend = be_suspend,
        .resume = be_resume,
        .shutdown = be_shutdown,
        .err_handler = &be_eeh_handlers
};

static int __init be_init_module(void)
{
        if (rx_frag_size != 8192 && rx_frag_size != 4096 &&
            rx_frag_size != 2048) {
                printk(KERN_WARNING DRV_NAME
                        " : Module param rx_frag_size must be 2048/4096/8192."
                        " Using 2048\n");
                rx_frag_size = 2048;
        }

        return pci_register_driver(&be_driver);
}
module_init(be_init_module);

static void __exit be_exit_module(void)
{
        pci_unregister_driver(&be_driver);
}
module_exit(be_exit_module);