/*
 * Copyright (C) 2005 - 2011 Emulex
 * All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version 2
 * as published by the Free Software Foundation. The full GNU General
 * Public License is included in this distribution in the file called COPYING.
 *
 * Contact Information:
 * linux-drivers@emulex.com
 *
 * Emulex
 * 3333 Susan Street
 * Costa Mesa, CA 92626
 */

#include "be.h"
#include "be_cmds.h"
#include <asm/div64.h>

MODULE_VERSION(DRV_VER);
MODULE_DEVICE_TABLE(pci, be_dev_ids);
MODULE_DESCRIPTION(DRV_DESC " " DRV_VER);
MODULE_AUTHOR("ServerEngines Corporation");
MODULE_LICENSE("GPL");

static ushort rx_frag_size = 2048;
static unsigned int num_vfs;
module_param(rx_frag_size, ushort, S_IRUGO);
module_param(num_vfs, uint, S_IRUGO);
MODULE_PARM_DESC(rx_frag_size, "Size of a fragment that holds rcvd data.");
MODULE_PARM_DESC(num_vfs, "Number of PCI VFs to initialize");

static bool multi_rxq = true;
module_param(multi_rxq, bool, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(multi_rxq, "Multi Rx Queue support. Enabled by default");

static DEFINE_PCI_DEVICE_TABLE(be_dev_ids) = {
	{ PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID1) },
	{ PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID2) },
	{ PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID1) },
	{ PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID2) },
	{ PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID3) },
	{ 0 }
};
MODULE_DEVICE_TABLE(pci, be_dev_ids);
/* UE Status Low CSR */
static char *ue_status_low_desc[] = {
	"CEV",
	"CTX",
	"DBUF",
	"ERX",
	"Host",
	"MPU",
	"NDMA",
	"PTC ",
	"RDMA ",
	"RXF ",
	"RXIPS ",
	"RXULP0 ",
	"RXULP1 ",
	"RXULP2 ",
	"TIM ",
	"TPOST ",
	"TPRE ",
	"TXIPS ",
	"TXULP0 ",
	"TXULP1 ",
	"UC ",
	"WDMA ",
	"TXULP2 ",
	"HOST1 ",
	"P0_OB_LINK ",
	"P1_OB_LINK ",
	"HOST_GPIO ",
	"MBOX ",
	"AXGMAC0",
	"AXGMAC1",
	"JTAG",
	"MPU_INTPEND"
};
/* UE Status High CSR */
static char *ue_status_hi_desc[] = {
	"LPCMEMHOST",
	"MGMT_MAC",
	"PCS0ONLINE",
	"MPU_IRAM",
	"PCS1ONLINE",
	"PCTL0",
	"PCTL1",
	"PMEM",
	"RR",
	"TXPB",
	"RXPP",
	"XAUI",
	"TXP",
	"ARM",
	"IPC",
	"HOST2",
	"HOST3",
	"HOST4",
	"HOST5",
	"HOST6",
	"HOST7",
	"HOST8",
	"HOST9",
	"NETC",
	"Unknown",
	"Unknown",
	"Unknown",
	"Unknown",
	"Unknown",
	"Unknown",
	"Unknown",
	"Unknown"
};

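/* Helpers to allocate/free the DMA-coherent memory backing a BE ring.
 * be_queue_alloc() zeroes the queue state and the ring itself; note it
 * returns -1 (not -ENOMEM) on allocation failure, so callers only test
 * for non-zero.
 */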
static void be_queue_free(struct be_adapter *adapter, struct be_queue_info *q)
{
	struct be_dma_mem *mem = &q->dma_mem;
	if (mem->va)
		dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
				  mem->dma);
}

static int be_queue_alloc(struct be_adapter *adapter, struct be_queue_info *q,
		u16 len, u16 entry_size)
{
	struct be_dma_mem *mem = &q->dma_mem;

	memset(q, 0, sizeof(*q));
	q->len = len;
	q->entry_size = entry_size;
	mem->size = len * entry_size;
	mem->va = dma_alloc_coherent(&adapter->pdev->dev, mem->size, &mem->dma,
				     GFP_KERNEL);
	if (!mem->va)
		return -1;
	memset(mem->va, 0, mem->size);
	return 0;
}

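/* Enable/disable host-side interrupt delivery via the HOSTINTR bit in the
 * membar-mapped PCI config space. The register is read first so the write
 * is skipped when the requested state is already in effect, and skipped
 * entirely once an EEH error has been flagged.
 */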
static void be_intr_set(struct be_adapter *adapter, bool enable)
{
	u8 __iomem *addr = adapter->pcicfg + PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET;
	u32 reg = ioread32(addr);
	u32 enabled = reg & MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;

	if (adapter->eeh_err)
		return;

	if (!enabled && enable)
		reg |= MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
	else if (enabled && !enable)
		reg &= ~MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
	else
		return;

	iowrite32(reg, addr);
}

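/* Doorbell helpers: each write tells the adapter how many entries were
 * posted to (or popped from) a ring. The wmb() in the RQ/TXQ variants
 * ensures the ring-entry updates are visible to the device before the
 * doorbell write that publishes them.
 */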
static void be_rxq_notify(struct be_adapter *adapter, u16 qid, u16 posted)
{
	u32 val = 0;
	val |= qid & DB_RQ_RING_ID_MASK;
	val |= posted << DB_RQ_NUM_POSTED_SHIFT;

	wmb();
	iowrite32(val, adapter->db + DB_RQ_OFFSET);
}

static void be_txq_notify(struct be_adapter *adapter, u16 qid, u16 posted)
{
	u32 val = 0;
	val |= qid & DB_TXULP_RING_ID_MASK;
	val |= (posted & DB_TXULP_NUM_POSTED_MASK) << DB_TXULP_NUM_POSTED_SHIFT;

	wmb();
	iowrite32(val, adapter->db + DB_TXULP1_OFFSET);
}

static void be_eq_notify(struct be_adapter *adapter, u16 qid,
		bool arm, bool clear_int, u16 num_popped)
{
	u32 val = 0;
	val |= qid & DB_EQ_RING_ID_MASK;
	val |= ((qid & DB_EQ_RING_ID_EXT_MASK) <<
			DB_EQ_RING_ID_EXT_MASK_SHIFT);

	if (adapter->eeh_err)
		return;

	if (arm)
		val |= 1 << DB_EQ_REARM_SHIFT;
	if (clear_int)
		val |= 1 << DB_EQ_CLR_SHIFT;
	val |= 1 << DB_EQ_EVNT_SHIFT;
	val |= num_popped << DB_EQ_NUM_POPPED_SHIFT;
	iowrite32(val, adapter->db + DB_EQ_OFFSET);
}

void be_cq_notify(struct be_adapter *adapter, u16 qid, bool arm, u16 num_popped)
{
	u32 val = 0;
	val |= qid & DB_CQ_RING_ID_MASK;
	val |= ((qid & DB_CQ_RING_ID_EXT_MASK) <<
			DB_CQ_RING_ID_EXT_MASK_SHIFT);

	if (adapter->eeh_err)
		return;

	if (arm)
		val |= 1 << DB_CQ_REARM_SHIFT;
	val |= num_popped << DB_CQ_NUM_POPPED_SHIFT;
	iowrite32(val, adapter->db + DB_CQ_OFFSET);
}

static int be_mac_addr_set(struct net_device *netdev, void *p)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct sockaddr *addr = p;
	int status = 0;

	if (!is_valid_ether_addr(addr->sa_data))
		return -EADDRNOTAVAIL;

	/* MAC addr configuration will be done in hardware for VFs
	 * by their corresponding PFs. Just copy to netdev addr here
	 */
	if (!be_physfn(adapter))
		goto netdev_addr;

	status = be_cmd_pmac_del(adapter, adapter->if_handle,
				adapter->pmac_id, 0);
	if (status)
		return status;

	status = be_cmd_pmac_add(adapter, (u8 *)addr->sa_data,
				adapter->if_handle, &adapter->pmac_id, 0);
netdev_addr:
	if (!status)
		memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);

	return status;
}

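/* Copy the firmware-reported stats into adapter->drv_stats. BE2 (v0) and
 * BE3 (v1) lay out the stats command response differently, hence the two
 * populate variants below; be_parse_stats() picks one by chip generation.
 */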
static void populate_be2_stats(struct be_adapter *adapter)
{
	struct be_drv_stats *drvs = &adapter->drv_stats;
	struct be_pmem_stats *pmem_sts = be_pmem_stats_from_cmd(adapter);
	struct be_port_rxf_stats_v0 *port_stats =
		be_port_rxf_stats_from_cmd(adapter);
	struct be_rxf_stats_v0 *rxf_stats =
		be_rxf_stats_from_cmd(adapter);

	drvs->rx_pause_frames = port_stats->rx_pause_frames;
	drvs->rx_crc_errors = port_stats->rx_crc_errors;
	drvs->rx_control_frames = port_stats->rx_control_frames;
	drvs->rx_in_range_errors = port_stats->rx_in_range_errors;
	drvs->rx_frame_too_long = port_stats->rx_frame_too_long;
	drvs->rx_dropped_runt = port_stats->rx_dropped_runt;
	drvs->rx_ip_checksum_errs = port_stats->rx_ip_checksum_errs;
	drvs->rx_tcp_checksum_errs = port_stats->rx_tcp_checksum_errs;
	drvs->rx_udp_checksum_errs = port_stats->rx_udp_checksum_errs;
	drvs->rxpp_fifo_overflow_drop = port_stats->rx_fifo_overflow;
	drvs->rx_dropped_tcp_length = port_stats->rx_dropped_tcp_length;
	drvs->rx_dropped_too_small = port_stats->rx_dropped_too_small;
	drvs->rx_dropped_too_short = port_stats->rx_dropped_too_short;
	drvs->rx_out_range_errors = port_stats->rx_out_range_errors;
	drvs->rx_input_fifo_overflow_drop =
		port_stats->rx_input_fifo_overflow;
	drvs->rx_dropped_header_too_small =
		port_stats->rx_dropped_header_too_small;
	drvs->rx_address_match_errors =
		port_stats->rx_address_match_errors;
	drvs->rx_alignment_symbol_errors =
		port_stats->rx_alignment_symbol_errors;

	drvs->tx_pauseframes = port_stats->tx_pauseframes;
	drvs->tx_controlframes = port_stats->tx_controlframes;

	if (adapter->port_num)
		drvs->jabber_events = rxf_stats->port1_jabber_events;
	else
		drvs->jabber_events = rxf_stats->port0_jabber_events;
	drvs->rx_drops_no_pbuf = rxf_stats->rx_drops_no_pbuf;
	drvs->rx_drops_no_txpb = rxf_stats->rx_drops_no_txpb;
	drvs->rx_drops_no_erx_descr = rxf_stats->rx_drops_no_erx_descr;
	drvs->rx_drops_invalid_ring = rxf_stats->rx_drops_invalid_ring;
	drvs->forwarded_packets = rxf_stats->forwarded_packets;
	drvs->rx_drops_mtu = rxf_stats->rx_drops_mtu;
	drvs->rx_drops_no_tpre_descr = rxf_stats->rx_drops_no_tpre_descr;
	drvs->rx_drops_too_many_frags = rxf_stats->rx_drops_too_many_frags;
	adapter->drv_stats.eth_red_drops = pmem_sts->eth_red_drops;
}

static void populate_be3_stats(struct be_adapter *adapter)
{
	struct be_drv_stats *drvs = &adapter->drv_stats;
	struct be_pmem_stats *pmem_sts = be_pmem_stats_from_cmd(adapter);
	struct be_rxf_stats_v1 *rxf_stats =
		be_rxf_stats_from_cmd(adapter);
	struct be_port_rxf_stats_v1 *port_stats =
		be_port_rxf_stats_from_cmd(adapter);

	drvs->rx_priority_pause_frames = 0;
	drvs->pmem_fifo_overflow_drop = 0;
	drvs->rx_pause_frames = port_stats->rx_pause_frames;
	drvs->rx_crc_errors = port_stats->rx_crc_errors;
	drvs->rx_control_frames = port_stats->rx_control_frames;
	drvs->rx_in_range_errors = port_stats->rx_in_range_errors;
	drvs->rx_frame_too_long = port_stats->rx_frame_too_long;
	drvs->rx_dropped_runt = port_stats->rx_dropped_runt;
	drvs->rx_ip_checksum_errs = port_stats->rx_ip_checksum_errs;
	drvs->rx_tcp_checksum_errs = port_stats->rx_tcp_checksum_errs;
	drvs->rx_udp_checksum_errs = port_stats->rx_udp_checksum_errs;
	drvs->rx_dropped_tcp_length = port_stats->rx_dropped_tcp_length;
	drvs->rx_dropped_too_small = port_stats->rx_dropped_too_small;
	drvs->rx_dropped_too_short = port_stats->rx_dropped_too_short;
	drvs->rx_out_range_errors = port_stats->rx_out_range_errors;
	drvs->rx_dropped_header_too_small =
		port_stats->rx_dropped_header_too_small;
	drvs->rx_input_fifo_overflow_drop =
		port_stats->rx_input_fifo_overflow_drop;
	drvs->rx_address_match_errors =
		port_stats->rx_address_match_errors;
	drvs->rx_alignment_symbol_errors =
		port_stats->rx_alignment_symbol_errors;
	drvs->rxpp_fifo_overflow_drop =
		port_stats->rxpp_fifo_overflow_drop;
	drvs->tx_pauseframes = port_stats->tx_pauseframes;
	drvs->tx_controlframes = port_stats->tx_controlframes;
	drvs->jabber_events = port_stats->jabber_events;
	drvs->rx_drops_no_pbuf = rxf_stats->rx_drops_no_pbuf;
	drvs->rx_drops_no_txpb = rxf_stats->rx_drops_no_txpb;
	drvs->rx_drops_no_erx_descr = rxf_stats->rx_drops_no_erx_descr;
	drvs->rx_drops_invalid_ring = rxf_stats->rx_drops_invalid_ring;
	drvs->forwarded_packets = rxf_stats->forwarded_packets;
	drvs->rx_drops_mtu = rxf_stats->rx_drops_mtu;
	drvs->rx_drops_no_tpre_descr = rxf_stats->rx_drops_no_tpre_descr;
	drvs->rx_drops_too_many_frags = rxf_stats->rx_drops_too_many_frags;
	adapter->drv_stats.eth_red_drops = pmem_sts->eth_red_drops;
}

void be_parse_stats(struct be_adapter *adapter)
{
	if (adapter->generation == BE_GEN3)
		populate_be3_stats(adapter);
	else
		populate_be2_stats(adapter);
}

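/* Fold the per-RX-queue software counters and the parsed firmware stats
 * into the netdev stats structure reported to the stack. rx_dropped is
 * approximated from the per-ring no-fragments drop counters (v0 or v1
 * ERX stats, depending on chip generation).
 */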
void netdev_stats_update(struct be_adapter *adapter)
{
	struct be_drv_stats *drvs = &adapter->drv_stats;
	struct net_device_stats *dev_stats = &adapter->netdev->stats;
	struct be_rx_obj *rxo;
	int i;

	memset(dev_stats, 0, sizeof(*dev_stats));
	for_all_rx_queues(adapter, rxo, i) {
		dev_stats->rx_packets += rx_stats(rxo)->rx_pkts;
		dev_stats->rx_bytes += rx_stats(rxo)->rx_bytes;
		dev_stats->multicast += rx_stats(rxo)->rx_mcast_pkts;
		/* no space in linux buffers: best possible approximation */
		if (adapter->generation == BE_GEN3) {
			struct be_erx_stats_v1 *erx_stats =
				be_erx_stats_from_cmd(adapter);
			dev_stats->rx_dropped +=
				erx_stats->rx_drops_no_fragments[rxo->q.id];
		} else {
			struct be_erx_stats_v0 *erx_stats =
				be_erx_stats_from_cmd(adapter);
			dev_stats->rx_dropped +=
				erx_stats->rx_drops_no_fragments[rxo->q.id];
		}
	}

	dev_stats->tx_packets = tx_stats(adapter)->be_tx_pkts;
	dev_stats->tx_bytes = tx_stats(adapter)->be_tx_bytes;

	/* bad pkts received */
	dev_stats->rx_errors = drvs->rx_crc_errors +
		drvs->rx_alignment_symbol_errors +
		drvs->rx_in_range_errors +
		drvs->rx_out_range_errors +
		drvs->rx_frame_too_long +
		drvs->rx_dropped_too_small +
		drvs->rx_dropped_too_short +
		drvs->rx_dropped_header_too_small +
		drvs->rx_dropped_tcp_length +
		drvs->rx_dropped_runt +
		drvs->rx_tcp_checksum_errs +
		drvs->rx_ip_checksum_errs +
		drvs->rx_udp_checksum_errs;

	/* detailed rx errors */
	dev_stats->rx_length_errors = drvs->rx_in_range_errors +
		drvs->rx_out_range_errors +
		drvs->rx_frame_too_long;

	dev_stats->rx_crc_errors = drvs->rx_crc_errors;

	/* frame alignment errors */
	dev_stats->rx_frame_errors = drvs->rx_alignment_symbol_errors;

	/* receiver fifo overrun */
	/* drops_no_pbuf is not per i/f, it's per BE card */
	dev_stats->rx_fifo_errors = drvs->rxpp_fifo_overflow_drop +
		drvs->rx_input_fifo_overflow_drop +
		drvs->rx_drops_no_pbuf;
}

void be_link_status_update(struct be_adapter *adapter, bool link_up)
{
	struct net_device *netdev = adapter->netdev;

	/* If link came up or went down */
	if (adapter->link_up != link_up) {
		adapter->link_speed = -1;
		if (link_up) {
			netif_carrier_on(netdev);
			printk(KERN_INFO "%s: Link up\n", netdev->name);
		} else {
			netif_carrier_off(netdev);
			printk(KERN_INFO "%s: Link down\n", netdev->name);
		}
		adapter->link_up = link_up;
	}
}

/* Update the EQ delay on BE based on the RX frags consumed / sec */
static void be_rx_eqd_update(struct be_adapter *adapter, struct be_rx_obj *rxo)
{
	struct be_eq_obj *rx_eq = &rxo->rx_eq;
	struct be_rx_stats *stats = &rxo->stats;
	ulong now = jiffies;
	u32 eqd;

	if (!rx_eq->enable_aic)
		return;

	/* Wrapped around */
	if (time_before(now, stats->rx_fps_jiffies)) {
		stats->rx_fps_jiffies = now;
		return;
	}

	/* Update once a second */
	if ((now - stats->rx_fps_jiffies) < HZ)
		return;

	stats->rx_fps = (stats->rx_frags - stats->prev_rx_frags) /
			((now - stats->rx_fps_jiffies) / HZ);

	stats->rx_fps_jiffies = now;
	stats->prev_rx_frags = stats->rx_frags;
	eqd = stats->rx_fps / 110000;
	eqd = eqd << 3;
	if (eqd > rx_eq->max_eqd)
		eqd = rx_eq->max_eqd;
	if (eqd < rx_eq->min_eqd)
		eqd = rx_eq->min_eqd;
	if (eqd < 10)
		eqd = 0;
	if (eqd != rx_eq->cur_eqd)
		be_cmd_modify_eqd(adapter, rx_eq->q.id, eqd);

	rx_eq->cur_eqd = eqd;
}

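/* Convert a byte count observed over 'ticks' jiffies into Mbits/sec.
 * Worked example: 250,000,000 bytes over 2*HZ ticks -> 125,000,000 B/s,
 * << 3 gives 1,000,000,000 bits/s, / 1,000,000 gives 1000 Mbits/s.
 */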
static u32 be_calc_rate(u64 bytes, unsigned long ticks)
{
	u64 rate = bytes;

	do_div(rate, ticks / HZ);
	rate <<= 3;			/* bytes/sec -> bits/sec */
	do_div(rate, 1000000ul);	/* bits/sec -> Mbits/sec */

	return rate;
}

static void be_tx_rate_update(struct be_adapter *adapter)
{
	struct be_tx_stats *stats = tx_stats(adapter);
	ulong now = jiffies;

	/* Wrapped around? */
	if (time_before(now, stats->be_tx_jiffies)) {
		stats->be_tx_jiffies = now;
		return;
	}

	/* Update tx rate once in two seconds */
	if ((now - stats->be_tx_jiffies) > 2 * HZ) {
		stats->be_tx_rate = be_calc_rate(stats->be_tx_bytes
						  - stats->be_tx_bytes_prev,
						 now - stats->be_tx_jiffies);
		stats->be_tx_jiffies = now;
		stats->be_tx_bytes_prev = stats->be_tx_bytes;
	}
}

static void be_tx_stats_update(struct be_adapter *adapter,
			u32 wrb_cnt, u32 copied, u32 gso_segs, bool stopped)
{
	struct be_tx_stats *stats = tx_stats(adapter);
	stats->be_tx_reqs++;
	stats->be_tx_wrbs += wrb_cnt;
	stats->be_tx_bytes += copied;
	stats->be_tx_pkts += (gso_segs ? gso_segs : 1);
	if (stopped)
		stats->be_tx_stops++;
}

/* Determine number of WRB entries needed to xmit data in an skb */
static u32 wrb_cnt_for_skb(struct be_adapter *adapter, struct sk_buff *skb,
								bool *dummy)
{
	int cnt = (skb->len > skb->data_len);

	cnt += skb_shinfo(skb)->nr_frags;

	/* to account for hdr wrb */
	cnt++;
	if (lancer_chip(adapter) || !(cnt & 1)) {
		*dummy = false;
	} else {
		/* add a dummy to make it an even num */
		cnt++;
		*dummy = true;
	}
	BUG_ON(cnt > BE_MAX_TX_FRAG_COUNT);
	return cnt;
}

static inline void wrb_fill(struct be_eth_wrb *wrb, u64 addr, int len)
{
	wrb->frag_pa_hi = upper_32_bits(addr);
	wrb->frag_pa_lo = addr & 0xFFFFFFFF;
	wrb->frag_len = len & ETH_WRB_FRAG_LEN_MASK;
}

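/* Fill the header WRB that precedes the data WRBs of a transmit: it carries
 * the total length and WRB count, plus per-packet offload requests (LSO
 * with MSS, TCP/UDP checksum, and an optional vlan tag whose priority is
 * remapped when not present in the allowed priority bmap).
 */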
static void wrb_fill_hdr(struct be_adapter *adapter, struct be_eth_hdr_wrb *hdr,
		struct sk_buff *skb, u32 wrb_cnt, u32 len)
{
	u8 vlan_prio = 0;
	u16 vlan_tag = 0;

	memset(hdr, 0, sizeof(*hdr));

	AMAP_SET_BITS(struct amap_eth_hdr_wrb, crc, hdr, 1);

	if (skb_is_gso(skb)) {
		AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso, hdr, 1);
		AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso_mss,
			hdr, skb_shinfo(skb)->gso_size);
		if (skb_is_gso_v6(skb) && !lancer_chip(adapter))
			AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso6, hdr, 1);
		if (lancer_chip(adapter) && adapter->sli_family ==
							LANCER_A0_SLI_FAMILY) {
			AMAP_SET_BITS(struct amap_eth_hdr_wrb, ipcs, hdr, 1);
			if (is_tcp_pkt(skb))
				AMAP_SET_BITS(struct amap_eth_hdr_wrb,
								tcpcs, hdr, 1);
			else if (is_udp_pkt(skb))
				AMAP_SET_BITS(struct amap_eth_hdr_wrb,
								udpcs, hdr, 1);
		}
	} else if (skb->ip_summed == CHECKSUM_PARTIAL) {
		if (is_tcp_pkt(skb))
			AMAP_SET_BITS(struct amap_eth_hdr_wrb, tcpcs, hdr, 1);
		else if (is_udp_pkt(skb))
			AMAP_SET_BITS(struct amap_eth_hdr_wrb, udpcs, hdr, 1);
	}

	if (adapter->vlan_grp && vlan_tx_tag_present(skb)) {
		AMAP_SET_BITS(struct amap_eth_hdr_wrb, vlan, hdr, 1);
		vlan_tag = vlan_tx_tag_get(skb);
		vlan_prio = (vlan_tag & VLAN_PRIO_MASK) >> VLAN_PRIO_SHIFT;
		/* If vlan priority provided by OS is NOT in available bmap */
		if (!(adapter->vlan_prio_bmap & (1 << vlan_prio)))
			vlan_tag = (vlan_tag & ~VLAN_PRIO_MASK) |
					adapter->recommended_prio;
		AMAP_SET_BITS(struct amap_eth_hdr_wrb, vlan_tag, hdr, vlan_tag);
	}

	AMAP_SET_BITS(struct amap_eth_hdr_wrb, event, hdr, 1);
	AMAP_SET_BITS(struct amap_eth_hdr_wrb, complete, hdr, 1);
	AMAP_SET_BITS(struct amap_eth_hdr_wrb, num_wrb, hdr, wrb_cnt);
	AMAP_SET_BITS(struct amap_eth_hdr_wrb, len, hdr, len);
}

static void unmap_tx_frag(struct device *dev, struct be_eth_wrb *wrb,
		bool unmap_single)
{
	dma_addr_t dma;

	be_dws_le_to_cpu(wrb, sizeof(*wrb));

	dma = (u64)wrb->frag_pa_hi << 32 | (u64)wrb->frag_pa_lo;
	if (wrb->frag_len) {
		if (unmap_single)
			dma_unmap_single(dev, dma, wrb->frag_len,
					 DMA_TO_DEVICE);
		else
			dma_unmap_page(dev, dma, wrb->frag_len, DMA_TO_DEVICE);
	}
}

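/* Map the skb head and frags and post one WRB per mapping, reserving the
 * first ring slot for the header WRB that is filled in last. On a DMA
 * mapping error the ring head is rewound and all mappings done so far are
 * undone before returning 0.
 */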
static int make_tx_wrbs(struct be_adapter *adapter,
		struct sk_buff *skb, u32 wrb_cnt, bool dummy_wrb)
{
	dma_addr_t busaddr;
	int i, copied = 0;
	struct device *dev = &adapter->pdev->dev;
	struct sk_buff *first_skb = skb;
	struct be_queue_info *txq = &adapter->tx_obj.q;
	struct be_eth_wrb *wrb;
	struct be_eth_hdr_wrb *hdr;
	bool map_single = false;
	u16 map_head;

	hdr = queue_head_node(txq);
	queue_head_inc(txq);
	map_head = txq->head;

	if (skb->len > skb->data_len) {
		int len = skb_headlen(skb);
		busaddr = dma_map_single(dev, skb->data, len, DMA_TO_DEVICE);
		if (dma_mapping_error(dev, busaddr))
			goto dma_err;
		map_single = true;
		wrb = queue_head_node(txq);
		wrb_fill(wrb, busaddr, len);
		be_dws_cpu_to_le(wrb, sizeof(*wrb));
		queue_head_inc(txq);
		copied += len;
	}

	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		struct skb_frag_struct *frag =
			&skb_shinfo(skb)->frags[i];
		busaddr = dma_map_page(dev, frag->page, frag->page_offset,
				       frag->size, DMA_TO_DEVICE);
		if (dma_mapping_error(dev, busaddr))
			goto dma_err;
		wrb = queue_head_node(txq);
		wrb_fill(wrb, busaddr, frag->size);
		be_dws_cpu_to_le(wrb, sizeof(*wrb));
		queue_head_inc(txq);
		copied += frag->size;
	}

	if (dummy_wrb) {
		wrb = queue_head_node(txq);
		wrb_fill(wrb, 0, 0);
		be_dws_cpu_to_le(wrb, sizeof(*wrb));
		queue_head_inc(txq);
	}

	wrb_fill_hdr(adapter, hdr, first_skb, wrb_cnt, copied);
	be_dws_cpu_to_le(hdr, sizeof(*hdr));

	return copied;
dma_err:
	txq->head = map_head;
	while (copied) {
		wrb = queue_head_node(txq);
		unmap_tx_frag(dev, wrb, map_single);
		map_single = false;
		copied -= wrb->frag_len;
		queue_head_inc(txq);
	}
	return 0;
}

static netdev_tx_t be_xmit(struct sk_buff *skb,
			struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_tx_obj *tx_obj = &adapter->tx_obj;
	struct be_queue_info *txq = &tx_obj->q;
	u32 wrb_cnt = 0, copied = 0;
	u32 start = txq->head;
	bool dummy_wrb, stopped = false;

	wrb_cnt = wrb_cnt_for_skb(adapter, skb, &dummy_wrb);

	copied = make_tx_wrbs(adapter, skb, wrb_cnt, dummy_wrb);
	if (copied) {
		/* record the sent skb in the sent_skb table */
		BUG_ON(tx_obj->sent_skb_list[start]);
		tx_obj->sent_skb_list[start] = skb;

		/* Ensure txq has space for the next skb; Else stop the queue
		 * *BEFORE* ringing the tx doorbell, so that we serialize the
		 * tx compls of the current transmit which'll wake up the queue
		 */
		atomic_add(wrb_cnt, &txq->used);
		if ((BE_MAX_TX_FRAG_COUNT + atomic_read(&txq->used)) >=
								txq->len) {
			netif_stop_queue(netdev);
			stopped = true;
		}

		be_txq_notify(adapter, txq->id, wrb_cnt);

		be_tx_stats_update(adapter, wrb_cnt, copied,
				skb_shinfo(skb)->gso_segs, stopped);
	} else {
		txq->head = start;
		dev_kfree_skb_any(skb);
	}
	return NETDEV_TX_OK;
}

static int be_change_mtu(struct net_device *netdev, int new_mtu)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	if (new_mtu < BE_MIN_MTU ||
			new_mtu > (BE_MAX_JUMBO_FRAME_SIZE -
					(ETH_HLEN + ETH_FCS_LEN))) {
		dev_info(&adapter->pdev->dev,
			"MTU must be between %d and %d bytes\n",
			BE_MIN_MTU,
			(BE_MAX_JUMBO_FRAME_SIZE - (ETH_HLEN + ETH_FCS_LEN)));
		return -EINVAL;
	}
	dev_info(&adapter->pdev->dev, "MTU changed from %d to %d bytes\n",
			netdev->mtu, new_mtu);
	netdev->mtu = new_mtu;
	return 0;
}

/*
 * A max of 64 (BE_NUM_VLANS_SUPPORTED) vlans can be configured in BE.
 * If the user configures more, place BE in vlan promiscuous mode.
 */
static int be_vid_config(struct be_adapter *adapter, bool vf, u32 vf_num)
{
	u16 vtag[BE_NUM_VLANS_SUPPORTED];
	u16 ntags = 0, i;
	int status = 0;
	u32 if_handle;

	if (vf) {
		if_handle = adapter->vf_cfg[vf_num].vf_if_handle;
		vtag[0] = cpu_to_le16(adapter->vf_cfg[vf_num].vf_vlan_tag);
		status = be_cmd_vlan_config(adapter, if_handle, vtag, 1, 1, 0);
	}

	if (adapter->vlans_added <= adapter->max_vlans) {
		/* Construct VLAN Table to give to HW */
		for (i = 0; i < VLAN_N_VID; i++) {
			if (adapter->vlan_tag[i]) {
				vtag[ntags] = cpu_to_le16(i);
				ntags++;
			}
		}
		status = be_cmd_vlan_config(adapter, adapter->if_handle,
					vtag, ntags, 1, 0);
	} else {
		status = be_cmd_vlan_config(adapter, adapter->if_handle,
					NULL, 0, 1, 1);
	}

	return status;
}

static void be_vlan_register(struct net_device *netdev, struct vlan_group *grp)
{
	struct be_adapter *adapter = netdev_priv(netdev);

	adapter->vlan_grp = grp;
}

static void be_vlan_add_vid(struct net_device *netdev, u16 vid)
{
	struct be_adapter *adapter = netdev_priv(netdev);

	adapter->vlans_added++;
	if (!be_physfn(adapter))
		return;

	adapter->vlan_tag[vid] = 1;
	if (adapter->vlans_added <= (adapter->max_vlans + 1))
		be_vid_config(adapter, false, 0);
}

static void be_vlan_rem_vid(struct net_device *netdev, u16 vid)
{
	struct be_adapter *adapter = netdev_priv(netdev);

	adapter->vlans_added--;
	vlan_group_set_device(adapter->vlan_grp, vid, NULL);

	if (!be_physfn(adapter))
		return;

	adapter->vlan_tag[vid] = 0;
	if (adapter->vlans_added <= adapter->max_vlans)
		be_vid_config(adapter, false, 0);
}

static void be_set_multicast_list(struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);

	if (netdev->flags & IFF_PROMISC) {
		be_cmd_promiscuous_config(adapter, true);
		adapter->promiscuous = true;
		goto done;
	}

	/* BE was previously in promiscuous mode; disable it */
	if (adapter->promiscuous) {
		adapter->promiscuous = false;
		be_cmd_promiscuous_config(adapter, false);
	}

	/* Enable multicast promisc if num configured exceeds what we support */
	if (netdev->flags & IFF_ALLMULTI ||
	    netdev_mc_count(netdev) > BE_MAX_MC) {
		be_cmd_multicast_set(adapter, adapter->if_handle, NULL,
				&adapter->mc_cmd_mem);
		goto done;
	}

	be_cmd_multicast_set(adapter, adapter->if_handle, netdev,
		&adapter->mc_cmd_mem);
done:
	return;
}

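/* SR-IOV ndo callbacks (set_vf_mac/get_vf_config/set_vf_vlan/set_vf_tx_rate):
 * these run on the PF on behalf of a VF, passing vf + 1 to the firmware
 * commands and caching the result in adapter->vf_cfg[vf].
 */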
static int be_set_vf_mac(struct net_device *netdev, int vf, u8 *mac)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	int status;

	if (!adapter->sriov_enabled)
		return -EPERM;

	if (!is_valid_ether_addr(mac) || (vf >= num_vfs))
		return -EINVAL;

	if (adapter->vf_cfg[vf].vf_pmac_id != BE_INVALID_PMAC_ID)
		status = be_cmd_pmac_del(adapter,
					adapter->vf_cfg[vf].vf_if_handle,
					adapter->vf_cfg[vf].vf_pmac_id, vf + 1);

	status = be_cmd_pmac_add(adapter, mac,
				adapter->vf_cfg[vf].vf_if_handle,
				&adapter->vf_cfg[vf].vf_pmac_id, vf + 1);

	if (status)
		dev_err(&adapter->pdev->dev, "MAC %pM set on VF %d Failed\n",
				mac, vf);
	else
		memcpy(adapter->vf_cfg[vf].vf_mac_addr, mac, ETH_ALEN);

	return status;
}

static int be_get_vf_config(struct net_device *netdev, int vf,
			struct ifla_vf_info *vi)
{
	struct be_adapter *adapter = netdev_priv(netdev);

	if (!adapter->sriov_enabled)
		return -EPERM;

	if (vf >= num_vfs)
		return -EINVAL;

	vi->vf = vf;
	vi->tx_rate = adapter->vf_cfg[vf].vf_tx_rate;
	vi->vlan = adapter->vf_cfg[vf].vf_vlan_tag;
	vi->qos = 0;
	memcpy(&vi->mac, adapter->vf_cfg[vf].vf_mac_addr, ETH_ALEN);

	return 0;
}

static int be_set_vf_vlan(struct net_device *netdev,
			int vf, u16 vlan, u8 qos)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	int status = 0;

	if (!adapter->sriov_enabled)
		return -EPERM;

	if ((vf >= num_vfs) || (vlan > 4095))
		return -EINVAL;

	if (vlan) {
		adapter->vf_cfg[vf].vf_vlan_tag = vlan;
		adapter->vlans_added++;
	} else {
		adapter->vf_cfg[vf].vf_vlan_tag = 0;
		adapter->vlans_added--;
	}

	status = be_vid_config(adapter, true, vf);

	if (status)
		dev_info(&adapter->pdev->dev,
				"VLAN %d config on VF %d failed\n", vlan, vf);
	return status;
}

static int be_set_vf_tx_rate(struct net_device *netdev,
			int vf, int rate)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	int status = 0;

	if (!adapter->sriov_enabled)
		return -EPERM;

	if ((vf >= num_vfs) || (rate < 0))
		return -EINVAL;

	if (rate > 10000)
		rate = 10000;

	adapter->vf_cfg[vf].vf_tx_rate = rate;
	status = be_cmd_set_qos(adapter, rate / 10, vf + 1);

	if (status)
		dev_info(&adapter->pdev->dev,
				"tx rate %d on VF %d failed\n", rate, vf);
	return status;
}

static void be_rx_rate_update(struct be_rx_obj *rxo)
{
	struct be_rx_stats *stats = &rxo->stats;
	ulong now = jiffies;

	/* Wrapped around */
	if (time_before(now, stats->rx_jiffies)) {
		stats->rx_jiffies = now;
		return;
	}

	/* Update the rate once in two seconds */
	if ((now - stats->rx_jiffies) < 2 * HZ)
		return;

	stats->rx_rate = be_calc_rate(stats->rx_bytes - stats->rx_bytes_prev,
				now - stats->rx_jiffies);
	stats->rx_jiffies = now;
	stats->rx_bytes_prev = stats->rx_bytes;
}

static void be_rx_stats_update(struct be_rx_obj *rxo,
		struct be_rx_compl_info *rxcp)
{
	struct be_rx_stats *stats = &rxo->stats;

	stats->rx_compl++;
	stats->rx_frags += rxcp->num_rcvd;
	stats->rx_bytes += rxcp->pkt_size;
	stats->rx_pkts++;
	if (rxcp->pkt_type == BE_MULTICAST_PACKET)
		stats->rx_mcast_pkts++;
	if (rxcp->err)
		stats->rxcp_err++;
}

static inline bool csum_passed(struct be_rx_compl_info *rxcp)
{
	/* L4 checksum is not reliable for non TCP/UDP packets.
	 * Also ignore ipcksm for ipv6 pkts */
	return (rxcp->tcpf || rxcp->udpf) && rxcp->l4_csum &&
				(rxcp->ip_csum || rxcp->ipv6);
}

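/* Look up the page_info for a completed RX frag. The DMA mapping covers a
 * "big page" shared by several frags, so it is unmapped only by the last
 * user; the caller owns the page reference from here on.
 */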
static struct be_rx_page_info *
get_rx_page_info(struct be_adapter *adapter,
		struct be_rx_obj *rxo,
		u16 frag_idx)
{
	struct be_rx_page_info *rx_page_info;
	struct be_queue_info *rxq = &rxo->q;

	rx_page_info = &rxo->page_info_tbl[frag_idx];
	BUG_ON(!rx_page_info->page);

	if (rx_page_info->last_page_user) {
		dma_unmap_page(&adapter->pdev->dev,
			       dma_unmap_addr(rx_page_info, bus),
			       adapter->big_page_size, DMA_FROM_DEVICE);
		rx_page_info->last_page_user = false;
	}

	atomic_dec(&rxq->used);
	return rx_page_info;
}

/* Throw away the data in the Rx completion */
static void be_rx_compl_discard(struct be_adapter *adapter,
		struct be_rx_obj *rxo,
		struct be_rx_compl_info *rxcp)
{
	struct be_queue_info *rxq = &rxo->q;
	struct be_rx_page_info *page_info;
	u16 i, num_rcvd = rxcp->num_rcvd;

	for (i = 0; i < num_rcvd; i++) {
		page_info = get_rx_page_info(adapter, rxo, rxcp->rxq_idx);
		put_page(page_info->page);
		memset(page_info, 0, sizeof(*page_info));
		index_inc(&rxcp->rxq_idx, rxq->len);
	}
}

/*
 * skb_fill_rx_data forms a complete skb for an ether frame
 * indicated by rxcp.
 */
static void skb_fill_rx_data(struct be_adapter *adapter, struct be_rx_obj *rxo,
			struct sk_buff *skb, struct be_rx_compl_info *rxcp)
{
	struct be_queue_info *rxq = &rxo->q;
	struct be_rx_page_info *page_info;
	u16 i, j;
	u16 hdr_len, curr_frag_len, remaining;
	u8 *start;

	page_info = get_rx_page_info(adapter, rxo, rxcp->rxq_idx);
	start = page_address(page_info->page) + page_info->page_offset;
	prefetch(start);

	/* Copy data in the first descriptor of this completion */
	curr_frag_len = min(rxcp->pkt_size, rx_frag_size);

	/* Copy the header portion into skb_data */
	hdr_len = min(BE_HDR_LEN, curr_frag_len);
	memcpy(skb->data, start, hdr_len);
	skb->len = curr_frag_len;
	if (curr_frag_len <= BE_HDR_LEN) { /* tiny packet */
		/* Complete packet has now been moved to data */
		put_page(page_info->page);
		skb->data_len = 0;
		skb->tail += curr_frag_len;
	} else {
		skb_shinfo(skb)->nr_frags = 1;
		skb_shinfo(skb)->frags[0].page = page_info->page;
		skb_shinfo(skb)->frags[0].page_offset =
					page_info->page_offset + hdr_len;
		skb_shinfo(skb)->frags[0].size = curr_frag_len - hdr_len;
		skb->data_len = curr_frag_len - hdr_len;
		skb->tail += hdr_len;
	}
	page_info->page = NULL;

	if (rxcp->pkt_size <= rx_frag_size) {
		BUG_ON(rxcp->num_rcvd != 1);
		return;
	}

	/* More frags present for this completion */
	index_inc(&rxcp->rxq_idx, rxq->len);
	remaining = rxcp->pkt_size - curr_frag_len;
	for (i = 1, j = 0; i < rxcp->num_rcvd; i++) {
		page_info = get_rx_page_info(adapter, rxo, rxcp->rxq_idx);
		curr_frag_len = min(remaining, rx_frag_size);

		/* Coalesce all frags from the same physical page in one slot */
		if (page_info->page_offset == 0) {
			/* Fresh page */
			j++;
			skb_shinfo(skb)->frags[j].page = page_info->page;
			skb_shinfo(skb)->frags[j].page_offset =
							page_info->page_offset;
			skb_shinfo(skb)->frags[j].size = 0;
			skb_shinfo(skb)->nr_frags++;
		} else {
			put_page(page_info->page);
		}

		skb_shinfo(skb)->frags[j].size += curr_frag_len;
		skb->len += curr_frag_len;
		skb->data_len += curr_frag_len;

		remaining -= curr_frag_len;
		index_inc(&rxcp->rxq_idx, rxq->len);
		page_info->page = NULL;
	}
	BUG_ON(j > MAX_SKB_FRAGS);
}

/* Process the RX completion indicated by rxcp when GRO is disabled */
static void be_rx_compl_process(struct be_adapter *adapter,
			struct be_rx_obj *rxo,
			struct be_rx_compl_info *rxcp)
{
	struct net_device *netdev = adapter->netdev;
	struct sk_buff *skb;

	skb = netdev_alloc_skb_ip_align(netdev, BE_HDR_LEN);
	if (unlikely(!skb)) {
		if (net_ratelimit())
			dev_warn(&adapter->pdev->dev, "skb alloc failed\n");
		be_rx_compl_discard(adapter, rxo, rxcp);
		return;
	}

	skb_fill_rx_data(adapter, rxo, skb, rxcp);

	if (likely((netdev->features & NETIF_F_RXCSUM) && csum_passed(rxcp)))
		skb->ip_summed = CHECKSUM_UNNECESSARY;
	else
		skb_checksum_none_assert(skb);

	skb->truesize = skb->len + sizeof(struct sk_buff);
	skb->protocol = eth_type_trans(skb, netdev);
	if (adapter->netdev->features & NETIF_F_RXHASH)
		skb->rxhash = rxcp->rss_hash;

	if (unlikely(rxcp->vlanf)) {
		if (!adapter->vlan_grp || adapter->vlans_added == 0) {
			kfree_skb(skb);
			return;
		}
		vlan_hwaccel_receive_skb(skb, adapter->vlan_grp,
					 rxcp->vlan_tag);
	} else {
		netif_receive_skb(skb);
	}
}

/* Process the RX completion indicated by rxcp when GRO is enabled */
static void be_rx_compl_process_gro(struct be_adapter *adapter,
		struct be_rx_obj *rxo,
		struct be_rx_compl_info *rxcp)
{
	struct be_rx_page_info *page_info;
	struct sk_buff *skb = NULL;
	struct be_queue_info *rxq = &rxo->q;
	struct be_eq_obj *eq_obj = &rxo->rx_eq;
	u16 remaining, curr_frag_len;
	u16 i, j;

	skb = napi_get_frags(&eq_obj->napi);
	if (!skb) {
		be_rx_compl_discard(adapter, rxo, rxcp);
		return;
	}

	remaining = rxcp->pkt_size;
	for (i = 0, j = -1; i < rxcp->num_rcvd; i++) {
		page_info = get_rx_page_info(adapter, rxo, rxcp->rxq_idx);

		curr_frag_len = min(remaining, rx_frag_size);

		/* Coalesce all frags from the same physical page in one slot */
		if (i == 0 || page_info->page_offset == 0) {
			/* First frag or Fresh page */
			j++;
			skb_shinfo(skb)->frags[j].page = page_info->page;
			skb_shinfo(skb)->frags[j].page_offset =
							page_info->page_offset;
			skb_shinfo(skb)->frags[j].size = 0;
		} else {
			put_page(page_info->page);
		}
		skb_shinfo(skb)->frags[j].size += curr_frag_len;

		remaining -= curr_frag_len;
		index_inc(&rxcp->rxq_idx, rxq->len);
		memset(page_info, 0, sizeof(*page_info));
	}
	BUG_ON(j > MAX_SKB_FRAGS);

	skb_shinfo(skb)->nr_frags = j + 1;
	skb->len = rxcp->pkt_size;
	skb->data_len = rxcp->pkt_size;
	skb->truesize += rxcp->pkt_size;
	skb->ip_summed = CHECKSUM_UNNECESSARY;
	if (adapter->netdev->features & NETIF_F_RXHASH)
		skb->rxhash = rxcp->rss_hash;

	if (likely(!rxcp->vlanf))
		napi_gro_frags(&eq_obj->napi);
	else
		vlan_gro_frags(&eq_obj->napi, adapter->vlan_grp,
				rxcp->vlan_tag);
}

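/* Decode a hardware RX completion into the chip-independent
 * be_rx_compl_info. v1 is used when be3_native is set, v0 otherwise;
 * the two differ only in the bit layout of the completion descriptor.
 */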
static void be_parse_rx_compl_v1(struct be_adapter *adapter,
				struct be_eth_rx_compl *compl,
				struct be_rx_compl_info *rxcp)
{
	rxcp->pkt_size =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v1, pktsize, compl);
	rxcp->vlanf = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, vtp, compl);
	rxcp->err = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, err, compl);
	rxcp->tcpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, tcpf, compl);
	rxcp->udpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, udpf, compl);
	rxcp->ip_csum =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v1, ipcksm, compl);
	rxcp->l4_csum =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v1, l4_cksm, compl);
	rxcp->ipv6 =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v1, ip_version, compl);
	rxcp->rxq_idx =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v1, fragndx, compl);
	rxcp->num_rcvd =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v1, numfrags, compl);
	rxcp->pkt_type =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v1, cast_enc, compl);
	rxcp->rss_hash =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v1, rsshash, compl);
	if (rxcp->vlanf) {
		rxcp->vtm = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, vtm,
					  compl);
		rxcp->vlan_tag = AMAP_GET_BITS(struct amap_eth_rx_compl_v1,
					       vlan_tag, compl);
	}
}

Sathya Perla2e588f82011-03-11 02:49:26 +00001238static void be_parse_rx_compl_v0(struct be_adapter *adapter,
1239 struct be_eth_rx_compl *compl,
1240 struct be_rx_compl_info *rxcp)
1241{
1242 rxcp->pkt_size =
1243 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, pktsize, compl);
1244 rxcp->vlanf = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, vtp, compl);
1245 rxcp->err = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, err, compl);
1246 rxcp->tcpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, tcpf, compl);
Padmanabh Ratnakar9ecb42f2011-03-15 14:57:09 -07001247 rxcp->udpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, udpf, compl);
Sathya Perla2e588f82011-03-11 02:49:26 +00001248 rxcp->ip_csum =
1249 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, ipcksm, compl);
1250 rxcp->l4_csum =
1251 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, l4_cksm, compl);
1252 rxcp->ipv6 =
1253 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, ip_version, compl);
1254 rxcp->rxq_idx =
1255 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, fragndx, compl);
1256 rxcp->num_rcvd =
1257 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, numfrags, compl);
1258 rxcp->pkt_type =
1259 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, cast_enc, compl);
Ajit Khaparde4b972912011-04-06 18:07:43 +00001260 rxcp->rss_hash =
1261 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, rsshash, rxcp);
Sathya Perla15d72182011-03-21 20:49:26 +00001262 if (rxcp->vlanf) {
1263 rxcp->vtm = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, vtm,
David S. Miller3c709f82011-05-11 14:26:15 -04001264 compl);
1265 rxcp->vlan_tag = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, vlan_tag,
1266 compl);
Sathya Perla15d72182011-03-21 20:49:26 +00001267 }
Sathya Perla2e588f82011-03-11 02:49:26 +00001268}
1269
static struct be_rx_compl_info *be_rx_compl_get(struct be_rx_obj *rxo)
{
	struct be_eth_rx_compl *compl = queue_tail_node(&rxo->cq);
	struct be_rx_compl_info *rxcp = &rxo->rxcp;
	struct be_adapter *adapter = rxo->adapter;

	/* For checking the valid bit it is OK to use either definition as the
	 * valid bit is at the same position in both v0 and v1 Rx compl */
	if (compl->dw[offsetof(struct amap_eth_rx_compl_v1, valid) / 32] == 0)
		return NULL;

	rmb();
	be_dws_le_to_cpu(compl, sizeof(*compl));

	if (adapter->be3_native)
		be_parse_rx_compl_v1(adapter, compl, rxcp);
	else
		be_parse_rx_compl_v0(adapter, compl, rxcp);

	if (rxcp->vlanf) {
		/* vlanf could be wrongly set in some cards.
		 * ignore if vtm is not set */
		if ((adapter->function_mode & 0x400) && !rxcp->vtm)
			rxcp->vlanf = 0;

		if (!lancer_chip(adapter))
			rxcp->vlan_tag = swab16(rxcp->vlan_tag);

		if (((adapter->pvid & VLAN_VID_MASK) ==
		     (rxcp->vlan_tag & VLAN_VID_MASK)) &&
		    !adapter->vlan_tag[rxcp->vlan_tag])
			rxcp->vlanf = 0;
	}

	/* As the compl has been parsed, reset it; we won't touch it again */
	compl->dw[offsetof(struct amap_eth_rx_compl_v1, valid) / 32] = 0;

	queue_tail_inc(&rxo->cq);
	return rxcp;
}

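/* Allocate a page (possibly multi-order) for Rx buffers. __GFP_COMP is
 * required for order > 0 so that the fragments later handed to the stack
 * take references on the compound head page instead of a tail page.
 */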
static inline struct page *be_alloc_pages(u32 size, gfp_t gfp)
{
	u32 order = get_order(size);

	if (order > 0)
		gfp |= __GFP_COMP;
	return alloc_pages(gfp, order);
}

/*
 * Allocate a page, split it to fragments of size rx_frag_size and post as
 * receive buffers to BE
 */
static void be_post_rx_frags(struct be_rx_obj *rxo, gfp_t gfp)
{
	struct be_adapter *adapter = rxo->adapter;
	struct be_rx_page_info *page_info_tbl = rxo->page_info_tbl;
	struct be_rx_page_info *page_info = NULL, *prev_page_info = NULL;
	struct be_queue_info *rxq = &rxo->q;
	struct page *pagep = NULL;
	struct be_eth_rx_d *rxd;
	u64 page_dmaaddr = 0, frag_dmaaddr;
	u32 posted, page_offset = 0;

	page_info = &rxo->page_info_tbl[rxq->head];
	for (posted = 0; posted < MAX_RX_POST && !page_info->page; posted++) {
		if (!pagep) {
			pagep = be_alloc_pages(adapter->big_page_size, gfp);
			if (unlikely(!pagep)) {
				rxo->stats.rx_post_fail++;
				break;
			}
			page_dmaaddr = dma_map_page(&adapter->pdev->dev, pagep,
						    0, adapter->big_page_size,
						    DMA_FROM_DEVICE);
			page_info->page_offset = 0;
		} else {
			get_page(pagep);
			page_info->page_offset = page_offset + rx_frag_size;
		}
		page_offset = page_info->page_offset;
		page_info->page = pagep;
		dma_unmap_addr_set(page_info, bus, page_dmaaddr);
		frag_dmaaddr = page_dmaaddr + page_info->page_offset;

		rxd = queue_head_node(rxq);
		rxd->fragpa_lo = cpu_to_le32(frag_dmaaddr & 0xFFFFFFFF);
		rxd->fragpa_hi = cpu_to_le32(upper_32_bits(frag_dmaaddr));

		/* Any space left in the current big page for another frag? */
		if ((page_offset + rx_frag_size + rx_frag_size) >
					adapter->big_page_size) {
			pagep = NULL;
			page_info->last_page_user = true;
		}

		prev_page_info = page_info;
		queue_head_inc(rxq);
		page_info = &page_info_tbl[rxq->head];
	}
	if (pagep)
		prev_page_info->last_page_user = true;

	if (posted) {
		atomic_add(posted, &rxq->used);
		be_rxq_notify(adapter, rxq->id, posted);
	} else if (atomic_read(&rxq->used) == 0) {
		/* Let be_worker replenish when memory is available */
		rxo->rx_post_starved = true;
	}
}

static struct be_eth_tx_compl *be_tx_compl_get(struct be_queue_info *tx_cq)
{
	struct be_eth_tx_compl *txcp = queue_tail_node(tx_cq);

	if (txcp->dw[offsetof(struct amap_eth_tx_compl, valid) / 32] == 0)
		return NULL;

	rmb();
	be_dws_le_to_cpu(txcp, sizeof(*txcp));

	txcp->dw[offsetof(struct amap_eth_tx_compl, valid) / 32] = 0;

	queue_tail_inc(tx_cq);
	return txcp;
}

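/* Unmap and free the skb whose wrbs end at last_index. num_wrbs starts
 * at 1 to account for the header wrb, which carries no data fragment and
 * is skipped before the unmap loop. Returns the number of wrbs reclaimed
 * so the caller can subtract them from txq->used.
 */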
static u16 be_tx_compl_process(struct be_adapter *adapter, u16 last_index)
{
	struct be_queue_info *txq = &adapter->tx_obj.q;
	struct be_eth_wrb *wrb;
	struct sk_buff **sent_skbs = adapter->tx_obj.sent_skb_list;
	struct sk_buff *sent_skb;
	u16 cur_index, num_wrbs = 1; /* account for hdr wrb */
	bool unmap_skb_hdr = true;

	sent_skb = sent_skbs[txq->tail];
	BUG_ON(!sent_skb);
	sent_skbs[txq->tail] = NULL;

	/* skip header wrb */
	queue_tail_inc(txq);

	do {
		cur_index = txq->tail;
		wrb = queue_tail_node(txq);
		unmap_tx_frag(&adapter->pdev->dev, wrb,
			      (unmap_skb_hdr && skb_headlen(sent_skb)));
		unmap_skb_hdr = false;

		num_wrbs++;
		queue_tail_inc(txq);
	} while (cur_index != last_index);

	kfree_skb(sent_skb);
	return num_wrbs;
}

static inline struct be_eq_entry *event_get(struct be_eq_obj *eq_obj)
{
	struct be_eq_entry *eqe = queue_tail_node(&eq_obj->q);

	if (!eqe->evt)
		return NULL;

	rmb();
	eqe->evt = le32_to_cpu(eqe->evt);
	queue_tail_inc(&eq_obj->q);
	return eqe;
}

static int event_handle(struct be_adapter *adapter,
			struct be_eq_obj *eq_obj)
{
	struct be_eq_entry *eqe;
	u16 num = 0;

	while ((eqe = event_get(eq_obj)) != NULL) {
		eqe->evt = 0;
		num++;
	}

	/* Deal with any spurious interrupts that come
	 * without events
	 */
	be_eq_notify(adapter, eq_obj->q.id, true, true, num);
	if (num)
		napi_schedule(&eq_obj->napi);

	return num;
}

/* Just read and notify events without processing them.
 * Used at the time of destroying event queues */
static void be_eq_clean(struct be_adapter *adapter,
			struct be_eq_obj *eq_obj)
{
	struct be_eq_entry *eqe;
	u16 num = 0;

	while ((eqe = event_get(eq_obj)) != NULL) {
		eqe->evt = 0;
		num++;
	}

	if (num)
		be_eq_notify(adapter, eq_obj->q.id, false, true, num);
}

static void be_rx_q_clean(struct be_adapter *adapter, struct be_rx_obj *rxo)
{
	struct be_rx_page_info *page_info;
	struct be_queue_info *rxq = &rxo->q;
	struct be_queue_info *rx_cq = &rxo->cq;
	struct be_rx_compl_info *rxcp;
	u16 tail;

	/* First cleanup pending rx completions */
	while ((rxcp = be_rx_compl_get(rxo)) != NULL) {
		be_rx_compl_discard(adapter, rxo, rxcp);
		be_cq_notify(adapter, rx_cq->id, false, 1);
	}

	/* Then free posted rx buffers that were not used */
	tail = (rxq->head + rxq->len - atomic_read(&rxq->used)) % rxq->len;
	for (; atomic_read(&rxq->used) > 0; index_inc(&tail, rxq->len)) {
		page_info = get_rx_page_info(adapter, rxo, tail);
		put_page(page_info->page);
		memset(page_info, 0, sizeof(*page_info));
	}
	BUG_ON(atomic_read(&rxq->used));
}

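/* Drain Tx completions at teardown: poll the completion queue for up to
 * 200ms, then forcibly unmap and free any skbs whose completions never
 * arrived so no DMA mappings or skbs are leaked.
 */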
static void be_tx_compl_clean(struct be_adapter *adapter)
{
	struct be_queue_info *tx_cq = &adapter->tx_obj.cq;
	struct be_queue_info *txq = &adapter->tx_obj.q;
	struct be_eth_tx_compl *txcp;
	u16 end_idx, cmpl = 0, timeo = 0, num_wrbs = 0;
	struct sk_buff **sent_skbs = adapter->tx_obj.sent_skb_list;
	struct sk_buff *sent_skb;
	bool dummy_wrb;

	/* Wait for a max of 200ms for all the tx-completions to arrive. */
	do {
		while ((txcp = be_tx_compl_get(tx_cq))) {
			end_idx = AMAP_GET_BITS(struct amap_eth_tx_compl,
						wrb_index, txcp);
			num_wrbs += be_tx_compl_process(adapter, end_idx);
			cmpl++;
		}
		if (cmpl) {
			be_cq_notify(adapter, tx_cq->id, false, cmpl);
			atomic_sub(num_wrbs, &txq->used);
			cmpl = 0;
			num_wrbs = 0;
		}

		if (atomic_read(&txq->used) == 0 || ++timeo > 200)
			break;

		mdelay(1);
	} while (true);

	if (atomic_read(&txq->used))
		dev_err(&adapter->pdev->dev, "%d pending tx-completions\n",
			atomic_read(&txq->used));

	/* free posted tx for which compls will never arrive */
	while (atomic_read(&txq->used)) {
		sent_skb = sent_skbs[txq->tail];
		end_idx = txq->tail;
		index_adv(&end_idx,
			wrb_cnt_for_skb(adapter, sent_skb, &dummy_wrb) - 1,
			txq->len);
		num_wrbs = be_tx_compl_process(adapter, end_idx);
		atomic_sub(num_wrbs, &txq->used);
	}
}

static void be_mcc_queues_destroy(struct be_adapter *adapter)
{
	struct be_queue_info *q;

	q = &adapter->mcc_obj.q;
	if (q->created)
		be_cmd_q_destroy(adapter, q, QTYPE_MCCQ);
	be_queue_free(adapter, q);

	q = &adapter->mcc_obj.cq;
	if (q->created)
		be_cmd_q_destroy(adapter, q, QTYPE_CQ);
	be_queue_free(adapter, q);
}

/* Must be called only after TX qs are created as MCC shares TX EQ */
static int be_mcc_queues_create(struct be_adapter *adapter)
{
	struct be_queue_info *q, *cq;

	/* Alloc MCC compl queue */
	cq = &adapter->mcc_obj.cq;
	if (be_queue_alloc(adapter, cq, MCC_CQ_LEN,
			sizeof(struct be_mcc_compl)))
		goto err;

	/* Ask BE to create MCC compl queue; share TX's eq */
	if (be_cmd_cq_create(adapter, cq, &adapter->tx_eq.q, false, true, 0))
		goto mcc_cq_free;

	/* Alloc MCC queue */
	q = &adapter->mcc_obj.q;
	if (be_queue_alloc(adapter, q, MCC_Q_LEN, sizeof(struct be_mcc_wrb)))
		goto mcc_cq_destroy;

	/* Ask BE to create MCC queue */
	if (be_cmd_mccq_create(adapter, q, cq))
		goto mcc_q_free;

	return 0;

mcc_q_free:
	be_queue_free(adapter, q);
mcc_cq_destroy:
	be_cmd_q_destroy(adapter, cq, QTYPE_CQ);
mcc_cq_free:
	be_queue_free(adapter, cq);
err:
	return -1;
}

static void be_tx_queues_destroy(struct be_adapter *adapter)
{
	struct be_queue_info *q;

	q = &adapter->tx_obj.q;
	if (q->created)
		be_cmd_q_destroy(adapter, q, QTYPE_TXQ);
	be_queue_free(adapter, q);

	q = &adapter->tx_obj.cq;
	if (q->created)
		be_cmd_q_destroy(adapter, q, QTYPE_CQ);
	be_queue_free(adapter, q);

	/* Clear any residual events */
	be_eq_clean(adapter, &adapter->tx_eq);

	q = &adapter->tx_eq.q;
	if (q->created)
		be_cmd_q_destroy(adapter, q, QTYPE_EQ);
	be_queue_free(adapter, q);
}

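/* Tx resources are created bottom-up: first the event queue, then the
 * completion queue bound to that EQ, and finally the wrb queue bound to
 * the CQ. A failure at any step unwinds whatever was created so far.
 */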
static int be_tx_queues_create(struct be_adapter *adapter)
{
	struct be_queue_info *eq, *q, *cq;

	adapter->tx_eq.max_eqd = 0;
	adapter->tx_eq.min_eqd = 0;
	adapter->tx_eq.cur_eqd = 96;
	adapter->tx_eq.enable_aic = false;
	/* Alloc Tx Event queue */
	eq = &adapter->tx_eq.q;
	if (be_queue_alloc(adapter, eq, EVNT_Q_LEN, sizeof(struct be_eq_entry)))
		return -1;

	/* Ask BE to create Tx Event queue */
	if (be_cmd_eq_create(adapter, eq, adapter->tx_eq.cur_eqd))
		goto tx_eq_free;

	adapter->tx_eq.eq_idx = adapter->eq_next_idx++;

	/* Alloc TX eth compl queue */
	cq = &adapter->tx_obj.cq;
	if (be_queue_alloc(adapter, cq, TX_CQ_LEN,
			sizeof(struct be_eth_tx_compl)))
		goto tx_eq_destroy;

	/* Ask BE to create Tx eth compl queue */
	if (be_cmd_cq_create(adapter, cq, eq, false, false, 3))
		goto tx_cq_free;

	/* Alloc TX eth queue */
	q = &adapter->tx_obj.q;
	if (be_queue_alloc(adapter, q, TX_Q_LEN, sizeof(struct be_eth_wrb)))
		goto tx_cq_destroy;

	/* Ask BE to create Tx eth queue */
	if (be_cmd_txq_create(adapter, q, cq))
		goto tx_q_free;
	return 0;

tx_q_free:
	be_queue_free(adapter, q);
tx_cq_destroy:
	be_cmd_q_destroy(adapter, cq, QTYPE_CQ);
tx_cq_free:
	be_queue_free(adapter, cq);
tx_eq_destroy:
	be_cmd_q_destroy(adapter, eq, QTYPE_EQ);
tx_eq_free:
	be_queue_free(adapter, eq);
	return -1;
}

static void be_rx_queues_destroy(struct be_adapter *adapter)
{
	struct be_queue_info *q;
	struct be_rx_obj *rxo;
	int i;

	for_all_rx_queues(adapter, rxo, i) {
		q = &rxo->q;
		if (q->created) {
			be_cmd_q_destroy(adapter, q, QTYPE_RXQ);
			/* After the rxq is invalidated, wait for a grace time
			 * of 1ms for all dma to end and the flush compl to
			 * arrive
			 */
			mdelay(1);
			be_rx_q_clean(adapter, rxo);
		}
		be_queue_free(adapter, q);

		q = &rxo->cq;
		if (q->created)
			be_cmd_q_destroy(adapter, q, QTYPE_CQ);
		be_queue_free(adapter, q);

		/* Clear any residual events */
		q = &rxo->rx_eq.q;
		if (q->created) {
			be_eq_clean(adapter, &rxo->rx_eq);
			be_cmd_q_destroy(adapter, q, QTYPE_EQ);
		}
		be_queue_free(adapter, q);
	}
}

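/* Ask for one default queue plus MAX_RSS_QS RSS queues only when the
 * module parameter allows it, the firmware advertises RSS, SR-IOV is
 * disabled and the 0x400 function_mode flag is clear; otherwise fall
 * back to a single Rx queue.
 */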
static u32 be_num_rxqs_want(struct be_adapter *adapter)
{
	if (multi_rxq && (adapter->function_caps & BE_FUNCTION_CAPS_RSS) &&
		!adapter->sriov_enabled && !(adapter->function_mode & 0x400)) {
		return 1 + MAX_RSS_QS; /* one default non-RSS queue */
	} else {
		dev_warn(&adapter->pdev->dev,
			"No support for multiple RX queues\n");
		return 1;
	}
}

static int be_rx_queues_create(struct be_adapter *adapter)
{
	struct be_queue_info *eq, *q, *cq;
	struct be_rx_obj *rxo;
	int rc, i;

	adapter->num_rx_qs = min(be_num_rxqs_want(adapter),
				msix_enabled(adapter) ?
					adapter->num_msix_vec - 1 : 1);
	if (adapter->num_rx_qs != MAX_RX_QS)
		dev_warn(&adapter->pdev->dev,
			"Can create only %d RX queues", adapter->num_rx_qs);

	adapter->big_page_size = (1 << get_order(rx_frag_size)) * PAGE_SIZE;
	for_all_rx_queues(adapter, rxo, i) {
		rxo->adapter = adapter;
		rxo->rx_eq.max_eqd = BE_MAX_EQD;
		rxo->rx_eq.enable_aic = true;

		/* EQ */
		eq = &rxo->rx_eq.q;
		rc = be_queue_alloc(adapter, eq, EVNT_Q_LEN,
					sizeof(struct be_eq_entry));
		if (rc)
			goto err;

		rc = be_cmd_eq_create(adapter, eq, rxo->rx_eq.cur_eqd);
		if (rc)
			goto err;

		rxo->rx_eq.eq_idx = adapter->eq_next_idx++;

		/* CQ */
		cq = &rxo->cq;
		rc = be_queue_alloc(adapter, cq, RX_CQ_LEN,
				sizeof(struct be_eth_rx_compl));
		if (rc)
			goto err;

		rc = be_cmd_cq_create(adapter, cq, eq, false, false, 3);
		if (rc)
			goto err;
		/* Rx Q */
		q = &rxo->q;
		rc = be_queue_alloc(adapter, q, RX_Q_LEN,
				sizeof(struct be_eth_rx_d));
		if (rc)
			goto err;

		rc = be_cmd_rxq_create(adapter, q, cq->id, rx_frag_size,
			BE_MAX_JUMBO_FRAME_SIZE, adapter->if_handle,
			(i > 0) ? 1 : 0/* rss enable */, &rxo->rss_id);
		if (rc)
			goto err;
	}

	if (be_multi_rxq(adapter)) {
		u8 rsstable[MAX_RSS_QS];

		for_all_rss_queues(adapter, rxo, i)
			rsstable[i] = rxo->rss_id;

		rc = be_cmd_rss_config(adapter, rsstable,
			adapter->num_rx_qs - 1);
		if (rc)
			goto err;
	}

	return 0;
err:
	be_rx_queues_destroy(adapter);
	return -1;
}

static bool event_peek(struct be_eq_obj *eq_obj)
{
	struct be_eq_entry *eqe = queue_tail_node(&eq_obj->q);

	return eqe->evt != 0;
}

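/* Legacy INTx handler. On Lancer chips the ISR register is not consulted;
 * pending events are detected by peeking at the EQs directly. On BE2/BE3
 * the CEV_ISR bitmap says which EQs fired. IRQ_NONE is returned when a
 * shared interrupt line fired for another device.
 */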
static irqreturn_t be_intx(int irq, void *dev)
{
	struct be_adapter *adapter = dev;
	struct be_rx_obj *rxo;
	int isr, i, tx = 0, rx = 0;

	if (lancer_chip(adapter)) {
		if (event_peek(&adapter->tx_eq))
			tx = event_handle(adapter, &adapter->tx_eq);
		for_all_rx_queues(adapter, rxo, i) {
			if (event_peek(&rxo->rx_eq))
				rx |= event_handle(adapter, &rxo->rx_eq);
		}

		if (!(tx || rx))
			return IRQ_NONE;

	} else {
		isr = ioread32(adapter->csr + CEV_ISR0_OFFSET +
			(adapter->tx_eq.q.id / 8) * CEV_ISR_SIZE);
		if (!isr)
			return IRQ_NONE;

		if ((1 << adapter->tx_eq.eq_idx & isr))
			event_handle(adapter, &adapter->tx_eq);

		for_all_rx_queues(adapter, rxo, i) {
			if ((1 << rxo->rx_eq.eq_idx & isr))
				event_handle(adapter, &rxo->rx_eq);
		}
	}

	return IRQ_HANDLED;
}

static irqreturn_t be_msix_rx(int irq, void *dev)
{
	struct be_rx_obj *rxo = dev;
	struct be_adapter *adapter = rxo->adapter;

	event_handle(adapter, &rxo->rx_eq);

	return IRQ_HANDLED;
}

static irqreturn_t be_msix_tx_mcc(int irq, void *dev)
{
	struct be_adapter *adapter = dev;

	event_handle(adapter, &adapter->tx_eq);

	return IRQ_HANDLED;
}

static inline bool do_gro(struct be_rx_compl_info *rxcp)
{
	return rxcp->tcpf && !rxcp->err;
}

static int be_poll_rx(struct napi_struct *napi, int budget)
{
	struct be_eq_obj *rx_eq = container_of(napi, struct be_eq_obj, napi);
	struct be_rx_obj *rxo = container_of(rx_eq, struct be_rx_obj, rx_eq);
	struct be_adapter *adapter = rxo->adapter;
	struct be_queue_info *rx_cq = &rxo->cq;
	struct be_rx_compl_info *rxcp;
	u32 work_done;

	rxo->stats.rx_polls++;
	for (work_done = 0; work_done < budget; work_done++) {
		rxcp = be_rx_compl_get(rxo);
		if (!rxcp)
			break;

		/* Ignore flush completions */
		if (rxcp->num_rcvd && rxcp->pkt_size) {
			if (do_gro(rxcp))
				be_rx_compl_process_gro(adapter, rxo, rxcp);
			else
				be_rx_compl_process(adapter, rxo, rxcp);
		} else if (rxcp->pkt_size == 0) {
			be_rx_compl_discard(adapter, rxo, rxcp);
		}

		be_rx_stats_update(rxo, rxcp);
	}

	/* Refill the queue */
	if (atomic_read(&rxo->q.used) < RX_FRAGS_REFILL_WM)
		be_post_rx_frags(rxo, GFP_ATOMIC);

	/* All consumed */
	if (work_done < budget) {
		napi_complete(napi);
		be_cq_notify(adapter, rx_cq->id, true, work_done);
	} else {
		/* More to be consumed; continue with interrupts disabled */
		be_cq_notify(adapter, rx_cq->id, false, work_done);
	}
	return work_done;
}

/* As TX and MCC share the same EQ, check for both TX and MCC completions.
 * For TX/MCC we don't honour budget; consume everything.
 */
static int be_poll_tx_mcc(struct napi_struct *napi, int budget)
{
	struct be_eq_obj *tx_eq = container_of(napi, struct be_eq_obj, napi);
	struct be_adapter *adapter =
		container_of(tx_eq, struct be_adapter, tx_eq);
	struct be_queue_info *txq = &adapter->tx_obj.q;
	struct be_queue_info *tx_cq = &adapter->tx_obj.cq;
	struct be_eth_tx_compl *txcp;
	int tx_compl = 0, mcc_compl, status = 0;
	u16 end_idx, num_wrbs = 0;

	while ((txcp = be_tx_compl_get(tx_cq))) {
		end_idx = AMAP_GET_BITS(struct amap_eth_tx_compl,
				wrb_index, txcp);
		num_wrbs += be_tx_compl_process(adapter, end_idx);
		tx_compl++;
	}

	mcc_compl = be_process_mcc(adapter, &status);

	napi_complete(napi);

	if (mcc_compl) {
		struct be_mcc_obj *mcc_obj = &adapter->mcc_obj;
		be_cq_notify(adapter, mcc_obj->cq.id, true, mcc_compl);
	}

	if (tx_compl) {
		be_cq_notify(adapter, adapter->tx_obj.cq.id, true, tx_compl);

		atomic_sub(num_wrbs, &txq->used);

		/* As Tx wrbs have been freed up, wake up netdev queue if
		 * it was stopped due to lack of tx wrbs.
		 */
		if (netif_queue_stopped(adapter->netdev) &&
			atomic_read(&txq->used) < txq->len / 2) {
			netif_wake_queue(adapter->netdev);
		}

		tx_stats(adapter)->be_tx_events++;
		tx_stats(adapter)->be_tx_compl += tx_compl;
	}

	return 1;
}

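/* Read the Unrecoverable Error (UE) status words from PCI config space,
 * mask out bits the firmware marks as ignorable, and log the name of
 * every block that reports an error. Any unmasked bit marks the adapter
 * as failed (ue_detected/eeh_err).
 */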
void be_detect_dump_ue(struct be_adapter *adapter)
{
	u32 ue_status_lo, ue_status_hi, ue_status_lo_mask, ue_status_hi_mask;
	u32 i;

	pci_read_config_dword(adapter->pdev,
				PCICFG_UE_STATUS_LOW, &ue_status_lo);
	pci_read_config_dword(adapter->pdev,
				PCICFG_UE_STATUS_HIGH, &ue_status_hi);
	pci_read_config_dword(adapter->pdev,
				PCICFG_UE_STATUS_LOW_MASK, &ue_status_lo_mask);
	pci_read_config_dword(adapter->pdev,
				PCICFG_UE_STATUS_HI_MASK, &ue_status_hi_mask);

	ue_status_lo = (ue_status_lo & (~ue_status_lo_mask));
	ue_status_hi = (ue_status_hi & (~ue_status_hi_mask));

	if (ue_status_lo || ue_status_hi) {
		adapter->ue_detected = true;
		adapter->eeh_err = true;
		dev_err(&adapter->pdev->dev, "UE Detected!!\n");
	}

	if (ue_status_lo) {
		for (i = 0; ue_status_lo; ue_status_lo >>= 1, i++) {
			if (ue_status_lo & 1)
				dev_err(&adapter->pdev->dev,
				"UE: %s bit set\n", ue_status_low_desc[i]);
		}
	}
	if (ue_status_hi) {
		for (i = 0; ue_status_hi; ue_status_hi >>= 1, i++) {
			if (ue_status_hi & 1)
				dev_err(&adapter->pdev->dev,
				"UE: %s bit set\n", ue_status_hi_desc[i]);
		}
	}
}

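/* Periodic (1 second) housekeeping: check for unrecoverable errors,
 * reap MCC completions while interrupts are still disabled, refresh
 * stats and Tx/Rx rates, adapt the EQ delay, and replenish any Rx ring
 * that starved under memory pressure.
 */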
static void be_worker(struct work_struct *work)
{
	struct be_adapter *adapter =
		container_of(work, struct be_adapter, work.work);
	struct be_rx_obj *rxo;
	int i;

	if (!adapter->ue_detected && !lancer_chip(adapter))
		be_detect_dump_ue(adapter);

	/* when interrupts are not yet enabled, just reap any pending
	 * mcc completions */
	if (!netif_running(adapter->netdev)) {
		int mcc_compl, status = 0;

		mcc_compl = be_process_mcc(adapter, &status);

		if (mcc_compl) {
			struct be_mcc_obj *mcc_obj = &adapter->mcc_obj;
			be_cq_notify(adapter, mcc_obj->cq.id, false, mcc_compl);
		}

		goto reschedule;
	}

	if (!adapter->stats_cmd_sent)
		be_cmd_get_stats(adapter, &adapter->stats_cmd);

	be_tx_rate_update(adapter);

	for_all_rx_queues(adapter, rxo, i) {
		be_rx_rate_update(rxo);
		be_rx_eqd_update(adapter, rxo);

		if (rxo->rx_post_starved) {
			rxo->rx_post_starved = false;
			be_post_rx_frags(rxo, GFP_KERNEL);
		}
	}

reschedule:
	adapter->work_counter++;
	schedule_delayed_work(&adapter->work, msecs_to_jiffies(1000));
}

static void be_msix_disable(struct be_adapter *adapter)
{
	if (msix_enabled(adapter)) {
		pci_disable_msix(adapter->pdev);
		adapter->num_msix_vec = 0;
	}
}

static void be_msix_enable(struct be_adapter *adapter)
{
#define BE_MIN_MSIX_VECTORS	(1 + 1) /* Rx + Tx */
	int i, status, num_vec;

	num_vec = be_num_rxqs_want(adapter) + 1;

	for (i = 0; i < num_vec; i++)
		adapter->msix_entries[i].entry = i;

	status = pci_enable_msix(adapter->pdev, adapter->msix_entries, num_vec);
	if (status == 0) {
		goto done;
	} else if (status >= BE_MIN_MSIX_VECTORS) {
		num_vec = status;
		if (pci_enable_msix(adapter->pdev, adapter->msix_entries,
				num_vec) == 0)
			goto done;
	}
	return;
done:
	adapter->num_msix_vec = num_vec;
	return;
}

static void be_sriov_enable(struct be_adapter *adapter)
{
	be_check_sriov_fn_type(adapter);
#ifdef CONFIG_PCI_IOV
	if (be_physfn(adapter) && num_vfs) {
		int status, pos;
		u16 nvfs;

		pos = pci_find_ext_capability(adapter->pdev,
						PCI_EXT_CAP_ID_SRIOV);
		pci_read_config_word(adapter->pdev,
					pos + PCI_SRIOV_TOTAL_VF, &nvfs);

		if (num_vfs > nvfs) {
			dev_info(&adapter->pdev->dev,
				"Device supports %d VFs and not %d\n",
				nvfs, num_vfs);
			num_vfs = nvfs;
		}

		status = pci_enable_sriov(adapter->pdev, num_vfs);
		adapter->sriov_enabled = status ? false : true;
	}
#endif
}

static void be_sriov_disable(struct be_adapter *adapter)
{
#ifdef CONFIG_PCI_IOV
	if (adapter->sriov_enabled) {
		pci_disable_sriov(adapter->pdev);
		adapter->sriov_enabled = false;
	}
#endif
}

static inline int be_msix_vec_get(struct be_adapter *adapter,
					struct be_eq_obj *eq_obj)
{
	return adapter->msix_entries[eq_obj->eq_idx].vector;
}

static int be_request_irq(struct be_adapter *adapter,
		struct be_eq_obj *eq_obj,
		void *handler, char *desc, void *context)
{
	struct net_device *netdev = adapter->netdev;
	int vec;

	sprintf(eq_obj->desc, "%s-%s", netdev->name, desc);
	vec = be_msix_vec_get(adapter, eq_obj);
	return request_irq(vec, handler, 0, eq_obj->desc, context);
}

static void be_free_irq(struct be_adapter *adapter, struct be_eq_obj *eq_obj,
			void *context)
{
	int vec = be_msix_vec_get(adapter, eq_obj);
	free_irq(vec, context);
}

static int be_msix_register(struct be_adapter *adapter)
{
	struct be_rx_obj *rxo;
	int status, i;
	char qname[10];

	status = be_request_irq(adapter, &adapter->tx_eq, be_msix_tx_mcc, "tx",
				adapter);
	if (status)
		goto err;

	for_all_rx_queues(adapter, rxo, i) {
		sprintf(qname, "rxq%d", i);
		status = be_request_irq(adapter, &rxo->rx_eq, be_msix_rx,
				qname, rxo);
		if (status)
			goto err_msix;
	}

	return 0;

err_msix:
	be_free_irq(adapter, &adapter->tx_eq, adapter);

	for (i--, rxo = &adapter->rx_obj[i]; i >= 0; i--, rxo--)
		be_free_irq(adapter, &rxo->rx_eq, rxo);

err:
	dev_warn(&adapter->pdev->dev,
		"MSIX Request IRQ failed - err %d\n", status);
	be_msix_disable(adapter);
	return status;
}

static int be_irq_register(struct be_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	int status;

	if (msix_enabled(adapter)) {
		status = be_msix_register(adapter);
		if (status == 0)
			goto done;
		/* INTx is not supported for VF */
		if (!be_physfn(adapter))
			return status;
	}

	/* INTx */
	netdev->irq = adapter->pdev->irq;
	status = request_irq(netdev->irq, be_intx, IRQF_SHARED, netdev->name,
			adapter);
	if (status) {
		dev_err(&adapter->pdev->dev,
			"INTx request IRQ failed - err %d\n", status);
		return status;
	}
done:
	adapter->isr_registered = true;
	return 0;
}

static void be_irq_unregister(struct be_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	struct be_rx_obj *rxo;
	int i;

	if (!adapter->isr_registered)
		return;

	/* INTx */
	if (!msix_enabled(adapter)) {
		free_irq(netdev->irq, adapter);
		goto done;
	}

	/* MSIx */
	be_free_irq(adapter, &adapter->tx_eq, adapter);

	for_all_rx_queues(adapter, rxo, i)
		be_free_irq(adapter, &rxo->rx_eq, rxo);

done:
	adapter->isr_registered = false;
}

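/* ndo_stop: quiesce in roughly the reverse order of be_open() - stop MCC
 * async event processing and NAPI, mask interrupts, wait out in-flight
 * irq handlers, free the irqs and finally drain Tx completions so every
 * outstanding skb is unmapped and freed.
 */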
static int be_close(struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_rx_obj *rxo;
	struct be_eq_obj *tx_eq = &adapter->tx_eq;
	int vec, i;

	be_async_mcc_disable(adapter);

	netif_carrier_off(netdev);
	adapter->link_up = false;

	if (!lancer_chip(adapter))
		be_intr_set(adapter, false);

	for_all_rx_queues(adapter, rxo, i)
		napi_disable(&rxo->rx_eq.napi);

	napi_disable(&tx_eq->napi);

	if (lancer_chip(adapter)) {
		be_cq_notify(adapter, adapter->tx_obj.cq.id, false, 0);
		be_cq_notify(adapter, adapter->mcc_obj.cq.id, false, 0);
		for_all_rx_queues(adapter, rxo, i)
			be_cq_notify(adapter, rxo->cq.id, false, 0);
	}

	if (msix_enabled(adapter)) {
		vec = be_msix_vec_get(adapter, tx_eq);
		synchronize_irq(vec);

		for_all_rx_queues(adapter, rxo, i) {
			vec = be_msix_vec_get(adapter, &rxo->rx_eq);
			synchronize_irq(vec);
		}
	} else {
		synchronize_irq(netdev->irq);
	}
	be_irq_unregister(adapter);

	/* Wait for all pending tx completions to arrive so that
	 * all tx skbs are freed.
	 */
	be_tx_compl_clean(adapter);

	return 0;
}

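/* ndo_open: post Rx buffers, enable NAPI, register and unmask interrupts,
 * arm the event and completion queues (they are created unarmed), then
 * query link state and, on a PF, push VLAN and flow-control settings.
 * Any failure unwinds through be_close().
 */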
static int be_open(struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_eq_obj *tx_eq = &adapter->tx_eq;
	struct be_rx_obj *rxo;
	bool link_up;
	int status, i;
	u8 mac_speed;
	u16 link_speed;

	for_all_rx_queues(adapter, rxo, i) {
		be_post_rx_frags(rxo, GFP_KERNEL);
		napi_enable(&rxo->rx_eq.napi);
	}
	napi_enable(&tx_eq->napi);

	be_irq_register(adapter);

	if (!lancer_chip(adapter))
		be_intr_set(adapter, true);

	/* The evt queues are created in unarmed state; arm them */
	for_all_rx_queues(adapter, rxo, i) {
		be_eq_notify(adapter, rxo->rx_eq.q.id, true, false, 0);
		be_cq_notify(adapter, rxo->cq.id, true, 0);
	}
	be_eq_notify(adapter, tx_eq->q.id, true, false, 0);

	/* Now that interrupts are on we can process async mcc */
	be_async_mcc_enable(adapter);

	status = be_cmd_link_status_query(adapter, &link_up, &mac_speed,
			&link_speed, 0);
	if (status)
		goto err;
	be_link_status_update(adapter, link_up);

	if (be_physfn(adapter)) {
		status = be_vid_config(adapter, false, 0);
		if (status)
			goto err;

		status = be_cmd_set_flow_control(adapter,
				adapter->tx_fc, adapter->rx_fc);
		if (status)
			goto err;
	}

	return 0;
err:
	be_close(adapter->netdev);
	return -EIO;
}

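/* Program or clear the Wake-on-LAN magic-packet filter. The firmware
 * command needs a DMA-coherent buffer; on enable the PM control bits in
 * PCI config space are set and the device is flagged wakeable from
 * D3hot/D3cold.
 */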
static int be_setup_wol(struct be_adapter *adapter, bool enable)
{
	struct be_dma_mem cmd;
	int status = 0;
	u8 mac[ETH_ALEN];

	memset(mac, 0, ETH_ALEN);

	cmd.size = sizeof(struct be_cmd_req_acpi_wol_magic_config);
	cmd.va = dma_alloc_coherent(&adapter->pdev->dev, cmd.size, &cmd.dma,
				    GFP_KERNEL);
	if (cmd.va == NULL)
		return -1;
	memset(cmd.va, 0, cmd.size);

	if (enable) {
		status = pci_write_config_dword(adapter->pdev,
			PCICFG_PM_CONTROL_OFFSET, PCICFG_PM_CONTROL_MASK);
		if (status) {
			dev_err(&adapter->pdev->dev,
				"Could not enable Wake-on-LAN\n");
			dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va,
					  cmd.dma);
			return status;
		}
		status = be_cmd_enable_magic_wol(adapter,
				adapter->netdev->dev_addr, &cmd);
		pci_enable_wake(adapter->pdev, PCI_D3hot, 1);
		pci_enable_wake(adapter->pdev, PCI_D3cold, 1);
	} else {
		status = be_cmd_enable_magic_wol(adapter, mac, &cmd);
		pci_enable_wake(adapter->pdev, PCI_D3hot, 0);
		pci_enable_wake(adapter->pdev, PCI_D3cold, 0);
	}

	dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va, cmd.dma);
	return status;
}

/*
 * Generate a seed MAC address from the PF MAC address using jhash.
 * MAC addresses for VFs are assigned incrementally starting from the seed.
 * These addresses are programmed in the ASIC by the PF and the VF driver
 * queries for the MAC address during its probe.
 */
static inline int be_vf_eth_addr_config(struct be_adapter *adapter)
{
	u32 vf = 0;
	int status = 0;
	u8 mac[ETH_ALEN];

	be_vf_eth_addr_generate(adapter, mac);

	for (vf = 0; vf < num_vfs; vf++) {
		status = be_cmd_pmac_add(adapter, mac,
					adapter->vf_cfg[vf].vf_if_handle,
					&adapter->vf_cfg[vf].vf_pmac_id,
					vf + 1);
		if (status)
			dev_err(&adapter->pdev->dev,
				"Mac address add failed for VF %d\n", vf);
		else
			memcpy(adapter->vf_cfg[vf].vf_mac_addr, mac, ETH_ALEN);

		mac[5] += 1;
	}
	return status;
}

static inline void be_vf_eth_addr_rem(struct be_adapter *adapter)
{
	u32 vf;

	for (vf = 0; vf < num_vfs; vf++) {
		if (adapter->vf_cfg[vf].vf_pmac_id != BE_INVALID_PMAC_ID)
			be_cmd_pmac_del(adapter,
					adapter->vf_cfg[vf].vf_if_handle,
					adapter->vf_cfg[vf].vf_pmac_id, vf + 1);
	}
}

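/* Create the logical resources for this function: the PF interface (plus
 * one interface per VF when SR-IOV is active) followed by the Tx, Rx and
 * MCC queue sets. A VF does not know its MAC address up front and queries
 * it from the firmware instead.
 */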
static int be_setup(struct be_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	u32 cap_flags, en_flags, vf = 0;
	int status;
	u8 mac[ETH_ALEN];

	cap_flags = en_flags = BE_IF_FLAGS_UNTAGGED |
				BE_IF_FLAGS_BROADCAST |
				BE_IF_FLAGS_MULTICAST;

	if (be_physfn(adapter)) {
		cap_flags |= BE_IF_FLAGS_MCAST_PROMISCUOUS |
				BE_IF_FLAGS_PROMISCUOUS |
				BE_IF_FLAGS_PASS_L3L4_ERRORS;
		en_flags |= BE_IF_FLAGS_PASS_L3L4_ERRORS;

		if (adapter->function_caps & BE_FUNCTION_CAPS_RSS) {
			cap_flags |= BE_IF_FLAGS_RSS;
			en_flags |= BE_IF_FLAGS_RSS;
		}
	}

	status = be_cmd_if_create(adapter, cap_flags, en_flags,
			netdev->dev_addr, false/* pmac_invalid */,
			&adapter->if_handle, &adapter->pmac_id, 0);
	if (status != 0)
		goto do_none;

	if (be_physfn(adapter)) {
		if (adapter->sriov_enabled) {
			while (vf < num_vfs) {
				cap_flags = en_flags = BE_IF_FLAGS_UNTAGGED |
							BE_IF_FLAGS_BROADCAST;
				status = be_cmd_if_create(adapter, cap_flags,
					en_flags, mac, true,
					&adapter->vf_cfg[vf].vf_if_handle,
					NULL, vf+1);
				if (status) {
					dev_err(&adapter->pdev->dev,
					"Interface Create failed for VF %d\n",
					vf);
					goto if_destroy;
				}
				adapter->vf_cfg[vf].vf_pmac_id =
					BE_INVALID_PMAC_ID;
				vf++;
			}
		}
	} else {
		status = be_cmd_mac_addr_query(adapter, mac,
			MAC_ADDRESS_TYPE_NETWORK, false, adapter->if_handle);
		if (!status) {
			memcpy(adapter->netdev->dev_addr, mac, ETH_ALEN);
			memcpy(adapter->netdev->perm_addr, mac, ETH_ALEN);
		}
	}

	status = be_tx_queues_create(adapter);
	if (status != 0)
		goto if_destroy;

	status = be_rx_queues_create(adapter);
	if (status != 0)
		goto tx_qs_destroy;

	status = be_mcc_queues_create(adapter);
	if (status != 0)
		goto rx_qs_destroy;

	adapter->link_speed = -1;

	return 0;

rx_qs_destroy:
	be_rx_queues_destroy(adapter);
tx_qs_destroy:
	be_tx_queues_destroy(adapter);
if_destroy:
	if (be_physfn(adapter) && adapter->sriov_enabled)
		for (vf = 0; vf < num_vfs; vf++)
			if (adapter->vf_cfg[vf].vf_if_handle)
				be_cmd_if_destroy(adapter,
					adapter->vf_cfg[vf].vf_if_handle,
					vf + 1);
	be_cmd_if_destroy(adapter, adapter->if_handle, 0);
do_none:
	return status;
}

static int be_clear(struct be_adapter *adapter)
{
	int vf;

	if (be_physfn(adapter) && adapter->sriov_enabled)
		be_vf_eth_addr_rem(adapter);

	be_mcc_queues_destroy(adapter);
	be_rx_queues_destroy(adapter);
	be_tx_queues_destroy(adapter);
	adapter->eq_next_idx = 0;

	if (be_physfn(adapter) && adapter->sriov_enabled)
		for (vf = 0; vf < num_vfs; vf++)
			if (adapter->vf_cfg[vf].vf_if_handle)
				be_cmd_if_destroy(adapter,
					adapter->vf_cfg[vf].vf_if_handle,
					vf + 1);

	be_cmd_if_destroy(adapter, adapter->if_handle, 0);

	/* tell fw we're done with firing cmds */
	be_cmd_fw_clean(adapter);
	return 0;
}

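/*
 * Firmware flashing support. be_flash_redboot() decides whether the
 * boot code (redboot) region really needs rewriting: it reads back the
 * CRC of the region currently in flash and compares it against the
 * last four bytes of the new image, returning true only when they
 * differ.
 */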
#define FW_FILE_HDR_SIGN	"ServerEngines Corp. "
static bool be_flash_redboot(struct be_adapter *adapter,
			const u8 *p, u32 img_start, int image_size,
			int hdr_size)
{
	u32 crc_offset;
	u8 flashed_crc[4];
	int status;

	crc_offset = hdr_size + img_start + image_size - 4;

	p += crc_offset;

	status = be_cmd_get_flash_crc(adapter, flashed_crc,
			(image_size - 4));
	if (status) {
		dev_err(&adapter->pdev->dev,
		"could not get crc from flash, not flashing redboot\n");
		return false;
	}

	/* update redboot only if crc does not match */
	if (!memcmp(flashed_crc, p, 4))
		return false;
	else
		return true;
}

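/*
 * Walk the per-generation table of flash regions and stream each image
 * in the UFI file to the controller in 32KB chunks: intermediate
 * chunks use FLASHROM_OPER_SAVE and the final chunk FLASHROM_OPER_FLASH,
 * which commits the region. The NCSI image is skipped when the adapter
 * firmware is older than 3.102.148.0, and redboot is skipped unless
 * its CRC shows it really changed.
 */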
static int be_flash_data(struct be_adapter *adapter,
			const struct firmware *fw,
			struct be_dma_mem *flash_cmd, int num_of_images)
{
	int status = 0, i, filehdr_size = 0;
	u32 total_bytes = 0, flash_op;
	int num_bytes;
	const u8 *p = fw->data;
	struct be_cmd_write_flashrom *req = flash_cmd->va;
	const struct flash_comp *pflashcomp;
	int num_comp;

	static const struct flash_comp gen3_flash_types[9] = {
		{ FLASH_iSCSI_PRIMARY_IMAGE_START_g3, IMG_TYPE_ISCSI_ACTIVE,
			FLASH_IMAGE_MAX_SIZE_g3},
		{ FLASH_REDBOOT_START_g3, IMG_TYPE_REDBOOT,
			FLASH_REDBOOT_IMAGE_MAX_SIZE_g3},
		{ FLASH_iSCSI_BIOS_START_g3, IMG_TYPE_BIOS,
			FLASH_BIOS_IMAGE_MAX_SIZE_g3},
		{ FLASH_PXE_BIOS_START_g3, IMG_TYPE_PXE_BIOS,
			FLASH_BIOS_IMAGE_MAX_SIZE_g3},
		{ FLASH_FCoE_BIOS_START_g3, IMG_TYPE_FCOE_BIOS,
			FLASH_BIOS_IMAGE_MAX_SIZE_g3},
		{ FLASH_iSCSI_BACKUP_IMAGE_START_g3, IMG_TYPE_ISCSI_BACKUP,
			FLASH_IMAGE_MAX_SIZE_g3},
		{ FLASH_FCoE_PRIMARY_IMAGE_START_g3, IMG_TYPE_FCOE_FW_ACTIVE,
			FLASH_IMAGE_MAX_SIZE_g3},
		{ FLASH_FCoE_BACKUP_IMAGE_START_g3, IMG_TYPE_FCOE_FW_BACKUP,
			FLASH_IMAGE_MAX_SIZE_g3},
		{ FLASH_NCSI_START_g3, IMG_TYPE_NCSI_FW,
			FLASH_NCSI_IMAGE_MAX_SIZE_g3}
	};
	static const struct flash_comp gen2_flash_types[8] = {
		{ FLASH_iSCSI_PRIMARY_IMAGE_START_g2, IMG_TYPE_ISCSI_ACTIVE,
			FLASH_IMAGE_MAX_SIZE_g2},
		{ FLASH_REDBOOT_START_g2, IMG_TYPE_REDBOOT,
			FLASH_REDBOOT_IMAGE_MAX_SIZE_g2},
		{ FLASH_iSCSI_BIOS_START_g2, IMG_TYPE_BIOS,
			FLASH_BIOS_IMAGE_MAX_SIZE_g2},
		{ FLASH_PXE_BIOS_START_g2, IMG_TYPE_PXE_BIOS,
			FLASH_BIOS_IMAGE_MAX_SIZE_g2},
		{ FLASH_FCoE_BIOS_START_g2, IMG_TYPE_FCOE_BIOS,
			FLASH_BIOS_IMAGE_MAX_SIZE_g2},
		{ FLASH_iSCSI_BACKUP_IMAGE_START_g2, IMG_TYPE_ISCSI_BACKUP,
			FLASH_IMAGE_MAX_SIZE_g2},
		{ FLASH_FCoE_PRIMARY_IMAGE_START_g2, IMG_TYPE_FCOE_FW_ACTIVE,
			FLASH_IMAGE_MAX_SIZE_g2},
		{ FLASH_FCoE_BACKUP_IMAGE_START_g2, IMG_TYPE_FCOE_FW_BACKUP,
			FLASH_IMAGE_MAX_SIZE_g2}
	};

	if (adapter->generation == BE_GEN3) {
		pflashcomp = gen3_flash_types;
		filehdr_size = sizeof(struct flash_file_hdr_g3);
		num_comp = ARRAY_SIZE(gen3_flash_types);
	} else {
		pflashcomp = gen2_flash_types;
		filehdr_size = sizeof(struct flash_file_hdr_g2);
		num_comp = ARRAY_SIZE(gen2_flash_types);
	}
	for (i = 0; i < num_comp; i++) {
		if ((pflashcomp[i].optype == IMG_TYPE_NCSI_FW) &&
				memcmp(adapter->fw_ver, "3.102.148.0", 11) < 0)
			continue;
		if ((pflashcomp[i].optype == IMG_TYPE_REDBOOT) &&
			(!be_flash_redboot(adapter, fw->data,
			pflashcomp[i].offset, pflashcomp[i].size, filehdr_size +
			(num_of_images * sizeof(struct image_hdr)))))
			continue;
		p = fw->data;
		p += filehdr_size + pflashcomp[i].offset
			+ (num_of_images * sizeof(struct image_hdr));
		if (p + pflashcomp[i].size > fw->data + fw->size)
			return -1;
		total_bytes = pflashcomp[i].size;
		while (total_bytes) {
			if (total_bytes > 32*1024)
				num_bytes = 32*1024;
			else
				num_bytes = total_bytes;
			total_bytes -= num_bytes;

			if (!total_bytes)
				flash_op = FLASHROM_OPER_FLASH;
			else
				flash_op = FLASHROM_OPER_SAVE;
			memcpy(req->params.data_buf, p, num_bytes);
			p += num_bytes;
			status = be_cmd_write_flashrom(adapter, flash_cmd,
				pflashcomp[i].optype, flash_op, num_bytes);
			if (status) {
				dev_err(&adapter->pdev->dev,
					"cmd to write to flash rom failed.\n");
				return -1;
			}
			yield();
		}
	}
	return 0;
}

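/*
 * The UFI file encodes the ASIC generation it was built for in the
 * first character of its build string; map that to BE_GEN2/BE_GEN3.
 */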
static int get_ufigen_type(struct flash_file_hdr_g2 *fhdr)
{
	if (fhdr == NULL)
		return 0;
	if (fhdr->build[0] == '3')
		return BE_GEN3;
	else if (fhdr->build[0] == '2')
		return BE_GEN2;
	else
		return 0;
}

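/*
 * Firmware update entry point. The interface must be up; the UFI image
 * is fetched via request_firmware(), a DMA buffer for the 32KB
 * write-flashrom payloads is allocated, and flashing is dispatched per
 * adapter generation. This is normally driven from ethtool's flash
 * operation, roughly (the file name below is only an example):
 *
 *	ethtool -f eth0 be2net_fw.ufi
 */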
int be_load_fw(struct be_adapter *adapter, u8 *func)
{
	char fw_file[ETHTOOL_FLASH_MAX_FILENAME];
	const struct firmware *fw;
	struct flash_file_hdr_g2 *fhdr;
	struct flash_file_hdr_g3 *fhdr3;
	struct image_hdr *img_hdr_ptr = NULL;
	struct be_dma_mem flash_cmd;
	int status, i = 0, num_imgs = 0;
	const u8 *p;

	if (!netif_running(adapter->netdev)) {
		dev_err(&adapter->pdev->dev,
			"Firmware load not allowed (interface is down)\n");
		return -EPERM;
	}

	/* 'func' is expected to be NUL-bounded by the ethtool layer
	 * (ETHTOOL_FLASH_MAX_FILENAME), so this copy should not overflow */
	strcpy(fw_file, func);

	status = request_firmware(&fw, fw_file, &adapter->pdev->dev);
	if (status)
		goto fw_exit;

	p = fw->data;
	fhdr = (struct flash_file_hdr_g2 *) p;
	dev_info(&adapter->pdev->dev, "Flashing firmware file %s\n", fw_file);

	flash_cmd.size = sizeof(struct be_cmd_write_flashrom) + 32*1024;
	flash_cmd.va = dma_alloc_coherent(&adapter->pdev->dev, flash_cmd.size,
					  &flash_cmd.dma, GFP_KERNEL);
	if (!flash_cmd.va) {
		status = -ENOMEM;
		dev_err(&adapter->pdev->dev,
			"Memory allocation failure while flashing\n");
		goto fw_exit;
	}

	if ((adapter->generation == BE_GEN3) &&
			(get_ufigen_type(fhdr) == BE_GEN3)) {
		fhdr3 = (struct flash_file_hdr_g3 *) fw->data;
		num_imgs = le32_to_cpu(fhdr3->num_imgs);
		for (i = 0; i < num_imgs; i++) {
			img_hdr_ptr = (struct image_hdr *) (fw->data +
					(sizeof(struct flash_file_hdr_g3) +
					 i * sizeof(struct image_hdr)));
			if (le32_to_cpu(img_hdr_ptr->imageid) == 1)
				status = be_flash_data(adapter, fw, &flash_cmd,
							num_imgs);
		}
	} else if ((adapter->generation == BE_GEN2) &&
			(get_ufigen_type(fhdr) == BE_GEN2)) {
		status = be_flash_data(adapter, fw, &flash_cmd, 0);
	} else {
		dev_err(&adapter->pdev->dev,
			"UFI and Interface are not compatible for flashing\n");
		status = -1;
	}

	dma_free_coherent(&adapter->pdev->dev, flash_cmd.size, flash_cmd.va,
			  flash_cmd.dma);
	if (status) {
		dev_err(&adapter->pdev->dev, "Firmware load error\n");
		goto fw_exit;
	}

	dev_info(&adapter->pdev->dev, "Firmware flashed successfully\n");

fw_exit:
	release_firmware(fw);
	return status;
}

static struct net_device_ops be_netdev_ops = {
	.ndo_open		= be_open,
	.ndo_stop		= be_close,
	.ndo_start_xmit		= be_xmit,
	.ndo_set_rx_mode	= be_set_multicast_list,
	.ndo_set_mac_address	= be_mac_addr_set,
	.ndo_change_mtu		= be_change_mtu,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_vlan_rx_register	= be_vlan_register,
	.ndo_vlan_rx_add_vid	= be_vlan_add_vid,
	.ndo_vlan_rx_kill_vid	= be_vlan_rem_vid,
	.ndo_set_vf_mac		= be_set_vf_mac,
	.ndo_set_vf_vlan	= be_set_vf_vlan,
	.ndo_set_vf_tx_rate	= be_set_vf_tx_rate,
	.ndo_get_vf_config	= be_get_vf_config
};

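/*
 * Advertise the offloads this hardware supports (checksums, TSO, VLAN
 * acceleration, and RX hashing when multiple RX queues are in use), set
 * flow-control defaults, and register one NAPI context per RX queue
 * plus a shared one for TX completions and the MCC channel.
 */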
static void be_netdev_init(struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_rx_obj *rxo;
	int i;

	netdev->hw_features |= NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6 |
		NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_RXCSUM |
		NETIF_F_HW_VLAN_TX;
	if (be_multi_rxq(adapter))
		netdev->hw_features |= NETIF_F_RXHASH;

	netdev->features |= netdev->hw_features |
		NETIF_F_HW_VLAN_RX | NETIF_F_HW_VLAN_FILTER;

	netdev->vlan_features |= NETIF_F_SG | NETIF_F_TSO |
		NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;

	if (lancer_chip(adapter))
		netdev->vlan_features |= NETIF_F_TSO6;

	netdev->flags |= IFF_MULTICAST;

	/* Default settings for Rx and Tx flow control */
	adapter->rx_fc = true;
	adapter->tx_fc = true;

	netif_set_gso_max_size(netdev, 65535);

	BE_SET_NETDEV_OPS(netdev, &be_netdev_ops);

	SET_ETHTOOL_OPS(netdev, &be_ethtool_ops);

	for_all_rx_queues(adapter, rxo, i)
		netif_napi_add(netdev, &rxo->rx_eq.napi, be_poll_rx,
			BE_NAPI_WEIGHT);

	netif_napi_add(netdev, &adapter->tx_eq.napi, be_poll_tx_mcc,
		BE_NAPI_WEIGHT);
}

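/*
 * BAR layout differs by ASIC: Lancer exposes only a doorbell area in
 * BAR 0; BE2 maps CSR in BAR 2, doorbells in BAR 4 and pcicfg in BAR 1;
 * BE3 keeps pcicfg in BAR 0. Virtual functions have no pcicfg BAR of
 * their own and reach it at a fixed offset from the doorbell mapping.
 */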
static void be_unmap_pci_bars(struct be_adapter *adapter)
{
	if (adapter->csr)
		iounmap(adapter->csr);
	if (adapter->db)
		iounmap(adapter->db);
	if (adapter->pcicfg && be_physfn(adapter))
		iounmap(adapter->pcicfg);
}

static int be_map_pci_bars(struct be_adapter *adapter)
{
	u8 __iomem *addr;
	int pcicfg_reg, db_reg;

	if (lancer_chip(adapter)) {
		addr = ioremap_nocache(pci_resource_start(adapter->pdev, 0),
			pci_resource_len(adapter->pdev, 0));
		if (addr == NULL)
			return -ENOMEM;
		adapter->db = addr;
		return 0;
	}

	if (be_physfn(adapter)) {
		addr = ioremap_nocache(pci_resource_start(adapter->pdev, 2),
				pci_resource_len(adapter->pdev, 2));
		if (addr == NULL)
			return -ENOMEM;
		adapter->csr = addr;
	}

	if (adapter->generation == BE_GEN2) {
		pcicfg_reg = 1;
		db_reg = 4;
	} else {
		pcicfg_reg = 0;
		if (be_physfn(adapter))
			db_reg = 4;
		else
			db_reg = 0;
	}
	addr = ioremap_nocache(pci_resource_start(adapter->pdev, db_reg),
				pci_resource_len(adapter->pdev, db_reg));
	if (addr == NULL)
		goto pci_map_err;
	adapter->db = addr;

	if (be_physfn(adapter)) {
		addr = ioremap_nocache(
				pci_resource_start(adapter->pdev, pcicfg_reg),
				pci_resource_len(adapter->pdev, pcicfg_reg));
		if (addr == NULL)
			goto pci_map_err;
		adapter->pcicfg = addr;
	} else
		adapter->pcicfg = adapter->db + SRIOV_VF_PCICFG_OFFSET;

	return 0;
pci_map_err:
	be_unmap_pci_bars(adapter);
	return -ENOMEM;
}

static void be_ctrl_cleanup(struct be_adapter *adapter)
{
	struct be_dma_mem *mem = &adapter->mbox_mem_alloced;

	be_unmap_pci_bars(adapter);

	if (mem->va)
		dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
				  mem->dma);

	mem = &adapter->mc_cmd_mem;
	if (mem->va)
		dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
				  mem->dma);
}

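/*
 * Map the BARs and set up the mailbox used for the first commands to
 * the controller: the hardware wants the mailbox 16-byte aligned, so a
 * buffer 16 bytes larger than needed is allocated and the aligned
 * pointer is carved out of it. Also allocates the multicast command
 * buffer and initializes the mailbox/MCC locks.
 */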
static int be_ctrl_init(struct be_adapter *adapter)
{
	struct be_dma_mem *mbox_mem_alloc = &adapter->mbox_mem_alloced;
	struct be_dma_mem *mbox_mem_align = &adapter->mbox_mem;
	struct be_dma_mem *mc_cmd_mem = &adapter->mc_cmd_mem;
	int status;

	status = be_map_pci_bars(adapter);
	if (status)
		goto done;

	mbox_mem_alloc->size = sizeof(struct be_mcc_mailbox) + 16;
	mbox_mem_alloc->va = dma_alloc_coherent(&adapter->pdev->dev,
						mbox_mem_alloc->size,
						&mbox_mem_alloc->dma,
						GFP_KERNEL);
	if (!mbox_mem_alloc->va) {
		status = -ENOMEM;
		goto unmap_pci_bars;
	}

	mbox_mem_align->size = sizeof(struct be_mcc_mailbox);
	mbox_mem_align->va = PTR_ALIGN(mbox_mem_alloc->va, 16);
	mbox_mem_align->dma = PTR_ALIGN(mbox_mem_alloc->dma, 16);
	memset(mbox_mem_align->va, 0, sizeof(struct be_mcc_mailbox));

	mc_cmd_mem->size = sizeof(struct be_cmd_req_mcast_mac_config);
	mc_cmd_mem->va = dma_alloc_coherent(&adapter->pdev->dev,
					    mc_cmd_mem->size, &mc_cmd_mem->dma,
					    GFP_KERNEL);
	if (mc_cmd_mem->va == NULL) {
		status = -ENOMEM;
		goto free_mbox;
	}
	memset(mc_cmd_mem->va, 0, mc_cmd_mem->size);

	mutex_init(&adapter->mbox_lock);
	spin_lock_init(&adapter->mcc_lock);
	spin_lock_init(&adapter->mcc_cq_lock);

	init_completion(&adapter->flash_compl);
	pci_save_state(adapter->pdev);
	return 0;

free_mbox:
	dma_free_coherent(&adapter->pdev->dev, mbox_mem_alloc->size,
			  mbox_mem_alloc->va, mbox_mem_alloc->dma);

unmap_pci_bars:
	be_unmap_pci_bars(adapter);

done:
	return status;
}

static void be_stats_cleanup(struct be_adapter *adapter)
{
	struct be_dma_mem *cmd = &adapter->stats_cmd;

	if (cmd->va)
		dma_free_coherent(&adapter->pdev->dev, cmd->size,
				  cmd->va, cmd->dma);
}

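/*
 * Allocate the DMA buffer used for the firmware stats command; BE2
 * uses the v0 command layout while later chips use the larger v1
 * layout, so the buffer is sized by generation.
 */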
static int be_stats_init(struct be_adapter *adapter)
{
	struct be_dma_mem *cmd = &adapter->stats_cmd;

	if (adapter->generation == BE_GEN2)
		cmd->size = sizeof(struct be_cmd_req_get_stats_v0);
	else
		cmd->size = sizeof(struct be_cmd_req_get_stats_v1);
	cmd->va = dma_alloc_coherent(&adapter->pdev->dev, cmd->size, &cmd->dma,
				     GFP_KERNEL);
	if (cmd->va == NULL)
		return -1;
	memset(cmd->va, 0, cmd->size);
	return 0;
}

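/*
 * PCI remove: stop the worker first so no command is in flight, then
 * unwind in the reverse order of be_probe().
 */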
static void __devexit be_remove(struct pci_dev *pdev)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);

	if (!adapter)
		return;

	cancel_delayed_work_sync(&adapter->work);

	unregister_netdev(adapter->netdev);

	be_clear(adapter);

	be_stats_cleanup(adapter);

	be_ctrl_cleanup(adapter);

	kfree(adapter->vf_cfg);
	be_sriov_disable(adapter);

	be_msix_disable(adapter);

	pci_set_drvdata(pdev, NULL);
	pci_release_regions(pdev);
	pci_disable_device(pdev);

	free_netdev(adapter->netdev);
}

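/*
 * Query the firmware for the configuration this function was given:
 * firmware version, port number, function mode/capabilities, and (for
 * the PF) the factory MAC address. Also sizes the VLAN table and reads
 * the controller attributes.
 */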
static int be_get_config(struct be_adapter *adapter)
{
	int status;
	u8 mac[ETH_ALEN];

	status = be_cmd_get_fw_ver(adapter, adapter->fw_ver);
	if (status)
		return status;

	status = be_cmd_query_fw_cfg(adapter, &adapter->port_num,
			&adapter->function_mode, &adapter->function_caps);
	if (status)
		return status;

	memset(mac, 0, ETH_ALEN);

	if (be_physfn(adapter)) {
		status = be_cmd_mac_addr_query(adapter, mac,
			MAC_ADDRESS_TYPE_NETWORK, true /* permanent */, 0);

		if (status)
			return status;

		if (!is_valid_ether_addr(mac))
			return -EADDRNOTAVAIL;

		memcpy(adapter->netdev->dev_addr, mac, ETH_ALEN);
		memcpy(adapter->netdev->perm_addr, mac, ETH_ALEN);
	}

	/* bit 0x400 of function_mode presumably flags a multi-channel
	 * (FLEX10-style) configuration, where this function gets only a
	 * quarter of the VLAN table */
	if (adapter->function_mode & 0x400)
		adapter->max_vlans = BE_NUM_VLANS_SUPPORTED/4;
	else
		adapter->max_vlans = BE_NUM_VLANS_SUPPORTED;

	status = be_cmd_get_cntl_attributes(adapter);
	if (status)
		return status;

	be_cmd_check_native_mode(adapter);
	return 0;
}

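/*
 * Derive the ASIC generation from the PCI device ID. For OC_DEVICE_ID3
 * (Lancer) the SLI_INTF register is validated first, and SR-IOV is
 * rejected since VFs are not supported on that chip here.
 */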
static int be_dev_family_check(struct be_adapter *adapter)
{
	struct pci_dev *pdev = adapter->pdev;
	u32 sli_intf = 0, if_type;

	switch (pdev->device) {
	case BE_DEVICE_ID1:
	case OC_DEVICE_ID1:
		adapter->generation = BE_GEN2;
		break;
	case BE_DEVICE_ID2:
	case OC_DEVICE_ID2:
		adapter->generation = BE_GEN3;
		break;
	case OC_DEVICE_ID3:
		pci_read_config_dword(pdev, SLI_INTF_REG_OFFSET, &sli_intf);
		if_type = (sli_intf & SLI_INTF_IF_TYPE_MASK) >>
				SLI_INTF_IF_TYPE_SHIFT;

		if (((sli_intf & SLI_INTF_VALID_MASK) != SLI_INTF_VALID) ||
			if_type != 0x02) {
			dev_err(&pdev->dev, "SLI_INTF reg val is not valid\n");
			return -EINVAL;
		}
		if (num_vfs > 0) {
			dev_err(&pdev->dev, "VFs not supported\n");
			return -EINVAL;
		}
		adapter->sli_family = ((sli_intf & SLI_INTF_FAMILY_MASK) >>
					 SLI_INTF_FAMILY_SHIFT);
		adapter->generation = BE_GEN3;
		break;
	default:
		adapter->generation = 0;
	}
	return 0;
}

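/*
 * Lancer readiness: poll the SLIPORT status register for the ready bit,
 * for up to 500 iterations of 20ms (about 10 seconds). If the port
 * reports an error with the reset-needed bit set, initiate a physical
 * reset through the SLIPORT control register and wait again.
 */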
static int lancer_wait_ready(struct be_adapter *adapter)
{
#define SLIPORT_READY_TIMEOUT 500
	u32 sliport_status;
	int status = 0, i;

	for (i = 0; i < SLIPORT_READY_TIMEOUT; i++) {
		sliport_status = ioread32(adapter->db + SLIPORT_STATUS_OFFSET);
		if (sliport_status & SLIPORT_STATUS_RDY_MASK)
			break;

		msleep(20);
	}

	if (i == SLIPORT_READY_TIMEOUT)
		status = -1;

	return status;
}

static int lancer_test_and_set_rdy_state(struct be_adapter *adapter)
{
	int status;
	u32 sliport_status, err, reset_needed;

	status = lancer_wait_ready(adapter);
	if (!status) {
		sliport_status = ioread32(adapter->db + SLIPORT_STATUS_OFFSET);
		err = sliport_status & SLIPORT_STATUS_ERR_MASK;
		reset_needed = sliport_status & SLIPORT_STATUS_RN_MASK;
		if (err && reset_needed) {
			iowrite32(SLI_PORT_CONTROL_IP_MASK,
				adapter->db + SLIPORT_CONTROL_OFFSET);

			/* check adapter has corrected the error */
			status = lancer_wait_ready(adapter);
			sliport_status = ioread32(adapter->db +
							SLIPORT_STATUS_OFFSET);
			sliport_status &= (SLIPORT_STATUS_ERR_MASK |
						SLIPORT_STATUS_RN_MASK);
			if (status || sliport_status)
				status = -1;
		} else if (err || reset_needed) {
			status = -1;
		}
	}
	return status;
}

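/*
 * PCI probe: enable the device, map BARs, sync up with the firmware's
 * ready state (POST), reset the function and then build the interface
 * and queues via be_setup() before registering the netdev. For an
 * SR-IOV capable PF, per-VF MAC addresses and link speeds are set up
 * last. Each failure unwinds through the labels at the bottom in
 * reverse order.
 */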
static int __devinit be_probe(struct pci_dev *pdev,
			const struct pci_device_id *pdev_id)
{
	int status = 0;
	struct be_adapter *adapter;
	struct net_device *netdev;

	status = pci_enable_device(pdev);
	if (status)
		goto do_none;

	status = pci_request_regions(pdev, DRV_NAME);
	if (status)
		goto disable_dev;
	pci_set_master(pdev);

	netdev = alloc_etherdev(sizeof(struct be_adapter));
	if (netdev == NULL) {
		status = -ENOMEM;
		goto rel_reg;
	}
	adapter = netdev_priv(netdev);
	adapter->pdev = pdev;
	pci_set_drvdata(pdev, adapter);

	status = be_dev_family_check(adapter);
	if (status)
		goto free_netdev;

	adapter->netdev = netdev;
	SET_NETDEV_DEV(netdev, &pdev->dev);

	status = dma_set_mask(&pdev->dev, DMA_BIT_MASK(64));
	if (!status) {
		netdev->features |= NETIF_F_HIGHDMA;
	} else {
		status = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
		if (status) {
			dev_err(&pdev->dev, "Could not set PCI DMA Mask\n");
			goto free_netdev;
		}
	}

	be_sriov_enable(adapter);
	if (adapter->sriov_enabled) {
		adapter->vf_cfg = kcalloc(num_vfs,
			sizeof(struct be_vf_cfg), GFP_KERNEL);

		if (!adapter->vf_cfg)
			goto free_netdev;
	}

	status = be_ctrl_init(adapter);
	if (status)
		goto free_vf_cfg;

	if (lancer_chip(adapter)) {
		status = lancer_test_and_set_rdy_state(adapter);
		if (status) {
			dev_err(&pdev->dev, "Adapter in non-recoverable error\n");
			goto ctrl_clean;
		}
	}

	/* sync up with fw's ready state */
	if (be_physfn(adapter)) {
		status = be_cmd_POST(adapter);
		if (status)
			goto ctrl_clean;
	}

	/* tell fw we're ready to fire cmds */
	status = be_cmd_fw_init(adapter);
	if (status)
		goto ctrl_clean;

	status = be_cmd_reset_function(adapter);
	if (status)
		goto ctrl_clean;

	status = be_stats_init(adapter);
	if (status)
		goto ctrl_clean;

	status = be_get_config(adapter);
	if (status)
		goto stats_clean;

	be_msix_enable(adapter);

	INIT_DELAYED_WORK(&adapter->work, be_worker);

	status = be_setup(adapter);
	if (status)
		goto msix_disable;

	be_netdev_init(netdev);
	status = register_netdev(netdev);
	if (status != 0)
		goto unsetup;
	netif_carrier_off(netdev);

	if (be_physfn(adapter) && adapter->sriov_enabled) {
		u8 mac_speed;
		bool link_up;
		u16 vf, lnk_speed;

		status = be_vf_eth_addr_config(adapter);
		if (status)
			goto unreg_netdev;

		for (vf = 0; vf < num_vfs; vf++) {
			status = be_cmd_link_status_query(adapter, &link_up,
					&mac_speed, &lnk_speed, vf + 1);
			if (!status)
				adapter->vf_cfg[vf].vf_tx_rate = lnk_speed * 10;
			else
				goto unreg_netdev;
		}
	}

	dev_info(&pdev->dev, "%s port %d\n", nic_name(pdev), adapter->port_num);
	schedule_delayed_work(&adapter->work, msecs_to_jiffies(100));
	return 0;

unreg_netdev:
	unregister_netdev(netdev);
unsetup:
	be_clear(adapter);
msix_disable:
	be_msix_disable(adapter);
stats_clean:
	be_stats_cleanup(adapter);
ctrl_clean:
	be_ctrl_cleanup(adapter);
free_vf_cfg:
	kfree(adapter->vf_cfg);
free_netdev:
	be_sriov_disable(adapter);
	free_netdev(netdev);
	pci_set_drvdata(pdev, NULL);
rel_reg:
	pci_release_regions(pdev);
disable_dev:
	pci_disable_device(pdev);
do_none:
	dev_err(&pdev->dev, "%s initialization failed\n", nic_name(pdev));
	return status;
}

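/*
 * Power management hooks: suspend arms wake-on-lan if requested, closes
 * the interface and frees all rings; resume re-enables the device and
 * rebuilds everything from scratch through be_cmd_fw_init()/be_setup().
 */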
static int be_suspend(struct pci_dev *pdev, pm_message_t state)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);
	struct net_device *netdev = adapter->netdev;

	cancel_delayed_work_sync(&adapter->work);
	if (adapter->wol)
		be_setup_wol(adapter, true);

	netif_device_detach(netdev);
	if (netif_running(netdev)) {
		rtnl_lock();
		be_close(netdev);
		rtnl_unlock();
	}
	be_cmd_get_flow_control(adapter, &adapter->tx_fc, &adapter->rx_fc);
	be_clear(adapter);

	be_msix_disable(adapter);
	pci_save_state(pdev);
	pci_disable_device(pdev);
	pci_set_power_state(pdev, pci_choose_state(pdev, state));
	return 0;
}

static int be_resume(struct pci_dev *pdev)
{
	int status = 0;
	struct be_adapter *adapter = pci_get_drvdata(pdev);
	struct net_device *netdev = adapter->netdev;

	netif_device_detach(netdev);

	status = pci_enable_device(pdev);
	if (status)
		return status;

	pci_set_power_state(pdev, 0);
	pci_restore_state(pdev);

	be_msix_enable(adapter);
	/* tell fw we're ready to fire cmds */
	status = be_cmd_fw_init(adapter);
	if (status)
		return status;

	be_setup(adapter);
	if (netif_running(netdev)) {
		rtnl_lock();
		be_open(netdev);
		rtnl_unlock();
	}
	netif_device_attach(netdev);

	if (adapter->wol)
		be_setup_wol(adapter, false);

	schedule_delayed_work(&adapter->work, msecs_to_jiffies(100));
	return 0;
}

/*
 * An FLR will stop BE from DMAing any data.
 */
static void be_shutdown(struct pci_dev *pdev)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);

	if (!adapter)
		return;

	cancel_delayed_work_sync(&adapter->work);

	netif_device_detach(adapter->netdev);

	if (adapter->wol)
		be_setup_wol(adapter, true);

	be_cmd_reset_function(adapter);

	pci_disable_device(pdev);
}

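/*
 * EEH (PCI error recovery) hooks: on a detected error the interface is
 * closed and all resources are freed; slot reset re-enables the device
 * and re-runs POST; resume re-inits firmware, rebuilds the rings and
 * reopens the interface.
 */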
static pci_ers_result_t be_eeh_err_detected(struct pci_dev *pdev,
				pci_channel_state_t state)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);
	struct net_device *netdev = adapter->netdev;

	dev_err(&adapter->pdev->dev, "EEH error detected\n");

	adapter->eeh_err = true;

	netif_device_detach(netdev);

	if (netif_running(netdev)) {
		rtnl_lock();
		be_close(netdev);
		rtnl_unlock();
	}
	be_clear(adapter);

	if (state == pci_channel_io_perm_failure)
		return PCI_ERS_RESULT_DISCONNECT;

	pci_disable_device(pdev);

	return PCI_ERS_RESULT_NEED_RESET;
}

static pci_ers_result_t be_eeh_reset(struct pci_dev *pdev)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);
	int status;

	dev_info(&adapter->pdev->dev, "EEH reset\n");
	adapter->eeh_err = false;

	status = pci_enable_device(pdev);
	if (status)
		return PCI_ERS_RESULT_DISCONNECT;

	pci_set_master(pdev);
	pci_set_power_state(pdev, 0);
	pci_restore_state(pdev);

	/* Check if card is ok and fw is ready */
	status = be_cmd_POST(adapter);
	if (status)
		return PCI_ERS_RESULT_DISCONNECT;

	return PCI_ERS_RESULT_RECOVERED;
}

static void be_eeh_resume(struct pci_dev *pdev)
{
	int status = 0;
	struct be_adapter *adapter = pci_get_drvdata(pdev);
	struct net_device *netdev = adapter->netdev;

	dev_info(&adapter->pdev->dev, "EEH resume\n");

	pci_save_state(pdev);

	/* tell fw we're ready to fire cmds */
	status = be_cmd_fw_init(adapter);
	if (status)
		goto err;

	status = be_setup(adapter);
	if (status)
		goto err;

	if (netif_running(netdev)) {
		status = be_open(netdev);
		if (status)
			goto err;
	}
	netif_device_attach(netdev);
	return;
err:
	dev_err(&adapter->pdev->dev, "EEH resume failed\n");
}

static struct pci_error_handlers be_eeh_handlers = {
	.error_detected = be_eeh_err_detected,
	.slot_reset = be_eeh_reset,
	.resume = be_eeh_resume,
};

static struct pci_driver be_driver = {
	.name = DRV_NAME,
	.id_table = be_dev_ids,
	.probe = be_probe,
	.remove = be_remove,
	.suspend = be_suspend,
	.resume = be_resume,
	.shutdown = be_shutdown,
	.err_handler = &be_eeh_handlers
};

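/*
 * Module init validates rx_frag_size (2048/4096/8192, falling back to
 * 2048) before registering the PCI driver. As a usage sketch, assuming
 * the module is built under the driver's DRV_NAME (be2net in mainline):
 *
 *	modprobe be2net rx_frag_size=4096 num_vfs=2
 */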
static int __init be_init_module(void)
{
	if (rx_frag_size != 8192 && rx_frag_size != 4096 &&
	    rx_frag_size != 2048) {
		printk(KERN_WARNING DRV_NAME
			" : Module param rx_frag_size must be 2048/4096/8192."
			" Using 2048\n");
		rx_frag_size = 2048;
	}

	return pci_register_driver(&be_driver);
}
module_init(be_init_module);

static void __exit be_exit_module(void)
{
	pci_unregister_driver(&be_driver);
}
module_exit(be_exit_module);