/*
 * Copyright (C) 2005 - 2011 Emulex
 * All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version 2
 * as published by the Free Software Foundation. The full GNU General
 * Public License is included in this distribution in the file called COPYING.
 *
 * Contact Information:
 * linux-drivers@emulex.com
 *
 * Emulex
 * 3333 Susan Street
 * Costa Mesa, CA 92626
 */

#include "be.h"
#include "be_cmds.h"
#include <asm/div64.h>

MODULE_VERSION(DRV_VER);
MODULE_DESCRIPTION(DRV_DESC " " DRV_VER);
MODULE_AUTHOR("ServerEngines Corporation");
MODULE_LICENSE("GPL");

static ushort rx_frag_size = 2048;
static unsigned int num_vfs;
module_param(rx_frag_size, ushort, S_IRUGO);
module_param(num_vfs, uint, S_IRUGO);
MODULE_PARM_DESC(rx_frag_size, "Size of a fragment that holds rcvd data.");
MODULE_PARM_DESC(num_vfs, "Number of PCI VFs to initialize");

static bool multi_rxq = true;
module_param(multi_rxq, bool, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(multi_rxq, "Multi Rx Queue support. Enabled by default");

static DEFINE_PCI_DEVICE_TABLE(be_dev_ids) = {
	{ PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID1) },
	{ PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID2) },
	{ PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID1) },
	{ PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID2) },
	{ PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID3) },
	{ 0 }
};
MODULE_DEVICE_TABLE(pci, be_dev_ids);
/* UE Status Low CSR */
static char *ue_status_low_desc[] = {
	"CEV",
	"CTX",
	"DBUF",
	"ERX",
	"Host",
	"MPU",
	"NDMA",
	"PTC ",
	"RDMA ",
	"RXF ",
	"RXIPS ",
	"RXULP0 ",
	"RXULP1 ",
	"RXULP2 ",
	"TIM ",
	"TPOST ",
	"TPRE ",
	"TXIPS ",
	"TXULP0 ",
	"TXULP1 ",
	"UC ",
	"WDMA ",
	"TXULP2 ",
	"HOST1 ",
	"P0_OB_LINK ",
	"P1_OB_LINK ",
	"HOST_GPIO ",
	"MBOX ",
	"AXGMAC0",
	"AXGMAC1",
	"JTAG",
	"MPU_INTPEND"
};
/* UE Status High CSR */
static char *ue_status_hi_desc[] = {
	"LPCMEMHOST",
	"MGMT_MAC",
	"PCS0ONLINE",
	"MPU_IRAM",
	"PCS1ONLINE",
	"PCTL0",
	"PCTL1",
	"PMEM",
	"RR",
	"TXPB",
	"RXPP",
	"XAUI",
	"TXP",
	"ARM",
	"IPC",
	"HOST2",
	"HOST3",
	"HOST4",
	"HOST5",
	"HOST6",
	"HOST7",
	"HOST8",
	"HOST9",
	"NETC",
	"Unknown",
	"Unknown",
	"Unknown",
	"Unknown",
	"Unknown",
	"Unknown",
	"Unknown",
	"Unknown"
};

static void be_queue_free(struct be_adapter *adapter, struct be_queue_info *q)
{
	struct be_dma_mem *mem = &q->dma_mem;
	if (mem->va)
		dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
				  mem->dma);
}

static int be_queue_alloc(struct be_adapter *adapter, struct be_queue_info *q,
		u16 len, u16 entry_size)
{
	struct be_dma_mem *mem = &q->dma_mem;

	memset(q, 0, sizeof(*q));
	q->len = len;
	q->entry_size = entry_size;
	mem->size = len * entry_size;
	mem->va = dma_alloc_coherent(&adapter->pdev->dev, mem->size, &mem->dma,
				     GFP_KERNEL);
	if (!mem->va)
		return -1;
	memset(mem->va, 0, mem->size);
	return 0;
}

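/* Enable/disable interrupt delivery to the host by toggling the HOSTINTR
 * bit in the membar interrupt-control register; no-op if the bit is already
 * in the requested state or after an EEH error has been detected */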
static void be_intr_set(struct be_adapter *adapter, bool enable)
{
	u8 __iomem *addr = adapter->pcicfg + PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET;
	u32 reg = ioread32(addr);
	u32 enabled = reg & MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;

	if (adapter->eeh_err)
		return;

	if (!enabled && enable)
		reg |= MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
	else if (enabled && !enable)
		reg &= ~MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
	else
		return;

	iowrite32(reg, addr);
}

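/* RX doorbell: report to the adapter the number of buffers posted to an RX
 * queue; the wmb() makes the descriptor writes visible before the doorbell */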
static void be_rxq_notify(struct be_adapter *adapter, u16 qid, u16 posted)
{
	u32 val = 0;
	val |= qid & DB_RQ_RING_ID_MASK;
	val |= posted << DB_RQ_NUM_POSTED_SHIFT;

	wmb();
	iowrite32(val, adapter->db + DB_RQ_OFFSET);
}

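/* TX doorbell: report to the adapter the number of WRBs posted to a TX
 * (ULP) queue */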
static void be_txq_notify(struct be_adapter *adapter, u16 qid, u16 posted)
{
	u32 val = 0;
	val |= qid & DB_TXULP_RING_ID_MASK;
	val |= (posted & DB_TXULP_NUM_POSTED_MASK) << DB_TXULP_NUM_POSTED_SHIFT;

	wmb();
	iowrite32(val, adapter->db + DB_TXULP1_OFFSET);
}

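/* EQ doorbell: acknowledge num_popped event entries; optionally re-arm the
 * EQ and clear the interrupt */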
static void be_eq_notify(struct be_adapter *adapter, u16 qid,
		bool arm, bool clear_int, u16 num_popped)
{
	u32 val = 0;
	val |= qid & DB_EQ_RING_ID_MASK;
	val |= ((qid & DB_EQ_RING_ID_EXT_MASK) <<
			DB_EQ_RING_ID_EXT_MASK_SHIFT);

	if (adapter->eeh_err)
		return;

	if (arm)
		val |= 1 << DB_EQ_REARM_SHIFT;
	if (clear_int)
		val |= 1 << DB_EQ_CLR_SHIFT;
	val |= 1 << DB_EQ_EVNT_SHIFT;
	val |= num_popped << DB_EQ_NUM_POPPED_SHIFT;
	iowrite32(val, adapter->db + DB_EQ_OFFSET);
}

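/* CQ doorbell: acknowledge num_popped completions; optionally re-arm the CQ */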
void be_cq_notify(struct be_adapter *adapter, u16 qid, bool arm, u16 num_popped)
{
	u32 val = 0;
	val |= qid & DB_CQ_RING_ID_MASK;
	val |= ((qid & DB_CQ_RING_ID_EXT_MASK) <<
			DB_CQ_RING_ID_EXT_MASK_SHIFT);

	if (adapter->eeh_err)
		return;

	if (arm)
		val |= 1 << DB_CQ_REARM_SHIFT;
	val |= num_popped << DB_CQ_NUM_POPPED_SHIFT;
	iowrite32(val, adapter->db + DB_CQ_OFFSET);
}

static int be_mac_addr_set(struct net_device *netdev, void *p)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct sockaddr *addr = p;
	int status = 0;

	if (!is_valid_ether_addr(addr->sa_data))
		return -EADDRNOTAVAIL;

	/* MAC addr configuration will be done in hardware for VFs
	 * by their corresponding PFs. Just copy to netdev addr here
	 */
	if (!be_physfn(adapter))
		goto netdev_addr;

	status = be_cmd_pmac_del(adapter, adapter->if_handle,
				adapter->pmac_id, 0);
	if (status)
		return status;

	status = be_cmd_pmac_add(adapter, (u8 *)addr->sa_data,
				adapter->if_handle, &adapter->pmac_id, 0);
netdev_addr:
	if (!status)
		memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);

	return status;
}

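/* Fold the HW rxf/port/erx stats and the per-queue driver stats into the
 * netdev stats structure */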
void netdev_stats_update(struct be_adapter *adapter)
{
	struct be_hw_stats *hw_stats = hw_stats_from_cmd(adapter->stats_cmd.va);
	struct be_rxf_stats *rxf_stats = &hw_stats->rxf;
	struct be_port_rxf_stats *port_stats =
			&rxf_stats->port[adapter->port_num];
	struct net_device_stats *dev_stats = &adapter->netdev->stats;
	struct be_erx_stats *erx_stats = &hw_stats->erx;
	struct be_rx_obj *rxo;
	int i;

	memset(dev_stats, 0, sizeof(*dev_stats));
	for_all_rx_queues(adapter, rxo, i) {
		dev_stats->rx_packets += rx_stats(rxo)->rx_pkts;
		dev_stats->rx_bytes += rx_stats(rxo)->rx_bytes;
		dev_stats->multicast += rx_stats(rxo)->rx_mcast_pkts;
		/* no space in linux buffers: best possible approximation */
		dev_stats->rx_dropped +=
			erx_stats->rx_drops_no_fragments[rxo->q.id];
	}

	dev_stats->tx_packets = tx_stats(adapter)->be_tx_pkts;
	dev_stats->tx_bytes = tx_stats(adapter)->be_tx_bytes;

	/* bad pkts received */
	dev_stats->rx_errors = port_stats->rx_crc_errors +
		port_stats->rx_alignment_symbol_errors +
		port_stats->rx_in_range_errors +
		port_stats->rx_out_range_errors +
		port_stats->rx_frame_too_long +
		port_stats->rx_dropped_too_small +
		port_stats->rx_dropped_too_short +
		port_stats->rx_dropped_header_too_small +
		port_stats->rx_dropped_tcp_length +
		port_stats->rx_dropped_runt +
		port_stats->rx_tcp_checksum_errs +
		port_stats->rx_ip_checksum_errs +
		port_stats->rx_udp_checksum_errs;

	/* detailed rx errors */
	dev_stats->rx_length_errors = port_stats->rx_in_range_errors +
		port_stats->rx_out_range_errors +
		port_stats->rx_frame_too_long;

	dev_stats->rx_crc_errors = port_stats->rx_crc_errors;

	/* frame alignment errors */
	dev_stats->rx_frame_errors = port_stats->rx_alignment_symbol_errors;

	/* receiver fifo overrun */
	/* drops_no_pbuf is not per i/f, it's per BE card */
	dev_stats->rx_fifo_errors = port_stats->rx_fifo_overflow +
			port_stats->rx_input_fifo_overflow +
			rxf_stats->rx_drops_no_pbuf;
}

void be_link_status_update(struct be_adapter *adapter, bool link_up)
{
	struct net_device *netdev = adapter->netdev;

	/* If link came up or went down */
	if (adapter->link_up != link_up) {
		adapter->link_speed = -1;
		if (link_up) {
			netif_carrier_on(netdev);
			printk(KERN_INFO "%s: Link up\n", netdev->name);
		} else {
			netif_carrier_off(netdev);
			printk(KERN_INFO "%s: Link down\n", netdev->name);
		}
		adapter->link_up = link_up;
	}
}

/* Update the EQ delay on BE based on the RX frags consumed / sec */
static void be_rx_eqd_update(struct be_adapter *adapter, struct be_rx_obj *rxo)
{
	struct be_eq_obj *rx_eq = &rxo->rx_eq;
	struct be_rx_stats *stats = &rxo->stats;
	ulong now = jiffies;
	u32 eqd;

	if (!rx_eq->enable_aic)
		return;

	/* Wrapped around */
	if (time_before(now, stats->rx_fps_jiffies)) {
		stats->rx_fps_jiffies = now;
		return;
	}

	/* Update once a second */
	if ((now - stats->rx_fps_jiffies) < HZ)
		return;

	stats->rx_fps = (stats->rx_frags - stats->prev_rx_frags) /
			((now - stats->rx_fps_jiffies) / HZ);

	stats->rx_fps_jiffies = now;
	stats->prev_rx_frags = stats->rx_frags;
	eqd = stats->rx_fps / 110000;
	eqd = eqd << 3;
	if (eqd > rx_eq->max_eqd)
		eqd = rx_eq->max_eqd;
	if (eqd < rx_eq->min_eqd)
		eqd = rx_eq->min_eqd;
	if (eqd < 10)
		eqd = 0;
	if (eqd != rx_eq->cur_eqd)
		be_cmd_modify_eqd(adapter, rx_eq->q.id, eqd);

	rx_eq->cur_eqd = eqd;
}

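/* Convert a byte count accumulated over 'ticks' jiffies into Mbits/sec */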
static u32 be_calc_rate(u64 bytes, unsigned long ticks)
{
	u64 rate = bytes;

	do_div(rate, ticks / HZ);
	rate <<= 3;			/* bytes/sec -> bits/sec */
	do_div(rate, 1000000ul);	/* MB/Sec */

	return rate;
}

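/* Recompute the TX bit-rate from the byte counters, at most once in two
 * seconds */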
static void be_tx_rate_update(struct be_adapter *adapter)
{
	struct be_tx_stats *stats = tx_stats(adapter);
	ulong now = jiffies;

	/* Wrapped around? */
	if (time_before(now, stats->be_tx_jiffies)) {
		stats->be_tx_jiffies = now;
		return;
	}

	/* Update tx rate once in two seconds */
	if ((now - stats->be_tx_jiffies) > 2 * HZ) {
		stats->be_tx_rate = be_calc_rate(stats->be_tx_bytes
						  - stats->be_tx_bytes_prev,
						 now - stats->be_tx_jiffies);
		stats->be_tx_jiffies = now;
		stats->be_tx_bytes_prev = stats->be_tx_bytes;
	}
}

static void be_tx_stats_update(struct be_adapter *adapter,
			u32 wrb_cnt, u32 copied, u32 gso_segs, bool stopped)
{
	struct be_tx_stats *stats = tx_stats(adapter);
	stats->be_tx_reqs++;
	stats->be_tx_wrbs += wrb_cnt;
	stats->be_tx_bytes += copied;
	stats->be_tx_pkts += (gso_segs ? gso_segs : 1);
	if (stopped)
		stats->be_tx_stops++;
}

/* Determine number of WRB entries needed to xmit data in an skb */
static u32 wrb_cnt_for_skb(struct be_adapter *adapter, struct sk_buff *skb,
								bool *dummy)
{
	int cnt = (skb->len > skb->data_len);

	cnt += skb_shinfo(skb)->nr_frags;

	/* to account for hdr wrb */
	cnt++;
	if (lancer_chip(adapter) || !(cnt & 1)) {
		*dummy = false;
	} else {
		/* add a dummy to make it an even num */
		cnt++;
		*dummy = true;
	}
	BUG_ON(cnt > BE_MAX_TX_FRAG_COUNT);
	return cnt;
}

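/* Fill one WRB with the DMA address and length of a single buffer fragment */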
static inline void wrb_fill(struct be_eth_wrb *wrb, u64 addr, int len)
{
	wrb->frag_pa_hi = upper_32_bits(addr);
	wrb->frag_pa_lo = addr & 0xFFFFFFFF;
	wrb->frag_len = len & ETH_WRB_FRAG_LEN_MASK;
}

static void wrb_fill_hdr(struct be_adapter *adapter, struct be_eth_hdr_wrb *hdr,
		struct sk_buff *skb, u32 wrb_cnt, u32 len)
{
	u8 vlan_prio = 0;
	u16 vlan_tag = 0;

	memset(hdr, 0, sizeof(*hdr));

	AMAP_SET_BITS(struct amap_eth_hdr_wrb, crc, hdr, 1);

	if (skb_is_gso(skb)) {
		AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso, hdr, 1);
		AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso_mss,
			hdr, skb_shinfo(skb)->gso_size);
		if (skb_is_gso_v6(skb) && !lancer_chip(adapter))
			AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso6, hdr, 1);
		if (lancer_chip(adapter) && adapter->sli_family ==
							LANCER_A0_SLI_FAMILY) {
			AMAP_SET_BITS(struct amap_eth_hdr_wrb, ipcs, hdr, 1);
			if (is_tcp_pkt(skb))
				AMAP_SET_BITS(struct amap_eth_hdr_wrb,
								tcpcs, hdr, 1);
			else if (is_udp_pkt(skb))
				AMAP_SET_BITS(struct amap_eth_hdr_wrb,
								udpcs, hdr, 1);
		}
	} else if (skb->ip_summed == CHECKSUM_PARTIAL) {
		if (is_tcp_pkt(skb))
			AMAP_SET_BITS(struct amap_eth_hdr_wrb, tcpcs, hdr, 1);
		else if (is_udp_pkt(skb))
			AMAP_SET_BITS(struct amap_eth_hdr_wrb, udpcs, hdr, 1);
	}

	if (adapter->vlan_grp && vlan_tx_tag_present(skb)) {
		AMAP_SET_BITS(struct amap_eth_hdr_wrb, vlan, hdr, 1);
		vlan_tag = vlan_tx_tag_get(skb);
		vlan_prio = (vlan_tag & VLAN_PRIO_MASK) >> VLAN_PRIO_SHIFT;
		/* If vlan priority provided by OS is NOT in available bmap */
		if (!(adapter->vlan_prio_bmap & (1 << vlan_prio)))
			vlan_tag = (vlan_tag & ~VLAN_PRIO_MASK) |
					adapter->recommended_prio;
		AMAP_SET_BITS(struct amap_eth_hdr_wrb, vlan_tag, hdr, vlan_tag);
	}

	AMAP_SET_BITS(struct amap_eth_hdr_wrb, event, hdr, 1);
	AMAP_SET_BITS(struct amap_eth_hdr_wrb, complete, hdr, 1);
	AMAP_SET_BITS(struct amap_eth_hdr_wrb, num_wrb, hdr, wrb_cnt);
	AMAP_SET_BITS(struct amap_eth_hdr_wrb, len, hdr, len);
}

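/* Undo the DMA mapping recorded in a TX WRB (single buffer or page frag) */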
static void unmap_tx_frag(struct device *dev, struct be_eth_wrb *wrb,
		bool unmap_single)
{
	dma_addr_t dma;

	be_dws_le_to_cpu(wrb, sizeof(*wrb));

	dma = (u64)wrb->frag_pa_hi << 32 | (u64)wrb->frag_pa_lo;
	if (wrb->frag_len) {
		if (unmap_single)
			dma_unmap_single(dev, dma, wrb->frag_len,
					 DMA_TO_DEVICE);
		else
			dma_unmap_page(dev, dma, wrb->frag_len, DMA_TO_DEVICE);
	}
}

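/* DMA-map the skb header and frags and fill the TX queue WRBs; returns the
 * number of bytes queued, or 0 on a mapping error (all mappings undone) */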
static int make_tx_wrbs(struct be_adapter *adapter,
		struct sk_buff *skb, u32 wrb_cnt, bool dummy_wrb)
{
	dma_addr_t busaddr;
	int i, copied = 0;
	struct device *dev = &adapter->pdev->dev;
	struct sk_buff *first_skb = skb;
	struct be_queue_info *txq = &adapter->tx_obj.q;
	struct be_eth_wrb *wrb;
	struct be_eth_hdr_wrb *hdr;
	bool map_single = false;
	u16 map_head;

	hdr = queue_head_node(txq);
	queue_head_inc(txq);
	map_head = txq->head;

	if (skb->len > skb->data_len) {
		int len = skb_headlen(skb);
		busaddr = dma_map_single(dev, skb->data, len, DMA_TO_DEVICE);
		if (dma_mapping_error(dev, busaddr))
			goto dma_err;
		map_single = true;
		wrb = queue_head_node(txq);
		wrb_fill(wrb, busaddr, len);
		be_dws_cpu_to_le(wrb, sizeof(*wrb));
		queue_head_inc(txq);
		copied += len;
	}

	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		struct skb_frag_struct *frag =
			&skb_shinfo(skb)->frags[i];
		busaddr = dma_map_page(dev, frag->page, frag->page_offset,
				       frag->size, DMA_TO_DEVICE);
		if (dma_mapping_error(dev, busaddr))
			goto dma_err;
		wrb = queue_head_node(txq);
		wrb_fill(wrb, busaddr, frag->size);
		be_dws_cpu_to_le(wrb, sizeof(*wrb));
		queue_head_inc(txq);
		copied += frag->size;
	}

	if (dummy_wrb) {
		wrb = queue_head_node(txq);
		wrb_fill(wrb, 0, 0);
		be_dws_cpu_to_le(wrb, sizeof(*wrb));
		queue_head_inc(txq);
	}

	wrb_fill_hdr(adapter, hdr, first_skb, wrb_cnt, copied);
	be_dws_cpu_to_le(hdr, sizeof(*hdr));

	return copied;
dma_err:
	txq->head = map_head;
	while (copied) {
		wrb = queue_head_node(txq);
		unmap_tx_frag(dev, wrb, map_single);
		map_single = false;
		copied -= wrb->frag_len;
		queue_head_inc(txq);
	}
	return 0;
}

static netdev_tx_t be_xmit(struct sk_buff *skb,
			struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_tx_obj *tx_obj = &adapter->tx_obj;
	struct be_queue_info *txq = &tx_obj->q;
	u32 wrb_cnt = 0, copied = 0;
	u32 start = txq->head;
	bool dummy_wrb, stopped = false;

	wrb_cnt = wrb_cnt_for_skb(adapter, skb, &dummy_wrb);

	copied = make_tx_wrbs(adapter, skb, wrb_cnt, dummy_wrb);
	if (copied) {
		/* record the sent skb in the sent_skb table */
		BUG_ON(tx_obj->sent_skb_list[start]);
		tx_obj->sent_skb_list[start] = skb;

		/* Ensure txq has space for the next skb; Else stop the queue
		 * *BEFORE* ringing the tx doorbell, so that we serialize the
		 * tx compls of the current transmit which'll wake up the queue
		 */
		atomic_add(wrb_cnt, &txq->used);
		if ((BE_MAX_TX_FRAG_COUNT + atomic_read(&txq->used)) >=
								txq->len) {
			netif_stop_queue(netdev);
			stopped = true;
		}

		be_txq_notify(adapter, txq->id, wrb_cnt);

		be_tx_stats_update(adapter, wrb_cnt, copied,
				skb_shinfo(skb)->gso_segs, stopped);
	} else {
		txq->head = start;
		dev_kfree_skb_any(skb);
	}
	return NETDEV_TX_OK;
}

static int be_change_mtu(struct net_device *netdev, int new_mtu)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	if (new_mtu < BE_MIN_MTU ||
			new_mtu > (BE_MAX_JUMBO_FRAME_SIZE -
					(ETH_HLEN + ETH_FCS_LEN))) {
		dev_info(&adapter->pdev->dev,
			"MTU must be between %d and %d bytes\n",
			BE_MIN_MTU,
			(BE_MAX_JUMBO_FRAME_SIZE - (ETH_HLEN + ETH_FCS_LEN)));
		return -EINVAL;
	}
	dev_info(&adapter->pdev->dev, "MTU changed from %d to %d bytes\n",
			netdev->mtu, new_mtu);
	netdev->mtu = new_mtu;
	return 0;
}

/*
 * A max of 64 (BE_NUM_VLANS_SUPPORTED) vlans can be configured in BE.
 * If the user configures more, place BE in vlan promiscuous mode.
 */
static int be_vid_config(struct be_adapter *adapter, bool vf, u32 vf_num)
{
	u16 vtag[BE_NUM_VLANS_SUPPORTED];
	u16 ntags = 0, i;
	int status = 0;
	u32 if_handle;

	if (vf) {
		if_handle = adapter->vf_cfg[vf_num].vf_if_handle;
		vtag[0] = cpu_to_le16(adapter->vf_cfg[vf_num].vf_vlan_tag);
		status = be_cmd_vlan_config(adapter, if_handle, vtag, 1, 1, 0);
	}

	if (adapter->vlans_added <= adapter->max_vlans) {
		/* Construct VLAN Table to give to HW */
		for (i = 0; i < VLAN_N_VID; i++) {
			if (adapter->vlan_tag[i]) {
				vtag[ntags] = cpu_to_le16(i);
				ntags++;
			}
		}
		status = be_cmd_vlan_config(adapter, adapter->if_handle,
					vtag, ntags, 1, 0);
	} else {
		status = be_cmd_vlan_config(adapter, adapter->if_handle,
					NULL, 0, 1, 1);
	}

	return status;
}

static void be_vlan_register(struct net_device *netdev, struct vlan_group *grp)
{
	struct be_adapter *adapter = netdev_priv(netdev);

	adapter->vlan_grp = grp;
}

static void be_vlan_add_vid(struct net_device *netdev, u16 vid)
{
	struct be_adapter *adapter = netdev_priv(netdev);

	adapter->vlans_added++;
	if (!be_physfn(adapter))
		return;

	adapter->vlan_tag[vid] = 1;
	if (adapter->vlans_added <= (adapter->max_vlans + 1))
		be_vid_config(adapter, false, 0);
}

static void be_vlan_rem_vid(struct net_device *netdev, u16 vid)
{
	struct be_adapter *adapter = netdev_priv(netdev);

	adapter->vlans_added--;
	vlan_group_set_device(adapter->vlan_grp, vid, NULL);

	if (!be_physfn(adapter))
		return;

	adapter->vlan_tag[vid] = 0;
	if (adapter->vlans_added <= adapter->max_vlans)
		be_vid_config(adapter, false, 0);
}

static void be_set_multicast_list(struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);

	if (netdev->flags & IFF_PROMISC) {
		be_cmd_promiscuous_config(adapter, adapter->port_num, 1);
		adapter->promiscuous = true;
		goto done;
	}

	/* BE was previously in promiscuous mode; disable it */
	if (adapter->promiscuous) {
		adapter->promiscuous = false;
		be_cmd_promiscuous_config(adapter, adapter->port_num, 0);
	}

	/* Enable multicast promisc if num configured exceeds what we support */
	if (netdev->flags & IFF_ALLMULTI ||
	    netdev_mc_count(netdev) > BE_MAX_MC) {
		be_cmd_multicast_set(adapter, adapter->if_handle, NULL,
				&adapter->mc_cmd_mem);
		goto done;
	}

	be_cmd_multicast_set(adapter, adapter->if_handle, netdev,
		&adapter->mc_cmd_mem);
done:
	return;
}

static int be_set_vf_mac(struct net_device *netdev, int vf, u8 *mac)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	int status;

	if (!adapter->sriov_enabled)
		return -EPERM;

	if (!is_valid_ether_addr(mac) || (vf >= num_vfs))
		return -EINVAL;

	if (adapter->vf_cfg[vf].vf_pmac_id != BE_INVALID_PMAC_ID)
		status = be_cmd_pmac_del(adapter,
					adapter->vf_cfg[vf].vf_if_handle,
					adapter->vf_cfg[vf].vf_pmac_id, vf + 1);

	status = be_cmd_pmac_add(adapter, mac,
				adapter->vf_cfg[vf].vf_if_handle,
				&adapter->vf_cfg[vf].vf_pmac_id, vf + 1);

	if (status)
		dev_err(&adapter->pdev->dev, "MAC %pM set on VF %d Failed\n",
				mac, vf);
	else
		memcpy(adapter->vf_cfg[vf].vf_mac_addr, mac, ETH_ALEN);

	return status;
}

static int be_get_vf_config(struct net_device *netdev, int vf,
			struct ifla_vf_info *vi)
{
	struct be_adapter *adapter = netdev_priv(netdev);

	if (!adapter->sriov_enabled)
		return -EPERM;

	if (vf >= num_vfs)
		return -EINVAL;

	vi->vf = vf;
	vi->tx_rate = adapter->vf_cfg[vf].vf_tx_rate;
	vi->vlan = adapter->vf_cfg[vf].vf_vlan_tag;
	vi->qos = 0;
	memcpy(&vi->mac, adapter->vf_cfg[vf].vf_mac_addr, ETH_ALEN);

	return 0;
}

static int be_set_vf_vlan(struct net_device *netdev,
			int vf, u16 vlan, u8 qos)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	int status = 0;

	if (!adapter->sriov_enabled)
		return -EPERM;

	if ((vf >= num_vfs) || (vlan > 4095))
		return -EINVAL;

	if (vlan) {
		adapter->vf_cfg[vf].vf_vlan_tag = vlan;
		adapter->vlans_added++;
	} else {
		adapter->vf_cfg[vf].vf_vlan_tag = 0;
		adapter->vlans_added--;
	}

	status = be_vid_config(adapter, true, vf);

	if (status)
		dev_info(&adapter->pdev->dev,
				"VLAN %d config on VF %d failed\n", vlan, vf);
	return status;
}

static int be_set_vf_tx_rate(struct net_device *netdev,
			int vf, int rate)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	int status = 0;

	if (!adapter->sriov_enabled)
		return -EPERM;

	if ((vf >= num_vfs) || (rate < 0))
		return -EINVAL;

	if (rate > 10000)
		rate = 10000;

	adapter->vf_cfg[vf].vf_tx_rate = rate;
	status = be_cmd_set_qos(adapter, rate / 10, vf + 1);

	if (status)
		dev_info(&adapter->pdev->dev,
				"tx rate %d on VF %d failed\n", rate, vf);
	return status;
}

static void be_rx_rate_update(struct be_rx_obj *rxo)
{
	struct be_rx_stats *stats = &rxo->stats;
	ulong now = jiffies;

	/* Wrapped around */
	if (time_before(now, stats->rx_jiffies)) {
		stats->rx_jiffies = now;
		return;
	}

	/* Update the rate once in two seconds */
	if ((now - stats->rx_jiffies) < 2 * HZ)
		return;

	stats->rx_rate = be_calc_rate(stats->rx_bytes - stats->rx_bytes_prev,
				now - stats->rx_jiffies);
	stats->rx_jiffies = now;
	stats->rx_bytes_prev = stats->rx_bytes;
}

static void be_rx_stats_update(struct be_rx_obj *rxo,
		struct be_rx_compl_info *rxcp)
{
	struct be_rx_stats *stats = &rxo->stats;

	stats->rx_compl++;
	stats->rx_frags += rxcp->num_rcvd;
	stats->rx_bytes += rxcp->pkt_size;
	stats->rx_pkts++;
	if (rxcp->pkt_type == BE_MULTICAST_PACKET)
		stats->rx_mcast_pkts++;
	if (rxcp->err)
		stats->rxcp_err++;
}

static inline bool csum_passed(struct be_rx_compl_info *rxcp)
{
	/* L4 checksum is not reliable for non TCP/UDP packets.
	 * Also ignore ipcksm for ipv6 pkts */
	return (rxcp->tcpf || rxcp->udpf) && rxcp->l4_csum &&
				(rxcp->ip_csum || rxcp->ipv6);
}

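/* Return the page_info of a received frag; unmap the page on its last use
 * and decrement the RX queue's used count */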
static struct be_rx_page_info *
get_rx_page_info(struct be_adapter *adapter,
		struct be_rx_obj *rxo,
		u16 frag_idx)
{
	struct be_rx_page_info *rx_page_info;
	struct be_queue_info *rxq = &rxo->q;

	rx_page_info = &rxo->page_info_tbl[frag_idx];
	BUG_ON(!rx_page_info->page);

	if (rx_page_info->last_page_user) {
		dma_unmap_page(&adapter->pdev->dev,
			       dma_unmap_addr(rx_page_info, bus),
			       adapter->big_page_size, DMA_FROM_DEVICE);
		rx_page_info->last_page_user = false;
	}

	atomic_dec(&rxq->used);
	return rx_page_info;
}

/* Throw away the data in the Rx completion */
static void be_rx_compl_discard(struct be_adapter *adapter,
		struct be_rx_obj *rxo,
		struct be_rx_compl_info *rxcp)
{
	struct be_queue_info *rxq = &rxo->q;
	struct be_rx_page_info *page_info;
	u16 i, num_rcvd = rxcp->num_rcvd;

	for (i = 0; i < num_rcvd; i++) {
		page_info = get_rx_page_info(adapter, rxo, rxcp->rxq_idx);
		put_page(page_info->page);
		memset(page_info, 0, sizeof(*page_info));
		index_inc(&rxcp->rxq_idx, rxq->len);
	}
}

/*
 * skb_fill_rx_data forms a complete skb for an ether frame
 * indicated by rxcp.
 */
static void skb_fill_rx_data(struct be_adapter *adapter, struct be_rx_obj *rxo,
			struct sk_buff *skb, struct be_rx_compl_info *rxcp)
{
	struct be_queue_info *rxq = &rxo->q;
	struct be_rx_page_info *page_info;
	u16 i, j;
	u16 hdr_len, curr_frag_len, remaining;
	u8 *start;

	page_info = get_rx_page_info(adapter, rxo, rxcp->rxq_idx);
	start = page_address(page_info->page) + page_info->page_offset;
	prefetch(start);

	/* Copy data in the first descriptor of this completion */
	curr_frag_len = min(rxcp->pkt_size, rx_frag_size);

	/* Copy the header portion into skb_data */
	hdr_len = min(BE_HDR_LEN, curr_frag_len);
	memcpy(skb->data, start, hdr_len);
	skb->len = curr_frag_len;
	if (curr_frag_len <= BE_HDR_LEN) { /* tiny packet */
		/* Complete packet has now been moved to data */
		put_page(page_info->page);
		skb->data_len = 0;
		skb->tail += curr_frag_len;
	} else {
		skb_shinfo(skb)->nr_frags = 1;
		skb_shinfo(skb)->frags[0].page = page_info->page;
		skb_shinfo(skb)->frags[0].page_offset =
					page_info->page_offset + hdr_len;
		skb_shinfo(skb)->frags[0].size = curr_frag_len - hdr_len;
		skb->data_len = curr_frag_len - hdr_len;
		skb->tail += hdr_len;
	}
	page_info->page = NULL;

	if (rxcp->pkt_size <= rx_frag_size) {
		BUG_ON(rxcp->num_rcvd != 1);
		return;
	}

	/* More frags present for this completion */
	index_inc(&rxcp->rxq_idx, rxq->len);
	remaining = rxcp->pkt_size - curr_frag_len;
	for (i = 1, j = 0; i < rxcp->num_rcvd; i++) {
		page_info = get_rx_page_info(adapter, rxo, rxcp->rxq_idx);
		curr_frag_len = min(remaining, rx_frag_size);

		/* Coalesce all frags from the same physical page in one slot */
		if (page_info->page_offset == 0) {
			/* Fresh page */
			j++;
			skb_shinfo(skb)->frags[j].page = page_info->page;
			skb_shinfo(skb)->frags[j].page_offset =
							page_info->page_offset;
			skb_shinfo(skb)->frags[j].size = 0;
			skb_shinfo(skb)->nr_frags++;
		} else {
			put_page(page_info->page);
		}

		skb_shinfo(skb)->frags[j].size += curr_frag_len;
		skb->len += curr_frag_len;
		skb->data_len += curr_frag_len;

		remaining -= curr_frag_len;
		index_inc(&rxcp->rxq_idx, rxq->len);
		page_info->page = NULL;
	}
	BUG_ON(j > MAX_SKB_FRAGS);
}

/* Process the RX completion indicated by rxcp when GRO is disabled */
static void be_rx_compl_process(struct be_adapter *adapter,
		struct be_rx_obj *rxo,
		struct be_rx_compl_info *rxcp)
{
	struct net_device *netdev = adapter->netdev;
	struct sk_buff *skb;

	skb = netdev_alloc_skb_ip_align(netdev, BE_HDR_LEN);
	if (unlikely(!skb)) {
		if (net_ratelimit())
			dev_warn(&adapter->pdev->dev, "skb alloc failed\n");
		be_rx_compl_discard(adapter, rxo, rxcp);
		return;
	}

	skb_fill_rx_data(adapter, rxo, skb, rxcp);

	if (likely((netdev->features & NETIF_F_RXCSUM) && csum_passed(rxcp)))
		skb->ip_summed = CHECKSUM_UNNECESSARY;
	else
		skb_checksum_none_assert(skb);

	skb->truesize = skb->len + sizeof(struct sk_buff);
	skb->protocol = eth_type_trans(skb, netdev);
	if (adapter->netdev->features & NETIF_F_RXHASH)
		skb->rxhash = rxcp->rss_hash;

	if (unlikely(rxcp->vlanf)) {
		if (!adapter->vlan_grp || adapter->vlans_added == 0) {
			kfree_skb(skb);
			return;
		}
		vlan_hwaccel_receive_skb(skb, adapter->vlan_grp,
					 rxcp->vlan_tag);
	} else {
		netif_receive_skb(skb);
	}
}

/* Process the RX completion indicated by rxcp when GRO is enabled */
static void be_rx_compl_process_gro(struct be_adapter *adapter,
		struct be_rx_obj *rxo,
		struct be_rx_compl_info *rxcp)
{
	struct be_rx_page_info *page_info;
	struct sk_buff *skb = NULL;
	struct be_queue_info *rxq = &rxo->q;
	struct be_eq_obj *eq_obj = &rxo->rx_eq;
	u16 remaining, curr_frag_len;
	u16 i, j;

	skb = napi_get_frags(&eq_obj->napi);
	if (!skb) {
		be_rx_compl_discard(adapter, rxo, rxcp);
		return;
	}

	remaining = rxcp->pkt_size;
	for (i = 0, j = -1; i < rxcp->num_rcvd; i++) {
		page_info = get_rx_page_info(adapter, rxo, rxcp->rxq_idx);

		curr_frag_len = min(remaining, rx_frag_size);

		/* Coalesce all frags from the same physical page in one slot */
		if (i == 0 || page_info->page_offset == 0) {
			/* First frag or Fresh page */
			j++;
			skb_shinfo(skb)->frags[j].page = page_info->page;
			skb_shinfo(skb)->frags[j].page_offset =
							page_info->page_offset;
			skb_shinfo(skb)->frags[j].size = 0;
		} else {
			put_page(page_info->page);
		}
		skb_shinfo(skb)->frags[j].size += curr_frag_len;

		remaining -= curr_frag_len;
		index_inc(&rxcp->rxq_idx, rxq->len);
		memset(page_info, 0, sizeof(*page_info));
	}
	BUG_ON(j > MAX_SKB_FRAGS);

	skb_shinfo(skb)->nr_frags = j + 1;
	skb->len = rxcp->pkt_size;
	skb->data_len = rxcp->pkt_size;
	skb->truesize += rxcp->pkt_size;
	skb->ip_summed = CHECKSUM_UNNECESSARY;
	if (adapter->netdev->features & NETIF_F_RXHASH)
		skb->rxhash = rxcp->rss_hash;

	if (likely(!rxcp->vlanf))
		napi_gro_frags(&eq_obj->napi);
	else
		vlan_gro_frags(&eq_obj->napi, adapter->vlan_grp,
			       rxcp->vlan_tag);
}

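/* Decode a v1 (BE3 native mode) RX completion into the sw rxcp struct */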
static void be_parse_rx_compl_v1(struct be_adapter *adapter,
				struct be_eth_rx_compl *compl,
				struct be_rx_compl_info *rxcp)
{
	rxcp->pkt_size =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v1, pktsize, compl);
	rxcp->vlanf = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, vtp, compl);
	rxcp->err = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, err, compl);
	rxcp->tcpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, tcpf, compl);
	rxcp->udpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, udpf, compl);
	rxcp->ip_csum =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v1, ipcksm, compl);
	rxcp->l4_csum =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v1, l4_cksm, compl);
	rxcp->ipv6 =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v1, ip_version, compl);
	rxcp->rxq_idx =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v1, fragndx, compl);
	rxcp->num_rcvd =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v1, numfrags, compl);
	rxcp->pkt_type =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v1, cast_enc, compl);
	rxcp->rss_hash =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v1, rsshash, compl);
	if (rxcp->vlanf) {
		rxcp->vtm = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, vtm,
					  compl);
		rxcp->vlan_tag = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, vlan_tag,
					       compl);
	}
}

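/* Decode a v0 (legacy) RX completion into the sw rxcp struct */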
static void be_parse_rx_compl_v0(struct be_adapter *adapter,
				struct be_eth_rx_compl *compl,
				struct be_rx_compl_info *rxcp)
{
	rxcp->pkt_size =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v0, pktsize, compl);
	rxcp->vlanf = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, vtp, compl);
	rxcp->err = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, err, compl);
	rxcp->tcpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, tcpf, compl);
	rxcp->udpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, udpf, compl);
	rxcp->ip_csum =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v0, ipcksm, compl);
	rxcp->l4_csum =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v0, l4_cksm, compl);
	rxcp->ipv6 =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v0, ip_version, compl);
	rxcp->rxq_idx =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v0, fragndx, compl);
	rxcp->num_rcvd =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v0, numfrags, compl);
	rxcp->pkt_type =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v0, cast_enc, compl);
	rxcp->rss_hash =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v0, rsshash, compl);
	if (rxcp->vlanf) {
		rxcp->vtm = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, vtm,
					  compl);
		rxcp->vlan_tag = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, vlan_tag,
					       compl);
	}
}

static struct be_rx_compl_info *be_rx_compl_get(struct be_rx_obj *rxo)
{
	struct be_eth_rx_compl *compl = queue_tail_node(&rxo->cq);
	struct be_rx_compl_info *rxcp = &rxo->rxcp;
	struct be_adapter *adapter = rxo->adapter;

	/* For checking the valid bit it is Ok to use either definition as the
	 * valid bit is at the same position in both v0 and v1 Rx compl */
	if (compl->dw[offsetof(struct amap_eth_rx_compl_v1, valid) / 32] == 0)
		return NULL;

	rmb();
	be_dws_le_to_cpu(compl, sizeof(*compl));

	if (adapter->be3_native)
		be_parse_rx_compl_v1(adapter, compl, rxcp);
	else
		be_parse_rx_compl_v0(adapter, compl, rxcp);

	if (rxcp->vlanf) {
		/* vlanf could be wrongly set in some cards.
		 * ignore if vtm is not set */
		if ((adapter->function_mode & 0x400) && !rxcp->vtm)
			rxcp->vlanf = 0;

		if (!lancer_chip(adapter))
			rxcp->vlan_tag = swab16(rxcp->vlan_tag);

		if (((adapter->pvid & VLAN_VID_MASK) ==
		     (rxcp->vlan_tag & VLAN_VID_MASK)) &&
		    !adapter->vlan_tag[rxcp->vlan_tag])
			rxcp->vlanf = 0;
	}

	/* As the compl has been parsed, reset it; we won't touch it again */
	compl->dw[offsetof(struct amap_eth_rx_compl_v1, valid) / 32] = 0;

	queue_tail_inc(&rxo->cq);
	return rxcp;
}

static inline struct page *be_alloc_pages(u32 size, gfp_t gfp)
{
	u32 order = get_order(size);

	if (order > 0)
		gfp |= __GFP_COMP;
	return alloc_pages(gfp, order);
}

/*
 * Allocate a page, split it to fragments of size rx_frag_size and post as
 * receive buffers to BE
 */
static void be_post_rx_frags(struct be_rx_obj *rxo, gfp_t gfp)
{
	struct be_adapter *adapter = rxo->adapter;
	struct be_rx_page_info *page_info_tbl = rxo->page_info_tbl;
	struct be_rx_page_info *page_info = NULL, *prev_page_info = NULL;
	struct be_queue_info *rxq = &rxo->q;
	struct page *pagep = NULL;
	struct be_eth_rx_d *rxd;
	u64 page_dmaaddr = 0, frag_dmaaddr;
	u32 posted, page_offset = 0;

	page_info = &rxo->page_info_tbl[rxq->head];
	for (posted = 0; posted < MAX_RX_POST && !page_info->page; posted++) {
		if (!pagep) {
			pagep = be_alloc_pages(adapter->big_page_size, gfp);
			if (unlikely(!pagep)) {
				rxo->stats.rx_post_fail++;
				break;
			}
			page_dmaaddr = dma_map_page(&adapter->pdev->dev, pagep,
						    0, adapter->big_page_size,
						    DMA_FROM_DEVICE);
			page_info->page_offset = 0;
		} else {
			get_page(pagep);
			page_info->page_offset = page_offset + rx_frag_size;
		}
		page_offset = page_info->page_offset;
		page_info->page = pagep;
		dma_unmap_addr_set(page_info, bus, page_dmaaddr);
		frag_dmaaddr = page_dmaaddr + page_info->page_offset;

		rxd = queue_head_node(rxq);
		rxd->fragpa_lo = cpu_to_le32(frag_dmaaddr & 0xFFFFFFFF);
		rxd->fragpa_hi = cpu_to_le32(upper_32_bits(frag_dmaaddr));

		/* Any space left in the current big page for another frag? */
		if ((page_offset + rx_frag_size + rx_frag_size) >
					adapter->big_page_size) {
			pagep = NULL;
			page_info->last_page_user = true;
		}

		prev_page_info = page_info;
		queue_head_inc(rxq);
		page_info = &page_info_tbl[rxq->head];
	}
	if (pagep)
		prev_page_info->last_page_user = true;

	if (posted) {
		atomic_add(posted, &rxq->used);
		be_rxq_notify(adapter, rxq->id, posted);
	} else if (atomic_read(&rxq->used) == 0) {
		/* Let be_worker replenish when memory is available */
		rxo->rx_post_starved = true;
	}
}

static struct be_eth_tx_compl *be_tx_compl_get(struct be_queue_info *tx_cq)
{
	struct be_eth_tx_compl *txcp = queue_tail_node(tx_cq);

	if (txcp->dw[offsetof(struct amap_eth_tx_compl, valid) / 32] == 0)
		return NULL;

	rmb();
	be_dws_le_to_cpu(txcp, sizeof(*txcp));

	txcp->dw[offsetof(struct amap_eth_tx_compl, valid) / 32] = 0;

	queue_tail_inc(tx_cq);
	return txcp;
}

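/*
 * Free a completed TX skb: skip the header wrb, unmap each data wrb (the
 * first one covers the skb's linear data when skb_headlen() is non-zero),
 * advance the ring tail past last_index and drop the skb.
 */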
static void be_tx_compl_process(struct be_adapter *adapter, u16 last_index)
{
	struct be_queue_info *txq = &adapter->tx_obj.q;
	struct be_eth_wrb *wrb;
	struct sk_buff **sent_skbs = adapter->tx_obj.sent_skb_list;
	struct sk_buff *sent_skb;
	u16 cur_index, num_wrbs = 1; /* account for hdr wrb */
	bool unmap_skb_hdr = true;

	sent_skb = sent_skbs[txq->tail];
	BUG_ON(!sent_skb);
	sent_skbs[txq->tail] = NULL;

	/* skip header wrb */
	queue_tail_inc(txq);

	do {
		cur_index = txq->tail;
		wrb = queue_tail_node(txq);
		unmap_tx_frag(&adapter->pdev->dev, wrb,
			      (unmap_skb_hdr && skb_headlen(sent_skb)));
		unmap_skb_hdr = false;

		num_wrbs++;
		queue_tail_inc(txq);
	} while (cur_index != last_index);

	atomic_sub(num_wrbs, &txq->used);

	kfree_skb(sent_skb);
}

static inline struct be_eq_entry *event_get(struct be_eq_obj *eq_obj)
{
	struct be_eq_entry *eqe = queue_tail_node(&eq_obj->q);

	if (!eqe->evt)
		return NULL;

	rmb();
	eqe->evt = le32_to_cpu(eqe->evt);
	queue_tail_inc(&eq_obj->q);
	return eqe;
}

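/*
 * Drain every posted entry from the EQ, then ring the EQ doorbell with
 * arm=true so the consumed entries are acked and the interrupt is
 * re-enabled; NAPI is scheduled only when real events were seen, so a
 * spurious interrupt is acked without polling.
 */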
static int event_handle(struct be_adapter *adapter,
			struct be_eq_obj *eq_obj)
{
	struct be_eq_entry *eqe;
	u16 num = 0;

	while ((eqe = event_get(eq_obj)) != NULL) {
		eqe->evt = 0;
		num++;
	}

	/* Deal with any spurious interrupts that come without events */
	be_eq_notify(adapter, eq_obj->q.id, true, true, num);
	if (num)
		napi_schedule(&eq_obj->napi);

	return num;
}

/* Just read and notify events without processing them.
 * Used at the time of destroying event queues */
static void be_eq_clean(struct be_adapter *adapter,
			struct be_eq_obj *eq_obj)
{
	struct be_eq_entry *eqe;
	u16 num = 0;

	while ((eqe = event_get(eq_obj)) != NULL) {
		eqe->evt = 0;
		num++;
	}

	if (num)
		be_eq_notify(adapter, eq_obj->q.id, false, true, num);
}

static void be_rx_q_clean(struct be_adapter *adapter, struct be_rx_obj *rxo)
{
	struct be_rx_page_info *page_info;
	struct be_queue_info *rxq = &rxo->q;
	struct be_queue_info *rx_cq = &rxo->cq;
	struct be_rx_compl_info *rxcp;
	u16 tail;

	/* First clean up pending rx completions */
	while ((rxcp = be_rx_compl_get(rxo)) != NULL) {
		be_rx_compl_discard(adapter, rxo, rxcp);
		be_cq_notify(adapter, rx_cq->id, false, 1);
	}

	/* Then free posted rx buffers that were not used */
	tail = (rxq->head + rxq->len - atomic_read(&rxq->used)) % rxq->len;
	for (; atomic_read(&rxq->used) > 0; index_inc(&tail, rxq->len)) {
		page_info = get_rx_page_info(adapter, rxo, tail);
		put_page(page_info->page);
		memset(page_info, 0, sizeof(*page_info));
	}
	BUG_ON(atomic_read(&rxq->used));
}

static void be_tx_compl_clean(struct be_adapter *adapter)
{
	struct be_queue_info *tx_cq = &adapter->tx_obj.cq;
	struct be_queue_info *txq = &adapter->tx_obj.q;
	struct be_eth_tx_compl *txcp;
	u16 end_idx, cmpl = 0, timeo = 0;
	struct sk_buff **sent_skbs = adapter->tx_obj.sent_skb_list;
	struct sk_buff *sent_skb;
	bool dummy_wrb;

	/* Wait for a max of 200ms for all the tx-completions to arrive. */
	do {
		while ((txcp = be_tx_compl_get(tx_cq))) {
			end_idx = AMAP_GET_BITS(struct amap_eth_tx_compl,
						wrb_index, txcp);
			be_tx_compl_process(adapter, end_idx);
			cmpl++;
		}
		if (cmpl) {
			be_cq_notify(adapter, tx_cq->id, false, cmpl);
			cmpl = 0;
		}

		if (atomic_read(&txq->used) == 0 || ++timeo > 200)
			break;

		mdelay(1);
	} while (true);

	if (atomic_read(&txq->used))
		dev_err(&adapter->pdev->dev, "%d pending tx-completions\n",
			atomic_read(&txq->used));

	/* free posted tx for which compls will never arrive */
	while (atomic_read(&txq->used)) {
		sent_skb = sent_skbs[txq->tail];
		end_idx = txq->tail;
		index_adv(&end_idx,
			  wrb_cnt_for_skb(adapter, sent_skb, &dummy_wrb) - 1,
			  txq->len);
		be_tx_compl_process(adapter, end_idx);
	}
}

static void be_mcc_queues_destroy(struct be_adapter *adapter)
{
	struct be_queue_info *q;

	q = &adapter->mcc_obj.q;
	if (q->created)
		be_cmd_q_destroy(adapter, q, QTYPE_MCCQ);
	be_queue_free(adapter, q);

	q = &adapter->mcc_obj.cq;
	if (q->created)
		be_cmd_q_destroy(adapter, q, QTYPE_CQ);
	be_queue_free(adapter, q);
}

/* Must be called only after TX qs are created as MCC shares TX EQ */
static int be_mcc_queues_create(struct be_adapter *adapter)
{
	struct be_queue_info *q, *cq;

	/* Alloc MCC compl queue */
	cq = &adapter->mcc_obj.cq;
	if (be_queue_alloc(adapter, cq, MCC_CQ_LEN,
			   sizeof(struct be_mcc_compl)))
		goto err;

	/* Ask BE to create MCC compl queue; share TX's eq */
	if (be_cmd_cq_create(adapter, cq, &adapter->tx_eq.q, false, true, 0))
		goto mcc_cq_free;

	/* Alloc MCC queue */
	q = &adapter->mcc_obj.q;
	if (be_queue_alloc(adapter, q, MCC_Q_LEN, sizeof(struct be_mcc_wrb)))
		goto mcc_cq_destroy;

	/* Ask BE to create MCC queue */
	if (be_cmd_mccq_create(adapter, q, cq))
		goto mcc_q_free;

	return 0;

mcc_q_free:
	be_queue_free(adapter, q);
mcc_cq_destroy:
	be_cmd_q_destroy(adapter, cq, QTYPE_CQ);
mcc_cq_free:
	be_queue_free(adapter, cq);
err:
	return -1;
}

static void be_tx_queues_destroy(struct be_adapter *adapter)
{
	struct be_queue_info *q;

	q = &adapter->tx_obj.q;
	if (q->created)
		be_cmd_q_destroy(adapter, q, QTYPE_TXQ);
	be_queue_free(adapter, q);

	q = &adapter->tx_obj.cq;
	if (q->created)
		be_cmd_q_destroy(adapter, q, QTYPE_CQ);
	be_queue_free(adapter, q);

	/* Clear any residual events */
	be_eq_clean(adapter, &adapter->tx_eq);

	q = &adapter->tx_eq.q;
	if (q->created)
		be_cmd_q_destroy(adapter, q, QTYPE_EQ);
	be_queue_free(adapter, q);
}

static int be_tx_queues_create(struct be_adapter *adapter)
{
	struct be_queue_info *eq, *q, *cq;

	adapter->tx_eq.max_eqd = 0;
	adapter->tx_eq.min_eqd = 0;
	adapter->tx_eq.cur_eqd = 96;
	adapter->tx_eq.enable_aic = false;
	/* Alloc Tx Event queue */
	eq = &adapter->tx_eq.q;
	if (be_queue_alloc(adapter, eq, EVNT_Q_LEN, sizeof(struct be_eq_entry)))
		return -1;

	/* Ask BE to create Tx Event queue */
	if (be_cmd_eq_create(adapter, eq, adapter->tx_eq.cur_eqd))
		goto tx_eq_free;

	adapter->tx_eq.eq_idx = adapter->eq_next_idx++;

	/* Alloc TX eth compl queue */
	cq = &adapter->tx_obj.cq;
	if (be_queue_alloc(adapter, cq, TX_CQ_LEN,
			   sizeof(struct be_eth_tx_compl)))
		goto tx_eq_destroy;

	/* Ask BE to create Tx eth compl queue */
	if (be_cmd_cq_create(adapter, cq, eq, false, false, 3))
		goto tx_cq_free;

	/* Alloc TX eth queue */
	q = &adapter->tx_obj.q;
	if (be_queue_alloc(adapter, q, TX_Q_LEN, sizeof(struct be_eth_wrb)))
		goto tx_cq_destroy;

	/* Ask BE to create Tx eth queue */
	if (be_cmd_txq_create(adapter, q, cq))
		goto tx_q_free;
	return 0;

tx_q_free:
	be_queue_free(adapter, q);
tx_cq_destroy:
	be_cmd_q_destroy(adapter, cq, QTYPE_CQ);
tx_cq_free:
	be_queue_free(adapter, cq);
tx_eq_destroy:
	be_cmd_q_destroy(adapter, eq, QTYPE_EQ);
tx_eq_free:
	be_queue_free(adapter, eq);
	return -1;
}

static void be_rx_queues_destroy(struct be_adapter *adapter)
{
	struct be_queue_info *q;
	struct be_rx_obj *rxo;
	int i;

	for_all_rx_queues(adapter, rxo, i) {
		q = &rxo->q;
		if (q->created) {
			be_cmd_q_destroy(adapter, q, QTYPE_RXQ);
			/* After the rxq is invalidated, wait for a grace time
			 * of 1ms for all dma to end and the flush compl to
			 * arrive
			 */
			mdelay(1);
			be_rx_q_clean(adapter, rxo);
		}
		be_queue_free(adapter, q);

		q = &rxo->cq;
		if (q->created)
			be_cmd_q_destroy(adapter, q, QTYPE_CQ);
		be_queue_free(adapter, q);

		/* Clear any residual events */
		q = &rxo->rx_eq.q;
		if (q->created) {
			be_eq_clean(adapter, &rxo->rx_eq);
			be_cmd_q_destroy(adapter, q, QTYPE_EQ);
		}
		be_queue_free(adapter, q);
	}
}

static u32 be_num_rxqs_want(struct be_adapter *adapter)
{
	if (multi_rxq && (adapter->function_caps & BE_FUNCTION_CAPS_RSS) &&
	    !adapter->sriov_enabled && !(adapter->function_mode & 0x400)) {
		return 1 + MAX_RSS_QS; /* one default non-RSS queue */
	} else {
		dev_warn(&adapter->pdev->dev,
			 "No support for multiple RX queues\n");
		return 1;
	}
}

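/*
 * Create one EQ/CQ/RXQ trio per RX object. Ring 0 is the default
 * (non-RSS) queue; rings 1..N-1 are created with RSS enabled and their
 * rss_ids are then programmed into the RSS table via be_cmd_rss_config().
 */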
static int be_rx_queues_create(struct be_adapter *adapter)
{
	struct be_queue_info *eq, *q, *cq;
	struct be_rx_obj *rxo;
	int rc, i;

	adapter->num_rx_qs = min(be_num_rxqs_want(adapter),
				 msix_enabled(adapter) ?
				 adapter->num_msix_vec - 1 : 1);
	if (adapter->num_rx_qs != MAX_RX_QS)
		dev_warn(&adapter->pdev->dev,
			 "Can create only %d RX queues", adapter->num_rx_qs);

	adapter->big_page_size = (1 << get_order(rx_frag_size)) * PAGE_SIZE;
	for_all_rx_queues(adapter, rxo, i) {
		rxo->adapter = adapter;
		rxo->rx_eq.max_eqd = BE_MAX_EQD;
		rxo->rx_eq.enable_aic = true;

		/* EQ */
		eq = &rxo->rx_eq.q;
		rc = be_queue_alloc(adapter, eq, EVNT_Q_LEN,
				    sizeof(struct be_eq_entry));
		if (rc)
			goto err;

		rc = be_cmd_eq_create(adapter, eq, rxo->rx_eq.cur_eqd);
		if (rc)
			goto err;

		rxo->rx_eq.eq_idx = adapter->eq_next_idx++;

		/* CQ */
		cq = &rxo->cq;
		rc = be_queue_alloc(adapter, cq, RX_CQ_LEN,
				    sizeof(struct be_eth_rx_compl));
		if (rc)
			goto err;

		rc = be_cmd_cq_create(adapter, cq, eq, false, false, 3);
		if (rc)
			goto err;
		/* Rx Q */
		q = &rxo->q;
		rc = be_queue_alloc(adapter, q, RX_Q_LEN,
				    sizeof(struct be_eth_rx_d));
		if (rc)
			goto err;

		rc = be_cmd_rxq_create(adapter, q, cq->id, rx_frag_size,
				       BE_MAX_JUMBO_FRAME_SIZE,
				       adapter->if_handle,
				       (i > 0) ? 1 : 0 /* rss enable */,
				       &rxo->rss_id);
		if (rc)
			goto err;
	}

	if (be_multi_rxq(adapter)) {
		u8 rsstable[MAX_RSS_QS];

		for_all_rss_queues(adapter, rxo, i)
			rsstable[i] = rxo->rss_id;

		rc = be_cmd_rss_config(adapter, rsstable,
				       adapter->num_rx_qs - 1);
		if (rc)
			goto err;
	}

	return 0;
err:
	be_rx_queues_destroy(adapter);
	return -1;
}

static bool event_peek(struct be_eq_obj *eq_obj)
{
	struct be_eq_entry *eqe = queue_tail_node(&eq_obj->q);

	return eqe->evt != 0;
}

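/*
 * Legacy INTx handler. On Lancer the handler peeks at each EQ directly to
 * find work; on BE2/BE3 it reads the CEV_ISR register for this function
 * and matches the set bits against the eq_idx assigned when each EQ was
 * created.
 */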
static irqreturn_t be_intx(int irq, void *dev)
{
	struct be_adapter *adapter = dev;
	struct be_rx_obj *rxo;
	int isr, i, tx = 0, rx = 0;

	if (lancer_chip(adapter)) {
		if (event_peek(&adapter->tx_eq))
			tx = event_handle(adapter, &adapter->tx_eq);
		for_all_rx_queues(adapter, rxo, i) {
			if (event_peek(&rxo->rx_eq))
				rx |= event_handle(adapter, &rxo->rx_eq);
		}

		if (!(tx || rx))
			return IRQ_NONE;

	} else {
		isr = ioread32(adapter->csr + CEV_ISR0_OFFSET +
			       (adapter->tx_eq.q.id / 8) * CEV_ISR_SIZE);
		if (!isr)
			return IRQ_NONE;

		if ((1 << adapter->tx_eq.eq_idx & isr))
			event_handle(adapter, &adapter->tx_eq);

		for_all_rx_queues(adapter, rxo, i) {
			if ((1 << rxo->rx_eq.eq_idx & isr))
				event_handle(adapter, &rxo->rx_eq);
		}
	}

	return IRQ_HANDLED;
}

static irqreturn_t be_msix_rx(int irq, void *dev)
{
	struct be_rx_obj *rxo = dev;
	struct be_adapter *adapter = rxo->adapter;

	event_handle(adapter, &rxo->rx_eq);

	return IRQ_HANDLED;
}

static irqreturn_t be_msix_tx_mcc(int irq, void *dev)
{
	struct be_adapter *adapter = dev;

	event_handle(adapter, &adapter->tx_eq);

	return IRQ_HANDLED;
}

static inline bool do_gro(struct be_rx_compl_info *rxcp)
{
	return rxcp->tcpf && !rxcp->err;
}

static int be_poll_rx(struct napi_struct *napi, int budget)
{
	struct be_eq_obj *rx_eq = container_of(napi, struct be_eq_obj, napi);
	struct be_rx_obj *rxo = container_of(rx_eq, struct be_rx_obj, rx_eq);
	struct be_adapter *adapter = rxo->adapter;
	struct be_queue_info *rx_cq = &rxo->cq;
	struct be_rx_compl_info *rxcp;
	u32 work_done;

	rxo->stats.rx_polls++;
	for (work_done = 0; work_done < budget; work_done++) {
		rxcp = be_rx_compl_get(rxo);
		if (!rxcp)
			break;

		/* Ignore flush completions */
		if (rxcp->num_rcvd && rxcp->pkt_size) {
			if (do_gro(rxcp))
				be_rx_compl_process_gro(adapter, rxo, rxcp);
			else
				be_rx_compl_process(adapter, rxo, rxcp);
		} else if (rxcp->pkt_size == 0) {
			be_rx_compl_discard(adapter, rxo, rxcp);
		}

		be_rx_stats_update(rxo, rxcp);
	}

	/* Refill the queue */
	if (atomic_read(&rxo->q.used) < RX_FRAGS_REFILL_WM)
		be_post_rx_frags(rxo, GFP_ATOMIC);

	/* All consumed */
	if (work_done < budget) {
		napi_complete(napi);
		be_cq_notify(adapter, rx_cq->id, true, work_done);
	} else {
		/* More to be consumed; continue with interrupts disabled */
		be_cq_notify(adapter, rx_cq->id, false, work_done);
	}
	return work_done;
}

/* As TX and MCC share the same EQ, check for both TX and MCC completions.
 * For TX/MCC we don't honour budget; consume everything
 */
static int be_poll_tx_mcc(struct napi_struct *napi, int budget)
{
	struct be_eq_obj *tx_eq = container_of(napi, struct be_eq_obj, napi);
	struct be_adapter *adapter =
		container_of(tx_eq, struct be_adapter, tx_eq);
	struct be_queue_info *txq = &adapter->tx_obj.q;
	struct be_queue_info *tx_cq = &adapter->tx_obj.cq;
	struct be_eth_tx_compl *txcp;
	int tx_compl = 0, mcc_compl, status = 0;
	u16 end_idx;

	while ((txcp = be_tx_compl_get(tx_cq))) {
		end_idx = AMAP_GET_BITS(struct amap_eth_tx_compl,
					wrb_index, txcp);
		be_tx_compl_process(adapter, end_idx);
		tx_compl++;
	}

	mcc_compl = be_process_mcc(adapter, &status);

	napi_complete(napi);

	if (mcc_compl) {
		struct be_mcc_obj *mcc_obj = &adapter->mcc_obj;
		be_cq_notify(adapter, mcc_obj->cq.id, true, mcc_compl);
	}

	if (tx_compl) {
		be_cq_notify(adapter, adapter->tx_obj.cq.id, true, tx_compl);

		/* As Tx wrbs have been freed up, wake up netdev queue if
		 * it was stopped due to lack of tx wrbs.
		 */
		if (netif_queue_stopped(adapter->netdev) &&
		    atomic_read(&txq->used) < txq->len / 2) {
			netif_wake_queue(adapter->netdev);
		}

		tx_stats(adapter)->be_tx_events++;
		tx_stats(adapter)->be_tx_compl += tx_compl;
	}

	return 1;
}

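/*
 * Read the UE (unrecoverable error) status registers; bits set in the
 * corresponding mask registers are ignored. If anything remains set, flag
 * ue_detected/eeh_err and log one line per failing block (names come from
 * the ue_status_low_desc/ue_status_hi_desc tables).
 */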
void be_detect_dump_ue(struct be_adapter *adapter)
{
	u32 ue_status_lo, ue_status_hi, ue_status_lo_mask, ue_status_hi_mask;
	u32 i;

	pci_read_config_dword(adapter->pdev,
			      PCICFG_UE_STATUS_LOW, &ue_status_lo);
	pci_read_config_dword(adapter->pdev,
			      PCICFG_UE_STATUS_HIGH, &ue_status_hi);
	pci_read_config_dword(adapter->pdev,
			      PCICFG_UE_STATUS_LOW_MASK, &ue_status_lo_mask);
	pci_read_config_dword(adapter->pdev,
			      PCICFG_UE_STATUS_HI_MASK, &ue_status_hi_mask);

	ue_status_lo = (ue_status_lo & (~ue_status_lo_mask));
	ue_status_hi = (ue_status_hi & (~ue_status_hi_mask));

	if (ue_status_lo || ue_status_hi) {
		adapter->ue_detected = true;
		adapter->eeh_err = true;
		dev_err(&adapter->pdev->dev, "UE Detected!!\n");
	}

	if (ue_status_lo) {
		for (i = 0; ue_status_lo; ue_status_lo >>= 1, i++) {
			if (ue_status_lo & 1)
				dev_err(&adapter->pdev->dev,
					"UE: %s bit set\n",
					ue_status_low_desc[i]);
		}
	}
	if (ue_status_hi) {
		for (i = 0; ue_status_hi; ue_status_hi >>= 1, i++) {
			if (ue_status_hi & 1)
				dev_err(&adapter->pdev->dev,
					"UE: %s bit set\n",
					ue_status_hi_desc[i]);
		}
	}
}

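/*
 * 1-second housekeeping: look for unrecoverable errors, kick off the
 * stats command, refresh TX/RX rate and EQ-delay estimates, and replenish
 * any RX ring that starved while memory was tight.
 */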
static void be_worker(struct work_struct *work)
{
	struct be_adapter *adapter =
		container_of(work, struct be_adapter, work.work);
	struct be_rx_obj *rxo;
	int i;

	if (!adapter->ue_detected && !lancer_chip(adapter))
		be_detect_dump_ue(adapter);

	/* when interrupts are not yet enabled, just reap any pending
	 * mcc completions */
	if (!netif_running(adapter->netdev)) {
		int mcc_compl, status = 0;

		mcc_compl = be_process_mcc(adapter, &status);

		if (mcc_compl) {
			struct be_mcc_obj *mcc_obj = &adapter->mcc_obj;
			be_cq_notify(adapter, mcc_obj->cq.id, false, mcc_compl);
		}

		goto reschedule;
	}

	if (!adapter->stats_cmd_sent)
		be_cmd_get_stats(adapter, &adapter->stats_cmd);

	be_tx_rate_update(adapter);

	for_all_rx_queues(adapter, rxo, i) {
		be_rx_rate_update(rxo);
		be_rx_eqd_update(adapter, rxo);

		if (rxo->rx_post_starved) {
			rxo->rx_post_starved = false;
			be_post_rx_frags(rxo, GFP_KERNEL);
		}
	}

reschedule:
	adapter->work_counter++;
	schedule_delayed_work(&adapter->work, msecs_to_jiffies(1000));
}

static void be_msix_disable(struct be_adapter *adapter)
{
	if (msix_enabled(adapter)) {
		pci_disable_msix(adapter->pdev);
		adapter->num_msix_vec = 0;
	}
}

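/*
 * Try to get one MSI-X vector per wanted RX queue plus one for TX/MCC.
 * pci_enable_msix() returns 0 on success or, on shortage, the number of
 * vectors that could have been allocated; if that is at least
 * BE_MIN_MSIX_VECTORS, retry with the reduced count, else stay on INTx.
 */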
static void be_msix_enable(struct be_adapter *adapter)
{
#define BE_MIN_MSIX_VECTORS	(1 + 1) /* Rx + Tx */
	int i, status, num_vec;

	num_vec = be_num_rxqs_want(adapter) + 1;

	for (i = 0; i < num_vec; i++)
		adapter->msix_entries[i].entry = i;

	status = pci_enable_msix(adapter->pdev, adapter->msix_entries, num_vec);
	if (status == 0) {
		goto done;
	} else if (status >= BE_MIN_MSIX_VECTORS) {
		num_vec = status;
		if (pci_enable_msix(adapter->pdev, adapter->msix_entries,
				    num_vec) == 0)
			goto done;
	}
	return;
done:
	adapter->num_msix_vec = num_vec;
	return;
}

static void be_sriov_enable(struct be_adapter *adapter)
{
	be_check_sriov_fn_type(adapter);
#ifdef CONFIG_PCI_IOV
	if (be_physfn(adapter) && num_vfs) {
		int status, pos;
		u16 nvfs;

		pos = pci_find_ext_capability(adapter->pdev,
					      PCI_EXT_CAP_ID_SRIOV);
		pci_read_config_word(adapter->pdev,
				     pos + PCI_SRIOV_TOTAL_VF, &nvfs);

		if (num_vfs > nvfs) {
			dev_info(&adapter->pdev->dev,
				 "Device supports %d VFs and not %d\n",
				 nvfs, num_vfs);
			num_vfs = nvfs;
		}

		status = pci_enable_sriov(adapter->pdev, num_vfs);
		adapter->sriov_enabled = status ? false : true;
	}
#endif
}

static void be_sriov_disable(struct be_adapter *adapter)
{
#ifdef CONFIG_PCI_IOV
	if (adapter->sriov_enabled) {
		pci_disable_sriov(adapter->pdev);
		adapter->sriov_enabled = false;
	}
#endif
}

static inline int be_msix_vec_get(struct be_adapter *adapter,
				  struct be_eq_obj *eq_obj)
{
	return adapter->msix_entries[eq_obj->eq_idx].vector;
}

static int be_request_irq(struct be_adapter *adapter,
			  struct be_eq_obj *eq_obj,
			  void *handler, char *desc, void *context)
{
	struct net_device *netdev = adapter->netdev;
	int vec;

	sprintf(eq_obj->desc, "%s-%s", netdev->name, desc);
	vec = be_msix_vec_get(adapter, eq_obj);
	return request_irq(vec, handler, 0, eq_obj->desc, context);
}

static void be_free_irq(struct be_adapter *adapter, struct be_eq_obj *eq_obj,
			void *context)
{
	int vec = be_msix_vec_get(adapter, eq_obj);

	free_irq(vec, context);
}

static int be_msix_register(struct be_adapter *adapter)
{
	struct be_rx_obj *rxo;
	int status, i;
	char qname[10];

	status = be_request_irq(adapter, &adapter->tx_eq, be_msix_tx_mcc, "tx",
				adapter);
	if (status)
		goto err;

	for_all_rx_queues(adapter, rxo, i) {
		sprintf(qname, "rxq%d", i);
		status = be_request_irq(adapter, &rxo->rx_eq, be_msix_rx,
					qname, rxo);
		if (status)
			goto err_msix;
	}

	return 0;

err_msix:
	be_free_irq(adapter, &adapter->tx_eq, adapter);

	for (i--, rxo = &adapter->rx_obj[i]; i >= 0; i--, rxo--)
		be_free_irq(adapter, &rxo->rx_eq, rxo);

err:
	dev_warn(&adapter->pdev->dev,
		 "MSIX Request IRQ failed - err %d\n", status);
	be_msix_disable(adapter);
	return status;
}

static int be_irq_register(struct be_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	int status;

	if (msix_enabled(adapter)) {
		status = be_msix_register(adapter);
		if (status == 0)
			goto done;
		/* INTx is not supported for VF */
		if (!be_physfn(adapter))
			return status;
	}

	/* INTx */
	netdev->irq = adapter->pdev->irq;
	status = request_irq(netdev->irq, be_intx, IRQF_SHARED, netdev->name,
			     adapter);
	if (status) {
		dev_err(&adapter->pdev->dev,
			"INTx request IRQ failed - err %d\n", status);
		return status;
	}
done:
	adapter->isr_registered = true;
	return 0;
}

static void be_irq_unregister(struct be_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	struct be_rx_obj *rxo;
	int i;

	if (!adapter->isr_registered)
		return;

	/* INTx */
	if (!msix_enabled(adapter)) {
		free_irq(netdev->irq, adapter);
		goto done;
	}

	/* MSIx */
	be_free_irq(adapter, &adapter->tx_eq, adapter);

	for_all_rx_queues(adapter, rxo, i)
		be_free_irq(adapter, &rxo->rx_eq, rxo);

done:
	adapter->isr_registered = false;
}

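/*
 * Quiesce in order: stop async MCC processing, mark the link down, mask
 * the interrupt, disable NAPI, synchronize and free the IRQs, then drain
 * outstanding TX completions so every in-flight skb is unmapped and
 * freed.
 */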
static int be_close(struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_rx_obj *rxo;
	struct be_eq_obj *tx_eq = &adapter->tx_eq;
	int vec, i;

	be_async_mcc_disable(adapter);

	netif_carrier_off(netdev);
	adapter->link_up = false;

	if (!lancer_chip(adapter))
		be_intr_set(adapter, false);

	for_all_rx_queues(adapter, rxo, i)
		napi_disable(&rxo->rx_eq.napi);

	napi_disable(&tx_eq->napi);

	if (lancer_chip(adapter)) {
		be_cq_notify(adapter, adapter->tx_obj.cq.id, false, 0);
		be_cq_notify(adapter, adapter->mcc_obj.cq.id, false, 0);
		for_all_rx_queues(adapter, rxo, i)
			be_cq_notify(adapter, rxo->cq.id, false, 0);
	}

	if (msix_enabled(adapter)) {
		vec = be_msix_vec_get(adapter, tx_eq);
		synchronize_irq(vec);

		for_all_rx_queues(adapter, rxo, i) {
			vec = be_msix_vec_get(adapter, &rxo->rx_eq);
			synchronize_irq(vec);
		}
	} else {
		synchronize_irq(netdev->irq);
	}
	be_irq_unregister(adapter);

	/* Wait for all pending tx completions to arrive so that
	 * all tx skbs are freed.
	 */
	be_tx_compl_clean(adapter);

	return 0;
}

static int be_open(struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_eq_obj *tx_eq = &adapter->tx_eq;
	struct be_rx_obj *rxo;
	bool link_up;
	int status, i;
	u8 mac_speed;
	u16 link_speed;

	for_all_rx_queues(adapter, rxo, i) {
		be_post_rx_frags(rxo, GFP_KERNEL);
		napi_enable(&rxo->rx_eq.napi);
	}
	napi_enable(&tx_eq->napi);

	be_irq_register(adapter);

	if (!lancer_chip(adapter))
		be_intr_set(adapter, true);

	/* The evt queues are created in unarmed state; arm them */
	for_all_rx_queues(adapter, rxo, i) {
		be_eq_notify(adapter, rxo->rx_eq.q.id, true, false, 0);
		be_cq_notify(adapter, rxo->cq.id, true, 0);
	}
	be_eq_notify(adapter, tx_eq->q.id, true, false, 0);

	/* Now that interrupts are on we can process async mcc */
	be_async_mcc_enable(adapter);

	status = be_cmd_link_status_query(adapter, &link_up, &mac_speed,
					  &link_speed, 0);
	if (status)
		goto err;
	be_link_status_update(adapter, link_up);

	if (be_physfn(adapter)) {
		status = be_vid_config(adapter, false, 0);
		if (status)
			goto err;

		status = be_cmd_set_flow_control(adapter,
						 adapter->tx_fc,
						 adapter->rx_fc);
		if (status)
			goto err;
	}

	return 0;
err:
	be_close(adapter->netdev);
	return -EIO;
}

static int be_setup_wol(struct be_adapter *adapter, bool enable)
{
	struct be_dma_mem cmd;
	int status = 0;
	u8 mac[ETH_ALEN];

	memset(mac, 0, ETH_ALEN);

	cmd.size = sizeof(struct be_cmd_req_acpi_wol_magic_config);
	cmd.va = dma_alloc_coherent(&adapter->pdev->dev, cmd.size, &cmd.dma,
				    GFP_KERNEL);
	if (cmd.va == NULL)
		return -1;
	memset(cmd.va, 0, cmd.size);

	if (enable) {
		status = pci_write_config_dword(adapter->pdev,
			PCICFG_PM_CONTROL_OFFSET, PCICFG_PM_CONTROL_MASK);
		if (status) {
			dev_err(&adapter->pdev->dev,
				"Could not enable Wake-on-lan\n");
			dma_free_coherent(&adapter->pdev->dev, cmd.size,
					  cmd.va, cmd.dma);
			return status;
		}
		status = be_cmd_enable_magic_wol(adapter,
						 adapter->netdev->dev_addr,
						 &cmd);
		pci_enable_wake(adapter->pdev, PCI_D3hot, 1);
		pci_enable_wake(adapter->pdev, PCI_D3cold, 1);
	} else {
		status = be_cmd_enable_magic_wol(adapter, mac, &cmd);
		pci_enable_wake(adapter->pdev, PCI_D3hot, 0);
		pci_enable_wake(adapter->pdev, PCI_D3cold, 0);
	}

	dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va, cmd.dma);
	return status;
}

/*
 * Generate a seed MAC address from the PF MAC address using jhash.
 * MAC addresses for VFs are assigned incrementally starting from the seed.
 * These addresses are programmed in the ASIC by the PF and the VF driver
 * queries for the MAC address during its probe.
 */
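/*
 * For example (illustrative values): if the generated seed's last octet
 * is 0x10, VF 0 is given the seed itself, VF 1 gets a last octet of 0x11,
 * and so on; only mac[5], the final octet, is incremented per VF below.
 */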
static inline int be_vf_eth_addr_config(struct be_adapter *adapter)
{
	u32 vf = 0;
	int status = 0;
	u8 mac[ETH_ALEN];

	be_vf_eth_addr_generate(adapter, mac);

	for (vf = 0; vf < num_vfs; vf++) {
		status = be_cmd_pmac_add(adapter, mac,
					 adapter->vf_cfg[vf].vf_if_handle,
					 &adapter->vf_cfg[vf].vf_pmac_id,
					 vf + 1);
		if (status)
			dev_err(&adapter->pdev->dev,
				"Mac address add failed for VF %d\n", vf);
		else
			memcpy(adapter->vf_cfg[vf].vf_mac_addr, mac, ETH_ALEN);

		mac[5] += 1;
	}
	return status;
}

static inline void be_vf_eth_addr_rem(struct be_adapter *adapter)
{
	u32 vf;

	for (vf = 0; vf < num_vfs; vf++) {
		if (adapter->vf_cfg[vf].vf_pmac_id != BE_INVALID_PMAC_ID)
			be_cmd_pmac_del(adapter,
					adapter->vf_cfg[vf].vf_if_handle,
					adapter->vf_cfg[vf].vf_pmac_id,
					vf + 1);
	}
}

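/*
 * Bring-up path: create the PF interface with the full capability set
 * (promiscuous modes, L3/L4 error passing, RSS when available), create a
 * minimal untagged/broadcast interface per VF, then the TX, RX and MCC
 * queues, unwinding in reverse on any failure.
 */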
static int be_setup(struct be_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	u32 cap_flags, en_flags, vf = 0;
	int status;
	u8 mac[ETH_ALEN];

	cap_flags = en_flags = BE_IF_FLAGS_UNTAGGED |
			       BE_IF_FLAGS_BROADCAST |
			       BE_IF_FLAGS_MULTICAST;

	if (be_physfn(adapter)) {
		cap_flags |= BE_IF_FLAGS_MCAST_PROMISCUOUS |
			     BE_IF_FLAGS_PROMISCUOUS |
			     BE_IF_FLAGS_PASS_L3L4_ERRORS;
		en_flags |= BE_IF_FLAGS_PASS_L3L4_ERRORS;

		if (adapter->function_caps & BE_FUNCTION_CAPS_RSS) {
			cap_flags |= BE_IF_FLAGS_RSS;
			en_flags |= BE_IF_FLAGS_RSS;
		}
	}

	status = be_cmd_if_create(adapter, cap_flags, en_flags,
				  netdev->dev_addr, false /* pmac_invalid */,
				  &adapter->if_handle, &adapter->pmac_id, 0);
	if (status != 0)
		goto do_none;

	if (be_physfn(adapter)) {
		if (adapter->sriov_enabled) {
			while (vf < num_vfs) {
				cap_flags = en_flags = BE_IF_FLAGS_UNTAGGED |
						       BE_IF_FLAGS_BROADCAST;
				status = be_cmd_if_create(adapter, cap_flags,
					en_flags, mac, true,
					&adapter->vf_cfg[vf].vf_if_handle,
					NULL, vf + 1);
				if (status) {
					dev_err(&adapter->pdev->dev,
						"Interface Create failed for VF %d\n",
						vf);
					goto if_destroy;
				}
				adapter->vf_cfg[vf].vf_pmac_id =
					BE_INVALID_PMAC_ID;
				vf++;
			}
		}
	} else {
		status = be_cmd_mac_addr_query(adapter, mac,
			MAC_ADDRESS_TYPE_NETWORK, false, adapter->if_handle);
		if (!status) {
			memcpy(adapter->netdev->dev_addr, mac, ETH_ALEN);
			memcpy(adapter->netdev->perm_addr, mac, ETH_ALEN);
		}
	}

	status = be_tx_queues_create(adapter);
	if (status != 0)
		goto if_destroy;

	status = be_rx_queues_create(adapter);
	if (status != 0)
		goto tx_qs_destroy;

	status = be_mcc_queues_create(adapter);
	if (status != 0)
		goto rx_qs_destroy;

	adapter->link_speed = -1;

	return 0;

rx_qs_destroy:
	be_rx_queues_destroy(adapter);
tx_qs_destroy:
	be_tx_queues_destroy(adapter);
if_destroy:
	if (be_physfn(adapter) && adapter->sriov_enabled)
		for (vf = 0; vf < num_vfs; vf++)
			if (adapter->vf_cfg[vf].vf_if_handle)
				be_cmd_if_destroy(adapter,
					adapter->vf_cfg[vf].vf_if_handle,
					vf + 1);
	be_cmd_if_destroy(adapter, adapter->if_handle, 0);
do_none:
	return status;
}

static int be_clear(struct be_adapter *adapter)
{
	int vf;

	if (be_physfn(adapter) && adapter->sriov_enabled)
		be_vf_eth_addr_rem(adapter);

	be_mcc_queues_destroy(adapter);
	be_rx_queues_destroy(adapter);
	be_tx_queues_destroy(adapter);
	adapter->eq_next_idx = 0;

	if (be_physfn(adapter) && adapter->sriov_enabled)
		for (vf = 0; vf < num_vfs; vf++)
			if (adapter->vf_cfg[vf].vf_if_handle)
				be_cmd_if_destroy(adapter,
					adapter->vf_cfg[vf].vf_if_handle,
					vf + 1);

	be_cmd_if_destroy(adapter, adapter->if_handle, 0);

	/* tell fw we're done with firing cmds */
	be_cmd_fw_clean(adapter);
	return 0;
}

#define FW_FILE_HDR_SIGN	"ServerEngines Corp. "
static bool be_flash_redboot(struct be_adapter *adapter,
			     const u8 *p, u32 img_start, int image_size,
			     int hdr_size)
{
	u32 crc_offset;
	u8 flashed_crc[4];
	int status;

	crc_offset = hdr_size + img_start + image_size - 4;

	p += crc_offset;

	status = be_cmd_get_flash_crc(adapter, flashed_crc,
				      (image_size - 4));
	if (status) {
		dev_err(&adapter->pdev->dev,
			"could not get crc from flash, not flashing redboot\n");
		return false;
	}

	/* update redboot only if crc does not match */
	if (!memcmp(flashed_crc, p, 4))
		return false;
	else
		return true;
}

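/*
 * Walk the UFI file component by component using the gen2/gen3 layout
 * tables below (flash offset, image type, max size per component) and
 * write each image out in 32KB chunks: FLASHROM_OPER_SAVE for
 * intermediate chunks, FLASHROM_OPER_FLASH for the final one.
 */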
Ajit Khaparde3f0d4562010-02-09 01:30:35 +00002429static int be_flash_data(struct be_adapter *adapter,
Ajit Khaparde84517482009-09-04 03:12:16 +00002430 const struct firmware *fw,
Ajit Khaparde3f0d4562010-02-09 01:30:35 +00002431 struct be_dma_mem *flash_cmd, int num_of_images)
2432
Ajit Khaparde84517482009-09-04 03:12:16 +00002433{
Ajit Khaparde3f0d4562010-02-09 01:30:35 +00002434 int status = 0, i, filehdr_size = 0;
2435 u32 total_bytes = 0, flash_op;
Ajit Khaparde84517482009-09-04 03:12:16 +00002436 int num_bytes;
2437 const u8 *p = fw->data;
2438 struct be_cmd_write_flashrom *req = flash_cmd->va;
Joe Perches215faf92010-12-21 02:16:10 -08002439 const struct flash_comp *pflashcomp;
Sarveshwar Bandi9fe96932010-03-02 22:37:28 +00002440 int num_comp;
Ajit Khaparde84517482009-09-04 03:12:16 +00002441
Joe Perches215faf92010-12-21 02:16:10 -08002442 static const struct flash_comp gen3_flash_types[9] = {
Ajit Khaparde3f0d4562010-02-09 01:30:35 +00002443 { FLASH_iSCSI_PRIMARY_IMAGE_START_g3, IMG_TYPE_ISCSI_ACTIVE,
2444 FLASH_IMAGE_MAX_SIZE_g3},
2445 { FLASH_REDBOOT_START_g3, IMG_TYPE_REDBOOT,
2446 FLASH_REDBOOT_IMAGE_MAX_SIZE_g3},
2447 { FLASH_iSCSI_BIOS_START_g3, IMG_TYPE_BIOS,
2448 FLASH_BIOS_IMAGE_MAX_SIZE_g3},
2449 { FLASH_PXE_BIOS_START_g3, IMG_TYPE_PXE_BIOS,
2450 FLASH_BIOS_IMAGE_MAX_SIZE_g3},
2451 { FLASH_FCoE_BIOS_START_g3, IMG_TYPE_FCOE_BIOS,
2452 FLASH_BIOS_IMAGE_MAX_SIZE_g3},
2453 { FLASH_iSCSI_BACKUP_IMAGE_START_g3, IMG_TYPE_ISCSI_BACKUP,
2454 FLASH_IMAGE_MAX_SIZE_g3},
2455 { FLASH_FCoE_PRIMARY_IMAGE_START_g3, IMG_TYPE_FCOE_FW_ACTIVE,
2456 FLASH_IMAGE_MAX_SIZE_g3},
2457 { FLASH_FCoE_BACKUP_IMAGE_START_g3, IMG_TYPE_FCOE_FW_BACKUP,
Sarveshwar Bandi9fe96932010-03-02 22:37:28 +00002458 FLASH_IMAGE_MAX_SIZE_g3},
2459 { FLASH_NCSI_START_g3, IMG_TYPE_NCSI_FW,
2460 FLASH_NCSI_IMAGE_MAX_SIZE_g3}
Ajit Khaparde3f0d4562010-02-09 01:30:35 +00002461 };
Joe Perches215faf92010-12-21 02:16:10 -08002462 static const struct flash_comp gen2_flash_types[8] = {
Ajit Khaparde3f0d4562010-02-09 01:30:35 +00002463 { FLASH_iSCSI_PRIMARY_IMAGE_START_g2, IMG_TYPE_ISCSI_ACTIVE,
2464 FLASH_IMAGE_MAX_SIZE_g2},
2465 { FLASH_REDBOOT_START_g2, IMG_TYPE_REDBOOT,
2466 FLASH_REDBOOT_IMAGE_MAX_SIZE_g2},
2467 { FLASH_iSCSI_BIOS_START_g2, IMG_TYPE_BIOS,
2468 FLASH_BIOS_IMAGE_MAX_SIZE_g2},
2469 { FLASH_PXE_BIOS_START_g2, IMG_TYPE_PXE_BIOS,
2470 FLASH_BIOS_IMAGE_MAX_SIZE_g2},
2471 { FLASH_FCoE_BIOS_START_g2, IMG_TYPE_FCOE_BIOS,
2472 FLASH_BIOS_IMAGE_MAX_SIZE_g2},
2473 { FLASH_iSCSI_BACKUP_IMAGE_START_g2, IMG_TYPE_ISCSI_BACKUP,
2474 FLASH_IMAGE_MAX_SIZE_g2},
2475 { FLASH_FCoE_PRIMARY_IMAGE_START_g2, IMG_TYPE_FCOE_FW_ACTIVE,
2476 FLASH_IMAGE_MAX_SIZE_g2},
2477 { FLASH_FCoE_BACKUP_IMAGE_START_g2, IMG_TYPE_FCOE_FW_BACKUP,
2478 FLASH_IMAGE_MAX_SIZE_g2}
2479 };
2480
2481 if (adapter->generation == BE_GEN3) {
2482 pflashcomp = gen3_flash_types;
2483 filehdr_size = sizeof(struct flash_file_hdr_g3);
Joe Perches215faf92010-12-21 02:16:10 -08002484 num_comp = ARRAY_SIZE(gen3_flash_types);
Ajit Khaparde3f0d4562010-02-09 01:30:35 +00002485 } else {
2486 pflashcomp = gen2_flash_types;
2487 filehdr_size = sizeof(struct flash_file_hdr_g2);
Joe Perches215faf92010-12-21 02:16:10 -08002488 num_comp = ARRAY_SIZE(gen2_flash_types);
Ajit Khaparde84517482009-09-04 03:12:16 +00002489 }
	for (i = 0; i < num_comp; i++) {
		if ((pflashcomp[i].optype == IMG_TYPE_NCSI_FW) &&
			memcmp(adapter->fw_ver, "3.102.148.0", 11) < 0)
			continue;
		if ((pflashcomp[i].optype == IMG_TYPE_REDBOOT) &&
			(!be_flash_redboot(adapter, fw->data,
			pflashcomp[i].offset, pflashcomp[i].size, filehdr_size +
			(num_of_images * sizeof(struct image_hdr)))))
			continue;
		p = fw->data;
		p += filehdr_size + pflashcomp[i].offset
			+ (num_of_images * sizeof(struct image_hdr));
		if (p + pflashcomp[i].size > fw->data + fw->size)
			return -1;
		total_bytes = pflashcomp[i].size;
		while (total_bytes) {
			if (total_bytes > 32*1024)
				num_bytes = 32*1024;
			else
				num_bytes = total_bytes;
			total_bytes -= num_bytes;

			if (!total_bytes)
				flash_op = FLASHROM_OPER_FLASH;
			else
				flash_op = FLASHROM_OPER_SAVE;
			memcpy(req->params.data_buf, p, num_bytes);
			p += num_bytes;
			status = be_cmd_write_flashrom(adapter, flash_cmd,
				pflashcomp[i].optype, flash_op, num_bytes);
			if (status) {
				dev_err(&adapter->pdev->dev,
					"cmd to write to flash rom failed.\n");
				return -1;
			}
			yield();
		}
	}
	return 0;
}

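/* Infer the UFI file's chip generation from the first character of the
 * build string in its header ('3' -> BE_GEN3, '2' -> BE_GEN2); returns 0
 * for an unrecognized header.
 */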
static int get_ufigen_type(struct flash_file_hdr_g2 *fhdr)
{
	if (fhdr == NULL)
		return 0;
	if (fhdr->build[0] == '3')
		return BE_GEN3;
	else if (fhdr->build[0] == '2')
		return BE_GEN2;
	else
		return 0;
}

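/* Entry point for firmware flashing (ethtool -f): requires the interface
 * to be up, fetches the UFI file via request_firmware(), allocates a
 * DMA-able buffer large enough for one 32KB flash chunk, verifies that
 * the UFI generation matches the adapter, and flashes the matching
 * image(s).
 */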
int be_load_fw(struct be_adapter *adapter, u8 *func)
{
	char fw_file[ETHTOOL_FLASH_MAX_FILENAME];
	const struct firmware *fw;
	struct flash_file_hdr_g2 *fhdr;
	struct flash_file_hdr_g3 *fhdr3;
	struct image_hdr *img_hdr_ptr = NULL;
	struct be_dma_mem flash_cmd;
	int status, i = 0, num_imgs = 0;
	const u8 *p;

	if (!netif_running(adapter->netdev)) {
		dev_err(&adapter->pdev->dev,
			"Firmware load not allowed (interface is down)\n");
		return -EPERM;
	}

	strcpy(fw_file, func);

	status = request_firmware(&fw, fw_file, &adapter->pdev->dev);
	if (status)
		goto fw_exit;

	p = fw->data;
	fhdr = (struct flash_file_hdr_g2 *) p;
	dev_info(&adapter->pdev->dev, "Flashing firmware file %s\n", fw_file);

	flash_cmd.size = sizeof(struct be_cmd_write_flashrom) + 32*1024;
	flash_cmd.va = dma_alloc_coherent(&adapter->pdev->dev, flash_cmd.size,
					  &flash_cmd.dma, GFP_KERNEL);
	if (!flash_cmd.va) {
		status = -ENOMEM;
		dev_err(&adapter->pdev->dev,
			"Memory allocation failure while flashing\n");
		goto fw_exit;
	}

	if ((adapter->generation == BE_GEN3) &&
			(get_ufigen_type(fhdr) == BE_GEN3)) {
		fhdr3 = (struct flash_file_hdr_g3 *) fw->data;
		num_imgs = le32_to_cpu(fhdr3->num_imgs);
		for (i = 0; i < num_imgs; i++) {
			img_hdr_ptr = (struct image_hdr *) (fw->data +
					(sizeof(struct flash_file_hdr_g3) +
					 i * sizeof(struct image_hdr)));
			if (le32_to_cpu(img_hdr_ptr->imageid) == 1)
				status = be_flash_data(adapter, fw, &flash_cmd,
							num_imgs);
		}
	} else if ((adapter->generation == BE_GEN2) &&
			(get_ufigen_type(fhdr) == BE_GEN2)) {
		status = be_flash_data(adapter, fw, &flash_cmd, 0);
	} else {
		dev_err(&adapter->pdev->dev,
			"UFI and Interface are not compatible for flashing\n");
		status = -1;
	}

	dma_free_coherent(&adapter->pdev->dev, flash_cmd.size, flash_cmd.va,
			  flash_cmd.dma);
	if (status) {
		dev_err(&adapter->pdev->dev, "Firmware load error\n");
		goto fw_exit;
	}

	dev_info(&adapter->pdev->dev, "Firmware flashed successfully\n");

fw_exit:
	release_firmware(fw);
	return status;
}

static struct net_device_ops be_netdev_ops = {
	.ndo_open		= be_open,
	.ndo_stop		= be_close,
	.ndo_start_xmit		= be_xmit,
	.ndo_set_rx_mode	= be_set_multicast_list,
	.ndo_set_mac_address	= be_mac_addr_set,
	.ndo_change_mtu		= be_change_mtu,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_vlan_rx_register	= be_vlan_register,
	.ndo_vlan_rx_add_vid	= be_vlan_add_vid,
	.ndo_vlan_rx_kill_vid	= be_vlan_rem_vid,
	.ndo_set_vf_mac		= be_set_vf_mac,
	.ndo_set_vf_vlan	= be_set_vf_vlan,
	.ndo_set_vf_tx_rate	= be_set_vf_tx_rate,
	.ndo_get_vf_config	= be_get_vf_config
};

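/* One-time netdev setup: advertise the offloads the hardware supports
 * (checksumming, TSO, VLAN acceleration, and RX hashing on multi-RXQ
 * adapters), default both flow-control directions to on, and register
 * one NAPI context per RX event queue plus a shared TX/MCC NAPI context.
 */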
static void be_netdev_init(struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_rx_obj *rxo;
	int i;

	netdev->hw_features |= NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6 |
		NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_RXCSUM |
		NETIF_F_HW_VLAN_TX;
	if (be_multi_rxq(adapter))
		netdev->hw_features |= NETIF_F_RXHASH;

	netdev->features |= netdev->hw_features |
		NETIF_F_HW_VLAN_RX | NETIF_F_HW_VLAN_FILTER;

	netdev->vlan_features |= NETIF_F_SG | NETIF_F_TSO |
		NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;

	if (lancer_chip(adapter))
		netdev->vlan_features |= NETIF_F_TSO6;

	netdev->flags |= IFF_MULTICAST;

	/* Default settings for Rx and Tx flow control */
	adapter->rx_fc = true;
	adapter->tx_fc = true;

	netif_set_gso_max_size(netdev, 65535);

	BE_SET_NETDEV_OPS(netdev, &be_netdev_ops);

	SET_ETHTOOL_OPS(netdev, &be_ethtool_ops);

	for_all_rx_queues(adapter, rxo, i)
		netif_napi_add(netdev, &rxo->rx_eq.napi, be_poll_rx,
				BE_NAPI_WEIGHT);

	netif_napi_add(netdev, &adapter->tx_eq.napi, be_poll_tx_mcc,
		BE_NAPI_WEIGHT);
}

static void be_unmap_pci_bars(struct be_adapter *adapter)
{
	if (adapter->csr)
		iounmap(adapter->csr);
	if (adapter->db)
		iounmap(adapter->db);
	if (adapter->pcicfg && be_physfn(adapter))
		iounmap(adapter->pcicfg);
}

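/* Map the BARs this function needs. Lancer exposes only a doorbell BAR.
 * On BE2/BE3 the PF maps the CSR, doorbell and PCI-config BARs (the BAR
 * numbers differ between the two generations); a VF derives its
 * PCI-config mapping from the doorbell BAR at SRIOV_VF_PCICFG_OFFSET.
 */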
static int be_map_pci_bars(struct be_adapter *adapter)
{
	u8 __iomem *addr;
	int pcicfg_reg, db_reg;

	if (lancer_chip(adapter)) {
		addr = ioremap_nocache(pci_resource_start(adapter->pdev, 0),
			pci_resource_len(adapter->pdev, 0));
		if (addr == NULL)
			return -ENOMEM;
		adapter->db = addr;
		return 0;
	}

	if (be_physfn(adapter)) {
		addr = ioremap_nocache(pci_resource_start(adapter->pdev, 2),
				pci_resource_len(adapter->pdev, 2));
		if (addr == NULL)
			return -ENOMEM;
		adapter->csr = addr;
	}

	if (adapter->generation == BE_GEN2) {
		pcicfg_reg = 1;
		db_reg = 4;
	} else {
		pcicfg_reg = 0;
		if (be_physfn(adapter))
			db_reg = 4;
		else
			db_reg = 0;
	}
	addr = ioremap_nocache(pci_resource_start(adapter->pdev, db_reg),
				pci_resource_len(adapter->pdev, db_reg));
	if (addr == NULL)
		goto pci_map_err;
	adapter->db = addr;

	if (be_physfn(adapter)) {
		addr = ioremap_nocache(
				pci_resource_start(adapter->pdev, pcicfg_reg),
				pci_resource_len(adapter->pdev, pcicfg_reg));
		if (addr == NULL)
			goto pci_map_err;
		adapter->pcicfg = addr;
	} else
		adapter->pcicfg = adapter->db + SRIOV_VF_PCICFG_OFFSET;

	return 0;
pci_map_err:
	be_unmap_pci_bars(adapter);
	return -ENOMEM;
}

static void be_ctrl_cleanup(struct be_adapter *adapter)
{
	struct be_dma_mem *mem = &adapter->mbox_mem_alloced;

	be_unmap_pci_bars(adapter);

	if (mem->va)
		dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
				  mem->dma);

	mem = &adapter->mc_cmd_mem;
	if (mem->va)
		dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
				  mem->dma);
}

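/* Set up control-path state: map the PCI BARs, allocate the 16-byte
 * aligned mailbox and the multicast-config DMA buffers, and initialize
 * the mailbox and MCC locks. Error paths unwind in reverse order.
 */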
static int be_ctrl_init(struct be_adapter *adapter)
{
	struct be_dma_mem *mbox_mem_alloc = &adapter->mbox_mem_alloced;
	struct be_dma_mem *mbox_mem_align = &adapter->mbox_mem;
	struct be_dma_mem *mc_cmd_mem = &adapter->mc_cmd_mem;
	int status;

	status = be_map_pci_bars(adapter);
	if (status)
		goto done;

	mbox_mem_alloc->size = sizeof(struct be_mcc_mailbox) + 16;
	mbox_mem_alloc->va = dma_alloc_coherent(&adapter->pdev->dev,
						mbox_mem_alloc->size,
						&mbox_mem_alloc->dma,
						GFP_KERNEL);
	if (!mbox_mem_alloc->va) {
		status = -ENOMEM;
		goto unmap_pci_bars;
	}

	mbox_mem_align->size = sizeof(struct be_mcc_mailbox);
	mbox_mem_align->va = PTR_ALIGN(mbox_mem_alloc->va, 16);
	mbox_mem_align->dma = PTR_ALIGN(mbox_mem_alloc->dma, 16);
	memset(mbox_mem_align->va, 0, sizeof(struct be_mcc_mailbox));

	mc_cmd_mem->size = sizeof(struct be_cmd_req_mcast_mac_config);
	mc_cmd_mem->va = dma_alloc_coherent(&adapter->pdev->dev,
					    mc_cmd_mem->size, &mc_cmd_mem->dma,
					    GFP_KERNEL);
	if (mc_cmd_mem->va == NULL) {
		status = -ENOMEM;
		goto free_mbox;
	}
	memset(mc_cmd_mem->va, 0, mc_cmd_mem->size);

	mutex_init(&adapter->mbox_lock);
	spin_lock_init(&adapter->mcc_lock);
	spin_lock_init(&adapter->mcc_cq_lock);

	init_completion(&adapter->flash_compl);
	pci_save_state(adapter->pdev);
	return 0;

free_mbox:
	dma_free_coherent(&adapter->pdev->dev, mbox_mem_alloc->size,
			  mbox_mem_alloc->va, mbox_mem_alloc->dma);

unmap_pci_bars:
	be_unmap_pci_bars(adapter);

done:
	return status;
}

static void be_stats_cleanup(struct be_adapter *adapter)
{
	struct be_dma_mem *cmd = &adapter->stats_cmd;

	if (cmd->va)
		dma_free_coherent(&adapter->pdev->dev, cmd->size,
				  cmd->va, cmd->dma);
}

static int be_stats_init(struct be_adapter *adapter)
{
	struct be_dma_mem *cmd = &adapter->stats_cmd;

	cmd->size = sizeof(struct be_cmd_req_get_stats);
	cmd->va = dma_alloc_coherent(&adapter->pdev->dev, cmd->size, &cmd->dma,
				     GFP_KERNEL);
	if (cmd->va == NULL)
		return -1;
	memset(cmd->va, 0, cmd->size);
	return 0;
}

static void __devexit be_remove(struct pci_dev *pdev)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);

	if (!adapter)
		return;

	cancel_delayed_work_sync(&adapter->work);

	unregister_netdev(adapter->netdev);

	be_clear(adapter);

	be_stats_cleanup(adapter);

	be_ctrl_cleanup(adapter);

	kfree(adapter->vf_cfg);
	be_sriov_disable(adapter);

	be_msix_disable(adapter);

	pci_set_drvdata(pdev, NULL);
	pci_release_regions(pdev);
	pci_disable_device(pdev);

	free_netdev(adapter->netdev);
}

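/* Read the adapter's configuration from firmware: FW version, port
 * number, function mode/capabilities, the permanent MAC address (PF
 * only), the VLAN budget for this function, controller attributes and
 * native-mode support.
 */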
static int be_get_config(struct be_adapter *adapter)
{
	int status;
	u8 mac[ETH_ALEN];

	status = be_cmd_get_fw_ver(adapter, adapter->fw_ver);
	if (status)
		return status;

	status = be_cmd_query_fw_cfg(adapter, &adapter->port_num,
			&adapter->function_mode, &adapter->function_caps);
	if (status)
		return status;

	memset(mac, 0, ETH_ALEN);

	if (be_physfn(adapter)) {
		status = be_cmd_mac_addr_query(adapter, mac,
			MAC_ADDRESS_TYPE_NETWORK, true /*permanent */, 0);

		if (status)
			return status;

		if (!is_valid_ether_addr(mac))
			return -EADDRNOTAVAIL;

		memcpy(adapter->netdev->dev_addr, mac, ETH_ALEN);
		memcpy(adapter->netdev->perm_addr, mac, ETH_ALEN);
	}

	if (adapter->function_mode & 0x400)
		adapter->max_vlans = BE_NUM_VLANS_SUPPORTED/4;
	else
		adapter->max_vlans = BE_NUM_VLANS_SUPPORTED;

	status = be_cmd_get_cntl_attributes(adapter);
	if (status)
		return status;

	be_cmd_check_native_mode(adapter);
	return 0;
}

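/* Classify the device by PCI device ID: BE2, BE3, or (for OC_DEVICE_ID3)
 * a Lancer SLI device, whose SLI_INTF register must report a valid
 * interface type; VFs are rejected on that family here.
 */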
static int be_dev_family_check(struct be_adapter *adapter)
{
	struct pci_dev *pdev = adapter->pdev;
	u32 sli_intf = 0, if_type;

	switch (pdev->device) {
	case BE_DEVICE_ID1:
	case OC_DEVICE_ID1:
		adapter->generation = BE_GEN2;
		break;
	case BE_DEVICE_ID2:
	case OC_DEVICE_ID2:
		adapter->generation = BE_GEN3;
		break;
	case OC_DEVICE_ID3:
		pci_read_config_dword(pdev, SLI_INTF_REG_OFFSET, &sli_intf);
		if_type = (sli_intf & SLI_INTF_IF_TYPE_MASK) >>
				SLI_INTF_IF_TYPE_SHIFT;

		if (((sli_intf & SLI_INTF_VALID_MASK) != SLI_INTF_VALID) ||
			if_type != 0x02) {
			dev_err(&pdev->dev, "SLI_INTF reg val is not valid\n");
			return -EINVAL;
		}
		if (num_vfs > 0) {
			dev_err(&pdev->dev, "VFs not supported\n");
			return -EINVAL;
		}
		adapter->sli_family = ((sli_intf & SLI_INTF_FAMILY_MASK) >>
					 SLI_INTF_FAMILY_SHIFT);
		adapter->generation = BE_GEN3;
		break;
	default:
		adapter->generation = 0;
	}
	return 0;
}

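/* Poll the SLIPORT status register until the ready bit is set: up to
 * 500 iterations of 20ms (roughly 10 seconds); returns -1 on timeout.
 */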
static int lancer_wait_ready(struct be_adapter *adapter)
{
#define SLIPORT_READY_TIMEOUT 500
	u32 sliport_status;
	int status = 0, i;

	for (i = 0; i < SLIPORT_READY_TIMEOUT; i++) {
		sliport_status = ioread32(adapter->db + SLIPORT_STATUS_OFFSET);
		if (sliport_status & SLIPORT_STATUS_RDY_MASK)
			break;

		msleep(20);
	}

	if (i == SLIPORT_READY_TIMEOUT)
		status = -1;

	return status;
}

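/* Wait for the Lancer port to become ready. If it reports an error that
 * is flagged as requiring a reset, write SLI_PORT_CONTROL_IP_MASK to
 * SLIPORT_CONTROL to request one, then re-poll and verify that both the
 * error and reset-needed bits have cleared.
 */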
static int lancer_test_and_set_rdy_state(struct be_adapter *adapter)
{
	int status;
	u32 sliport_status, err, reset_needed;

	status = lancer_wait_ready(adapter);
	if (!status) {
		sliport_status = ioread32(adapter->db + SLIPORT_STATUS_OFFSET);
		err = sliport_status & SLIPORT_STATUS_ERR_MASK;
		reset_needed = sliport_status & SLIPORT_STATUS_RN_MASK;
		if (err && reset_needed) {
			iowrite32(SLI_PORT_CONTROL_IP_MASK,
				  adapter->db + SLIPORT_CONTROL_OFFSET);

			/* check adapter has corrected the error */
			status = lancer_wait_ready(adapter);
			sliport_status = ioread32(adapter->db +
						  SLIPORT_STATUS_OFFSET);
			sliport_status &= (SLIPORT_STATUS_ERR_MASK |
						SLIPORT_STATUS_RN_MASK);
			if (status || sliport_status)
				status = -1;
		} else if (err || reset_needed) {
			status = -1;
		}
	}
	return status;
}

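/* PCI probe: enable the device, allocate the netdev, pick a 64- or
 * 32-bit DMA mask, enable SR-IOV if requested, bring up the control
 * path, sync with firmware (POST, fw_init, reset_function), query the
 * config, then be_setup() the queues and register the netdev. Error
 * paths unwind in reverse order.
 */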
static int __devinit be_probe(struct pci_dev *pdev,
			const struct pci_device_id *pdev_id)
{
	int status = 0;
	struct be_adapter *adapter;
	struct net_device *netdev;

	status = pci_enable_device(pdev);
	if (status)
		goto do_none;

	status = pci_request_regions(pdev, DRV_NAME);
	if (status)
		goto disable_dev;
	pci_set_master(pdev);

	netdev = alloc_etherdev(sizeof(struct be_adapter));
	if (netdev == NULL) {
		status = -ENOMEM;
		goto rel_reg;
	}
	adapter = netdev_priv(netdev);
	adapter->pdev = pdev;
	pci_set_drvdata(pdev, adapter);

	status = be_dev_family_check(adapter);
	if (status)
		goto free_netdev;

	adapter->netdev = netdev;
	SET_NETDEV_DEV(netdev, &pdev->dev);

	status = dma_set_mask(&pdev->dev, DMA_BIT_MASK(64));
	if (!status) {
		netdev->features |= NETIF_F_HIGHDMA;
	} else {
		status = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
		if (status) {
			dev_err(&pdev->dev, "Could not set PCI DMA Mask\n");
			goto free_netdev;
		}
	}

	be_sriov_enable(adapter);
	if (adapter->sriov_enabled) {
		adapter->vf_cfg = kcalloc(num_vfs,
			sizeof(struct be_vf_cfg), GFP_KERNEL);

		if (!adapter->vf_cfg)
			goto free_netdev;
	}

	status = be_ctrl_init(adapter);
	if (status)
		goto free_vf_cfg;

	if (lancer_chip(adapter)) {
		status = lancer_test_and_set_rdy_state(adapter);
		if (status) {
			dev_err(&pdev->dev, "Adapter in non-recoverable error\n");
			goto ctrl_clean;
		}
	}

	/* sync up with fw's ready state */
	if (be_physfn(adapter)) {
		status = be_cmd_POST(adapter);
		if (status)
			goto ctrl_clean;
	}

	/* tell fw we're ready to fire cmds */
	status = be_cmd_fw_init(adapter);
	if (status)
		goto ctrl_clean;

	status = be_cmd_reset_function(adapter);
	if (status)
		goto ctrl_clean;

	status = be_stats_init(adapter);
	if (status)
		goto ctrl_clean;

	status = be_get_config(adapter);
	if (status)
		goto stats_clean;

	be_msix_enable(adapter);

	INIT_DELAYED_WORK(&adapter->work, be_worker);

	status = be_setup(adapter);
	if (status)
		goto msix_disable;

	be_netdev_init(netdev);
	status = register_netdev(netdev);
	if (status != 0)
		goto unsetup;
	netif_carrier_off(netdev);

	if (be_physfn(adapter) && adapter->sriov_enabled) {
		u8 mac_speed;
		bool link_up;
		u16 vf, lnk_speed;

		status = be_vf_eth_addr_config(adapter);
		if (status)
			goto unreg_netdev;

		for (vf = 0; vf < num_vfs; vf++) {
			status = be_cmd_link_status_query(adapter, &link_up,
					&mac_speed, &lnk_speed, vf + 1);
			if (!status)
				adapter->vf_cfg[vf].vf_tx_rate = lnk_speed * 10;
			else
				goto unreg_netdev;
		}
	}

	dev_info(&pdev->dev, "%s port %d\n", nic_name(pdev), adapter->port_num);
	schedule_delayed_work(&adapter->work, msecs_to_jiffies(100));
	return 0;

unreg_netdev:
	unregister_netdev(netdev);
unsetup:
	be_clear(adapter);
msix_disable:
	be_msix_disable(adapter);
stats_clean:
	be_stats_cleanup(adapter);
ctrl_clean:
	be_ctrl_cleanup(adapter);
free_vf_cfg:
	kfree(adapter->vf_cfg);
free_netdev:
	be_sriov_disable(adapter);
	free_netdev(netdev);
	pci_set_drvdata(pdev, NULL);
rel_reg:
	pci_release_regions(pdev);
disable_dev:
	pci_disable_device(pdev);
do_none:
	dev_err(&pdev->dev, "%s initialization failed\n", nic_name(pdev));
	return status;
}

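/* Suspend: stop the worker, optionally arm wake-on-LAN, detach and close
 * the interface, tear down the queues and MSI-X, then power the device
 * down.
 */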
static int be_suspend(struct pci_dev *pdev, pm_message_t state)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);
	struct net_device *netdev = adapter->netdev;

	cancel_delayed_work_sync(&adapter->work);
	if (adapter->wol)
		be_setup_wol(adapter, true);

	netif_device_detach(netdev);
	if (netif_running(netdev)) {
		rtnl_lock();
		be_close(netdev);
		rtnl_unlock();
	}
	be_cmd_get_flow_control(adapter, &adapter->tx_fc, &adapter->rx_fc);
	be_clear(adapter);

	be_msix_disable(adapter);
	pci_save_state(pdev);
	pci_disable_device(pdev);
	pci_set_power_state(pdev, pci_choose_state(pdev, state));
	return 0;
}

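/* Resume: re-enable the device and MSI-X, tell firmware we are ready to
 * issue commands, rebuild the queues with be_setup(), reopen the
 * interface if it was running, and restart the worker; WOL is disarmed
 * if it was set at suspend.
 */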
static int be_resume(struct pci_dev *pdev)
{
	int status = 0;
	struct be_adapter *adapter = pci_get_drvdata(pdev);
	struct net_device *netdev = adapter->netdev;

	netif_device_detach(netdev);

	status = pci_enable_device(pdev);
	if (status)
		return status;

	pci_set_power_state(pdev, 0);
	pci_restore_state(pdev);

	be_msix_enable(adapter);
	/* tell fw we're ready to fire cmds */
	status = be_cmd_fw_init(adapter);
	if (status)
		return status;

	be_setup(adapter);
	if (netif_running(netdev)) {
		rtnl_lock();
		be_open(netdev);
		rtnl_unlock();
	}
	netif_device_attach(netdev);

	if (adapter->wol)
		be_setup_wol(adapter, false);

	schedule_delayed_work(&adapter->work, msecs_to_jiffies(100));
	return 0;
}

/*
 * An FLR will stop BE from DMAing any data.
 */
static void be_shutdown(struct pci_dev *pdev)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);

	if (!adapter)
		return;

	cancel_delayed_work_sync(&adapter->work);

	netif_device_detach(adapter->netdev);

	if (adapter->wol)
		be_setup_wol(adapter, true);

	be_cmd_reset_function(adapter);

	pci_disable_device(pdev);
}

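/* EEH: on a detected PCI channel error, detach and close the netdev and
 * free the adapter's resources; a permanent failure disconnects the
 * driver, otherwise the EEH core is asked to reset the slot.
 */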
static pci_ers_result_t be_eeh_err_detected(struct pci_dev *pdev,
				pci_channel_state_t state)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);
	struct net_device *netdev = adapter->netdev;

	dev_err(&adapter->pdev->dev, "EEH error detected\n");

	adapter->eeh_err = true;

	netif_device_detach(netdev);

	if (netif_running(netdev)) {
		rtnl_lock();
		be_close(netdev);
		rtnl_unlock();
	}
	be_clear(adapter);

	if (state == pci_channel_io_perm_failure)
		return PCI_ERS_RESULT_DISCONNECT;

	pci_disable_device(pdev);

	return PCI_ERS_RESULT_NEED_RESET;
}

static pci_ers_result_t be_eeh_reset(struct pci_dev *pdev)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);
	int status;

	dev_info(&adapter->pdev->dev, "EEH reset\n");
	adapter->eeh_err = false;

	status = pci_enable_device(pdev);
	if (status)
		return PCI_ERS_RESULT_DISCONNECT;

	pci_set_master(pdev);
	pci_set_power_state(pdev, 0);
	pci_restore_state(pdev);

	/* Check if card is ok and fw is ready */
	status = be_cmd_POST(adapter);
	if (status)
		return PCI_ERS_RESULT_DISCONNECT;

	return PCI_ERS_RESULT_RECOVERED;
}

static void be_eeh_resume(struct pci_dev *pdev)
{
	int status = 0;
	struct be_adapter *adapter = pci_get_drvdata(pdev);
	struct net_device *netdev = adapter->netdev;

	dev_info(&adapter->pdev->dev, "EEH resume\n");

	pci_save_state(pdev);

	/* tell fw we're ready to fire cmds */
	status = be_cmd_fw_init(adapter);
	if (status)
		goto err;

	status = be_setup(adapter);
	if (status)
		goto err;

	if (netif_running(netdev)) {
		status = be_open(netdev);
		if (status)
			goto err;
	}
	netif_device_attach(netdev);
	return;
err:
	dev_err(&adapter->pdev->dev, "EEH resume failed\n");
}

static struct pci_error_handlers be_eeh_handlers = {
	.error_detected = be_eeh_err_detected,
	.slot_reset = be_eeh_reset,
	.resume = be_eeh_resume,
};

static struct pci_driver be_driver = {
	.name = DRV_NAME,
	.id_table = be_dev_ids,
	.probe = be_probe,
	.remove = be_remove,
	.suspend = be_suspend,
	.resume = be_resume,
	.shutdown = be_shutdown,
	.err_handler = &be_eeh_handlers
};

static int __init be_init_module(void)
{
	if (rx_frag_size != 8192 && rx_frag_size != 4096 &&
	    rx_frag_size != 2048) {
		printk(KERN_WARNING DRV_NAME
			" : Module param rx_frag_size must be 2048/4096/8192."
			" Using 2048\n");
		rx_frag_size = 2048;
	}

	return pci_register_driver(&be_driver);
}
module_init(be_init_module);

static void __exit be_exit_module(void)
{
	pci_unregister_driver(&be_driver);
}
module_exit(be_exit_module);