/*
 * Copyright (C) 2005 - 2011 Emulex
 * All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version 2
 * as published by the Free Software Foundation. The full GNU General
 * Public License is included in this distribution in the file called COPYING.
 *
 * Contact Information:
 * linux-drivers@emulex.com
 *
 * Emulex
 * 3333 Susan Street
 * Costa Mesa, CA 92626
 */

#include "be.h"
#include "be_cmds.h"
#include <asm/div64.h>

MODULE_VERSION(DRV_VER);
MODULE_DESCRIPTION(DRV_DESC " " DRV_VER);
MODULE_AUTHOR("ServerEngines Corporation");
MODULE_LICENSE("GPL");

static ushort rx_frag_size = 2048;
static unsigned int num_vfs;
module_param(rx_frag_size, ushort, S_IRUGO);
module_param(num_vfs, uint, S_IRUGO);
MODULE_PARM_DESC(rx_frag_size, "Size of a fragment that holds rcvd data.");
MODULE_PARM_DESC(num_vfs, "Number of PCI VFs to initialize");
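/* Usage example (hypothetical values, not defaults): loading the module as
 *	modprobe be2net rx_frag_size=4096 num_vfs=2
 * would post 4KB receive fragments and try to enable two SR-IOV VFs. */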

static bool multi_rxq = true;
module_param(multi_rxq, bool, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(multi_rxq, "Multi Rx Queue support. Enabled by default");

static DEFINE_PCI_DEVICE_TABLE(be_dev_ids) = {
	{ PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID1) },
	{ PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID2) },
	{ PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID1) },
	{ PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID2) },
	{ PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID3)},
	{ 0 }
};
MODULE_DEVICE_TABLE(pci, be_dev_ids);
/* UE Status Low CSR */
static char *ue_status_low_desc[] = {
	"CEV",
	"CTX",
	"DBUF",
	"ERX",
	"Host",
	"MPU",
	"NDMA",
	"PTC ",
	"RDMA ",
	"RXF ",
	"RXIPS ",
	"RXULP0 ",
	"RXULP1 ",
	"RXULP2 ",
	"TIM ",
	"TPOST ",
	"TPRE ",
	"TXIPS ",
	"TXULP0 ",
	"TXULP1 ",
	"UC ",
	"WDMA ",
	"TXULP2 ",
	"HOST1 ",
	"P0_OB_LINK ",
	"P1_OB_LINK ",
	"HOST_GPIO ",
	"MBOX ",
	"AXGMAC0",
	"AXGMAC1",
	"JTAG",
	"MPU_INTPEND"
};
/* UE Status High CSR */
static char *ue_status_hi_desc[] = {
	"LPCMEMHOST",
	"MGMT_MAC",
	"PCS0ONLINE",
	"MPU_IRAM",
	"PCS1ONLINE",
	"PCTL0",
	"PCTL1",
	"PMEM",
	"RR",
	"TXPB",
	"RXPP",
	"XAUI",
	"TXP",
	"ARM",
	"IPC",
	"HOST2",
	"HOST3",
	"HOST4",
	"HOST5",
	"HOST6",
	"HOST7",
	"HOST8",
	"HOST9",
	"NETC",
	"Unknown",
	"Unknown",
	"Unknown",
	"Unknown",
	"Unknown",
	"Unknown",
	"Unknown",
	"Unknown"
};

static void be_queue_free(struct be_adapter *adapter, struct be_queue_info *q)
{
	struct be_dma_mem *mem = &q->dma_mem;
	if (mem->va)
		dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
				  mem->dma);
}

static int be_queue_alloc(struct be_adapter *adapter, struct be_queue_info *q,
		u16 len, u16 entry_size)
{
	struct be_dma_mem *mem = &q->dma_mem;

	memset(q, 0, sizeof(*q));
	q->len = len;
	q->entry_size = entry_size;
	mem->size = len * entry_size;
	mem->va = dma_alloc_coherent(&adapter->pdev->dev, mem->size, &mem->dma,
				     GFP_KERNEL);
	if (!mem->va)
		return -1;
	memset(mem->va, 0, mem->size);
	return 0;
}

static void be_intr_set(struct be_adapter *adapter, bool enable)
{
	u8 __iomem *addr = adapter->pcicfg + PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET;
	u32 reg = ioread32(addr);
	u32 enabled = reg & MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;

	if (adapter->eeh_err)
		return;

	if (!enabled && enable)
		reg |= MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
	else if (enabled && !enable)
		reg &= ~MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
	else
		return;

	iowrite32(reg, addr);
}

static void be_rxq_notify(struct be_adapter *adapter, u16 qid, u16 posted)
{
	u32 val = 0;
	val |= qid & DB_RQ_RING_ID_MASK;
	val |= posted << DB_RQ_NUM_POSTED_SHIFT;

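	/* Make the posted ring entries visible in memory before the
	 * doorbell write below tells the hardware to fetch them. */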
	wmb();
	iowrite32(val, adapter->db + DB_RQ_OFFSET);
}

static void be_txq_notify(struct be_adapter *adapter, u16 qid, u16 posted)
{
	u32 val = 0;
	val |= qid & DB_TXULP_RING_ID_MASK;
	val |= (posted & DB_TXULP_NUM_POSTED_MASK) << DB_TXULP_NUM_POSTED_SHIFT;

	wmb();
	iowrite32(val, adapter->db + DB_TXULP1_OFFSET);
}

static void be_eq_notify(struct be_adapter *adapter, u16 qid,
		bool arm, bool clear_int, u16 num_popped)
{
	u32 val = 0;
	val |= qid & DB_EQ_RING_ID_MASK;
	val |= ((qid & DB_EQ_RING_ID_EXT_MASK) <<
			DB_EQ_RING_ID_EXT_MASK_SHIFT);

	if (adapter->eeh_err)
		return;

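	/* A single doorbell write acks num_popped processed events and can
	 * also re-arm the EQ (arm) and deassert the interrupt (clear_int). */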
	if (arm)
		val |= 1 << DB_EQ_REARM_SHIFT;
	if (clear_int)
		val |= 1 << DB_EQ_CLR_SHIFT;
	val |= 1 << DB_EQ_EVNT_SHIFT;
	val |= num_popped << DB_EQ_NUM_POPPED_SHIFT;
	iowrite32(val, adapter->db + DB_EQ_OFFSET);
}

void be_cq_notify(struct be_adapter *adapter, u16 qid, bool arm, u16 num_popped)
{
	u32 val = 0;
	val |= qid & DB_CQ_RING_ID_MASK;
	val |= ((qid & DB_CQ_RING_ID_EXT_MASK) <<
			DB_CQ_RING_ID_EXT_MASK_SHIFT);

	if (adapter->eeh_err)
		return;

	if (arm)
		val |= 1 << DB_CQ_REARM_SHIFT;
	val |= num_popped << DB_CQ_NUM_POPPED_SHIFT;
	iowrite32(val, adapter->db + DB_CQ_OFFSET);
}

static int be_mac_addr_set(struct net_device *netdev, void *p)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct sockaddr *addr = p;
	int status = 0;

	if (!is_valid_ether_addr(addr->sa_data))
		return -EADDRNOTAVAIL;

	/* MAC addr configuration will be done in hardware for VFs
	 * by their corresponding PFs. Just copy to netdev addr here
	 */
	if (!be_physfn(adapter))
		goto netdev_addr;

	status = be_cmd_pmac_del(adapter, adapter->if_handle,
			adapter->pmac_id, 0);
	if (status)
		return status;

	status = be_cmd_pmac_add(adapter, (u8 *)addr->sa_data,
			adapter->if_handle, &adapter->pmac_id, 0);
netdev_addr:
	if (!status)
		memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);

	return status;
}

void netdev_stats_update(struct be_adapter *adapter)
{
	struct be_hw_stats *hw_stats = hw_stats_from_cmd(adapter->stats_cmd.va);
	struct be_rxf_stats *rxf_stats = &hw_stats->rxf;
	struct be_port_rxf_stats *port_stats =
			&rxf_stats->port[adapter->port_num];
	struct net_device_stats *dev_stats = &adapter->netdev->stats;
	struct be_erx_stats *erx_stats = &hw_stats->erx;
	struct be_rx_obj *rxo;
	int i;

	memset(dev_stats, 0, sizeof(*dev_stats));
	for_all_rx_queues(adapter, rxo, i) {
		dev_stats->rx_packets += rx_stats(rxo)->rx_pkts;
		dev_stats->rx_bytes += rx_stats(rxo)->rx_bytes;
		dev_stats->multicast += rx_stats(rxo)->rx_mcast_pkts;
		/* no space in linux buffers: best possible approximation */
		dev_stats->rx_dropped +=
			erx_stats->rx_drops_no_fragments[rxo->q.id];
	}

	dev_stats->tx_packets = tx_stats(adapter)->be_tx_pkts;
	dev_stats->tx_bytes = tx_stats(adapter)->be_tx_bytes;

	/* bad pkts received */
	dev_stats->rx_errors = port_stats->rx_crc_errors +
		port_stats->rx_alignment_symbol_errors +
		port_stats->rx_in_range_errors +
		port_stats->rx_out_range_errors +
		port_stats->rx_frame_too_long +
		port_stats->rx_dropped_too_small +
		port_stats->rx_dropped_too_short +
		port_stats->rx_dropped_header_too_small +
		port_stats->rx_dropped_tcp_length +
		port_stats->rx_dropped_runt +
		port_stats->rx_tcp_checksum_errs +
		port_stats->rx_ip_checksum_errs +
		port_stats->rx_udp_checksum_errs;

	/* detailed rx errors */
	dev_stats->rx_length_errors = port_stats->rx_in_range_errors +
		port_stats->rx_out_range_errors +
		port_stats->rx_frame_too_long;

	dev_stats->rx_crc_errors = port_stats->rx_crc_errors;

	/* frame alignment errors */
	dev_stats->rx_frame_errors = port_stats->rx_alignment_symbol_errors;

	/* receiver fifo overrun */
	/* drops_no_pbuf is not per i/f, it's per BE card */
	dev_stats->rx_fifo_errors = port_stats->rx_fifo_overflow +
			port_stats->rx_input_fifo_overflow +
			rxf_stats->rx_drops_no_pbuf;
}

void be_link_status_update(struct be_adapter *adapter, bool link_up)
{
	struct net_device *netdev = adapter->netdev;

	/* If link came up or went down */
	if (adapter->link_up != link_up) {
		adapter->link_speed = -1;
		if (link_up) {
			netif_carrier_on(netdev);
			printk(KERN_INFO "%s: Link up\n", netdev->name);
		} else {
			netif_carrier_off(netdev);
			printk(KERN_INFO "%s: Link down\n", netdev->name);
		}
		adapter->link_up = link_up;
	}
}

/* Update the EQ delay in BE based on the RX frags consumed / sec */
static void be_rx_eqd_update(struct be_adapter *adapter, struct be_rx_obj *rxo)
{
	struct be_eq_obj *rx_eq = &rxo->rx_eq;
	struct be_rx_stats *stats = &rxo->stats;
	ulong now = jiffies;
	u32 eqd;

	if (!rx_eq->enable_aic)
		return;

	/* Wrapped around */
	if (time_before(now, stats->rx_fps_jiffies)) {
		stats->rx_fps_jiffies = now;
		return;
	}

	/* Update once a second */
	if ((now - stats->rx_fps_jiffies) < HZ)
		return;

	stats->rx_fps = (stats->rx_frags - stats->prev_rx_frags) /
			((now - stats->rx_fps_jiffies) / HZ);

	stats->rx_fps_jiffies = now;
	stats->prev_rx_frags = stats->rx_frags;
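	/* Heuristic, as coded below: roughly 8 delay units per 110K
	 * frags/sec (so 1.1M frags/sec maps to an eqd of 80), clamped to
	 * [min_eqd, max_eqd]; delays under 10 are rounded down to 0. */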
	eqd = stats->rx_fps / 110000;
	eqd = eqd << 3;
	if (eqd > rx_eq->max_eqd)
		eqd = rx_eq->max_eqd;
	if (eqd < rx_eq->min_eqd)
		eqd = rx_eq->min_eqd;
	if (eqd < 10)
		eqd = 0;
	if (eqd != rx_eq->cur_eqd)
		be_cmd_modify_eqd(adapter, rx_eq->q.id, eqd);

	rx_eq->cur_eqd = eqd;
}

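/* Worked example for be_calc_rate() below: bytes = 250,000,000 over
 * ticks = 2 * HZ gives (250e6 / 2) * 8 / 1e6 = 1000 Mbits/sec. */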
static u32 be_calc_rate(u64 bytes, unsigned long ticks)
{
	u64 rate = bytes;

	do_div(rate, ticks / HZ);
	rate <<= 3;			/* bytes/sec -> bits/sec */
	do_div(rate, 1000000ul);	/* Mbits/sec */

	return rate;
}

static void be_tx_rate_update(struct be_adapter *adapter)
{
	struct be_tx_stats *stats = tx_stats(adapter);
	ulong now = jiffies;

	/* Wrapped around? */
	if (time_before(now, stats->be_tx_jiffies)) {
		stats->be_tx_jiffies = now;
		return;
	}

	/* Update tx rate once in two seconds */
	if ((now - stats->be_tx_jiffies) > 2 * HZ) {
		stats->be_tx_rate = be_calc_rate(stats->be_tx_bytes
						  - stats->be_tx_bytes_prev,
						 now - stats->be_tx_jiffies);
		stats->be_tx_jiffies = now;
		stats->be_tx_bytes_prev = stats->be_tx_bytes;
	}
}

static void be_tx_stats_update(struct be_adapter *adapter,
			u32 wrb_cnt, u32 copied, u32 gso_segs, bool stopped)
{
	struct be_tx_stats *stats = tx_stats(adapter);
	stats->be_tx_reqs++;
	stats->be_tx_wrbs += wrb_cnt;
	stats->be_tx_bytes += copied;
	stats->be_tx_pkts += (gso_segs ? gso_segs : 1);
	if (stopped)
		stats->be_tx_stops++;
}

/* Determine number of WRB entries needed to xmit data in an skb */
static u32 wrb_cnt_for_skb(struct be_adapter *adapter, struct sk_buff *skb,
								bool *dummy)
{
	int cnt = (skb->len > skb->data_len);

	cnt += skb_shinfo(skb)->nr_frags;

	/* to account for hdr wrb */
	cnt++;
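	/* Example: a linear skb is 1 data WRB + 1 hdr WRB = 2 (even, no
	 * dummy); head + 1 frag is 3, padded to 4 on BE chips below. */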
	if (lancer_chip(adapter) || !(cnt & 1)) {
		*dummy = false;
	} else {
		/* add a dummy to make it an even num */
		cnt++;
		*dummy = true;
	}
	BUG_ON(cnt > BE_MAX_TX_FRAG_COUNT);
	return cnt;
}

static inline void wrb_fill(struct be_eth_wrb *wrb, u64 addr, int len)
{
	wrb->frag_pa_hi = upper_32_bits(addr);
	wrb->frag_pa_lo = addr & 0xFFFFFFFF;
	wrb->frag_len = len & ETH_WRB_FRAG_LEN_MASK;
}

static void wrb_fill_hdr(struct be_adapter *adapter, struct be_eth_hdr_wrb *hdr,
		struct sk_buff *skb, u32 wrb_cnt, u32 len)
{
	u8 vlan_prio = 0;
	u16 vlan_tag = 0;

	memset(hdr, 0, sizeof(*hdr));

	AMAP_SET_BITS(struct amap_eth_hdr_wrb, crc, hdr, 1);

	if (skb_is_gso(skb)) {
		AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso, hdr, 1);
		AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso_mss,
			hdr, skb_shinfo(skb)->gso_size);
		if (skb_is_gso_v6(skb) && !lancer_chip(adapter))
			AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso6, hdr, 1);
		if (lancer_chip(adapter) && adapter->sli_family ==
							LANCER_A0_SLI_FAMILY) {
			AMAP_SET_BITS(struct amap_eth_hdr_wrb, ipcs, hdr, 1);
			if (is_tcp_pkt(skb))
				AMAP_SET_BITS(struct amap_eth_hdr_wrb,
								tcpcs, hdr, 1);
			else if (is_udp_pkt(skb))
				AMAP_SET_BITS(struct amap_eth_hdr_wrb,
								udpcs, hdr, 1);
		}
	} else if (skb->ip_summed == CHECKSUM_PARTIAL) {
		if (is_tcp_pkt(skb))
			AMAP_SET_BITS(struct amap_eth_hdr_wrb, tcpcs, hdr, 1);
		else if (is_udp_pkt(skb))
			AMAP_SET_BITS(struct amap_eth_hdr_wrb, udpcs, hdr, 1);
	}

	if (adapter->vlan_grp && vlan_tx_tag_present(skb)) {
		AMAP_SET_BITS(struct amap_eth_hdr_wrb, vlan, hdr, 1);
		vlan_tag = vlan_tx_tag_get(skb);
		vlan_prio = (vlan_tag & VLAN_PRIO_MASK) >> VLAN_PRIO_SHIFT;
		/* If vlan priority provided by OS is NOT in available bmap */
		if (!(adapter->vlan_prio_bmap & (1 << vlan_prio)))
			vlan_tag = (vlan_tag & ~VLAN_PRIO_MASK) |
					adapter->recommended_prio;
		AMAP_SET_BITS(struct amap_eth_hdr_wrb, vlan_tag, hdr, vlan_tag);
	}

	AMAP_SET_BITS(struct amap_eth_hdr_wrb, event, hdr, 1);
	AMAP_SET_BITS(struct amap_eth_hdr_wrb, complete, hdr, 1);
	AMAP_SET_BITS(struct amap_eth_hdr_wrb, num_wrb, hdr, wrb_cnt);
	AMAP_SET_BITS(struct amap_eth_hdr_wrb, len, hdr, len);
}

static void unmap_tx_frag(struct device *dev, struct be_eth_wrb *wrb,
		bool unmap_single)
{
	dma_addr_t dma;

	be_dws_le_to_cpu(wrb, sizeof(*wrb));

	dma = (u64)wrb->frag_pa_hi << 32 | (u64)wrb->frag_pa_lo;
	if (wrb->frag_len) {
		if (unmap_single)
			dma_unmap_single(dev, dma, wrb->frag_len,
					 DMA_TO_DEVICE);
		else
			dma_unmap_page(dev, dma, wrb->frag_len, DMA_TO_DEVICE);
	}
}

static int make_tx_wrbs(struct be_adapter *adapter,
		struct sk_buff *skb, u32 wrb_cnt, bool dummy_wrb)
{
	dma_addr_t busaddr;
	int i, copied = 0;
	struct device *dev = &adapter->pdev->dev;
	struct sk_buff *first_skb = skb;
	struct be_queue_info *txq = &adapter->tx_obj.q;
	struct be_eth_wrb *wrb;
	struct be_eth_hdr_wrb *hdr;
	bool map_single = false;
	u16 map_head;

	hdr = queue_head_node(txq);
	queue_head_inc(txq);
	map_head = txq->head;

	if (skb->len > skb->data_len) {
		int len = skb_headlen(skb);
		busaddr = dma_map_single(dev, skb->data, len, DMA_TO_DEVICE);
		if (dma_mapping_error(dev, busaddr))
			goto dma_err;
		map_single = true;
		wrb = queue_head_node(txq);
		wrb_fill(wrb, busaddr, len);
		be_dws_cpu_to_le(wrb, sizeof(*wrb));
		queue_head_inc(txq);
		copied += len;
	}

	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		struct skb_frag_struct *frag =
			&skb_shinfo(skb)->frags[i];
		busaddr = dma_map_page(dev, frag->page, frag->page_offset,
				       frag->size, DMA_TO_DEVICE);
		if (dma_mapping_error(dev, busaddr))
			goto dma_err;
		wrb = queue_head_node(txq);
		wrb_fill(wrb, busaddr, frag->size);
		be_dws_cpu_to_le(wrb, sizeof(*wrb));
		queue_head_inc(txq);
		copied += frag->size;
	}

	if (dummy_wrb) {
		wrb = queue_head_node(txq);
		wrb_fill(wrb, 0, 0);
		be_dws_cpu_to_le(wrb, sizeof(*wrb));
		queue_head_inc(txq);
	}

	wrb_fill_hdr(adapter, hdr, first_skb, wrb_cnt, copied);
	be_dws_cpu_to_le(hdr, sizeof(*hdr));

	return copied;
dma_err:
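	/* Unwind: rewind the queue head and unmap every frag mapped so far;
	 * only the first WRB can be a single (skb head) mapping. */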
	txq->head = map_head;
	while (copied) {
		wrb = queue_head_node(txq);
		unmap_tx_frag(dev, wrb, map_single);
		map_single = false;
		copied -= wrb->frag_len;
		queue_head_inc(txq);
	}
	return 0;
}

static netdev_tx_t be_xmit(struct sk_buff *skb,
			struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_tx_obj *tx_obj = &adapter->tx_obj;
	struct be_queue_info *txq = &tx_obj->q;
	u32 wrb_cnt = 0, copied = 0;
	u32 start = txq->head;
	bool dummy_wrb, stopped = false;

	wrb_cnt = wrb_cnt_for_skb(adapter, skb, &dummy_wrb);

	copied = make_tx_wrbs(adapter, skb, wrb_cnt, dummy_wrb);
	if (copied) {
		/* record the sent skb in the sent_skb table */
		BUG_ON(tx_obj->sent_skb_list[start]);
		tx_obj->sent_skb_list[start] = skb;

		/* Ensure txq has space for the next skb; Else stop the queue
		 * *BEFORE* ringing the tx doorbell, so that we serialize the
		 * tx compls of the current transmit which'll wake up the queue
		 */
		atomic_add(wrb_cnt, &txq->used);
		if ((BE_MAX_TX_FRAG_COUNT + atomic_read(&txq->used)) >=
								txq->len) {
			netif_stop_queue(netdev);
			stopped = true;
		}

		be_txq_notify(adapter, txq->id, wrb_cnt);

		be_tx_stats_update(adapter, wrb_cnt, copied,
				skb_shinfo(skb)->gso_segs, stopped);
	} else {
		txq->head = start;
		dev_kfree_skb_any(skb);
	}
	return NETDEV_TX_OK;
}

static int be_change_mtu(struct net_device *netdev, int new_mtu)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	if (new_mtu < BE_MIN_MTU ||
			new_mtu > (BE_MAX_JUMBO_FRAME_SIZE -
					(ETH_HLEN + ETH_FCS_LEN))) {
		dev_info(&adapter->pdev->dev,
			"MTU must be between %d and %d bytes\n",
			BE_MIN_MTU,
			(BE_MAX_JUMBO_FRAME_SIZE - (ETH_HLEN + ETH_FCS_LEN)));
		return -EINVAL;
	}
	dev_info(&adapter->pdev->dev, "MTU changed from %d to %d bytes\n",
			netdev->mtu, new_mtu);
	netdev->mtu = new_mtu;
	return 0;
}

/*
 * A max of 64 (BE_NUM_VLANS_SUPPORTED) vlans can be configured in BE.
 * If the user configures more, place BE in vlan promiscuous mode.
 */
static int be_vid_config(struct be_adapter *adapter, bool vf, u32 vf_num)
{
	u16 vtag[BE_NUM_VLANS_SUPPORTED];
	u16 ntags = 0, i;
	int status = 0;
	u32 if_handle;

	if (vf) {
		if_handle = adapter->vf_cfg[vf_num].vf_if_handle;
		vtag[0] = cpu_to_le16(adapter->vf_cfg[vf_num].vf_vlan_tag);
		status = be_cmd_vlan_config(adapter, if_handle, vtag, 1, 1, 0);
	}

	if (adapter->vlans_added <= adapter->max_vlans) {
		/* Construct VLAN Table to give to HW */
		for (i = 0; i < VLAN_N_VID; i++) {
			if (adapter->vlan_tag[i]) {
				vtag[ntags] = cpu_to_le16(i);
				ntags++;
			}
		}
		status = be_cmd_vlan_config(adapter, adapter->if_handle,
					vtag, ntags, 1, 0);
	} else {
		status = be_cmd_vlan_config(adapter, adapter->if_handle,
					NULL, 0, 1, 1);
	}

	return status;
}

static void be_vlan_register(struct net_device *netdev, struct vlan_group *grp)
{
	struct be_adapter *adapter = netdev_priv(netdev);

	adapter->vlan_grp = grp;
}

static void be_vlan_add_vid(struct net_device *netdev, u16 vid)
{
	struct be_adapter *adapter = netdev_priv(netdev);

	adapter->vlans_added++;
	if (!be_physfn(adapter))
		return;

	adapter->vlan_tag[vid] = 1;
	if (adapter->vlans_added <= (adapter->max_vlans + 1))
		be_vid_config(adapter, false, 0);
}

static void be_vlan_rem_vid(struct net_device *netdev, u16 vid)
{
	struct be_adapter *adapter = netdev_priv(netdev);

	adapter->vlans_added--;
	vlan_group_set_device(adapter->vlan_grp, vid, NULL);

	if (!be_physfn(adapter))
		return;

	adapter->vlan_tag[vid] = 0;
	if (adapter->vlans_added <= adapter->max_vlans)
		be_vid_config(adapter, false, 0);
}

static void be_set_multicast_list(struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);

	if (netdev->flags & IFF_PROMISC) {
		be_cmd_promiscuous_config(adapter, adapter->port_num, 1);
		adapter->promiscuous = true;
		goto done;
	}

	/* BE was previously in promiscuous mode; disable it */
	if (adapter->promiscuous) {
		adapter->promiscuous = false;
		be_cmd_promiscuous_config(adapter, adapter->port_num, 0);
	}

	/* Enable multicast promisc if num configured exceeds what we support */
	if (netdev->flags & IFF_ALLMULTI ||
	    netdev_mc_count(netdev) > BE_MAX_MC) {
		be_cmd_multicast_set(adapter, adapter->if_handle, NULL,
				&adapter->mc_cmd_mem);
		goto done;
	}

	be_cmd_multicast_set(adapter, adapter->if_handle, netdev,
			&adapter->mc_cmd_mem);
done:
	return;
}

static int be_set_vf_mac(struct net_device *netdev, int vf, u8 *mac)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	int status;

	if (!adapter->sriov_enabled)
		return -EPERM;

	if (!is_valid_ether_addr(mac) || (vf >= num_vfs))
		return -EINVAL;

	if (adapter->vf_cfg[vf].vf_pmac_id != BE_INVALID_PMAC_ID)
		status = be_cmd_pmac_del(adapter,
				adapter->vf_cfg[vf].vf_if_handle,
				adapter->vf_cfg[vf].vf_pmac_id, vf + 1);

	status = be_cmd_pmac_add(adapter, mac,
			adapter->vf_cfg[vf].vf_if_handle,
			&adapter->vf_cfg[vf].vf_pmac_id, vf + 1);

	if (status)
		dev_err(&adapter->pdev->dev, "MAC %pM set on VF %d failed\n",
			mac, vf);
	else
		memcpy(adapter->vf_cfg[vf].vf_mac_addr, mac, ETH_ALEN);

	return status;
}

static int be_get_vf_config(struct net_device *netdev, int vf,
			struct ifla_vf_info *vi)
{
	struct be_adapter *adapter = netdev_priv(netdev);

	if (!adapter->sriov_enabled)
		return -EPERM;

	if (vf >= num_vfs)
		return -EINVAL;

	vi->vf = vf;
	vi->tx_rate = adapter->vf_cfg[vf].vf_tx_rate;
	vi->vlan = adapter->vf_cfg[vf].vf_vlan_tag;
	vi->qos = 0;
	memcpy(&vi->mac, adapter->vf_cfg[vf].vf_mac_addr, ETH_ALEN);

	return 0;
}

static int be_set_vf_vlan(struct net_device *netdev,
			int vf, u16 vlan, u8 qos)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	int status = 0;

	if (!adapter->sriov_enabled)
		return -EPERM;

	if ((vf >= num_vfs) || (vlan > 4095))
		return -EINVAL;

	if (vlan) {
		adapter->vf_cfg[vf].vf_vlan_tag = vlan;
		adapter->vlans_added++;
	} else {
		adapter->vf_cfg[vf].vf_vlan_tag = 0;
		adapter->vlans_added--;
	}

	status = be_vid_config(adapter, true, vf);

	if (status)
		dev_info(&adapter->pdev->dev,
				"VLAN %d config on VF %d failed\n", vlan, vf);
	return status;
}

static int be_set_vf_tx_rate(struct net_device *netdev,
			int vf, int rate)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	int status = 0;

	if (!adapter->sriov_enabled)
		return -EPERM;

	if ((vf >= num_vfs) || (rate < 0))
		return -EINVAL;

	if (rate > 10000)
		rate = 10000;

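	/* rate is in Mbps, capped at 10Gbps above; the QOS firmware command
	 * appears to take units of 10 Mbps, hence rate / 10. */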
	adapter->vf_cfg[vf].vf_tx_rate = rate;
	status = be_cmd_set_qos(adapter, rate / 10, vf + 1);

	if (status)
		dev_info(&adapter->pdev->dev,
				"tx rate %d on VF %d failed\n", rate, vf);
	return status;
}

static void be_rx_rate_update(struct be_rx_obj *rxo)
{
	struct be_rx_stats *stats = &rxo->stats;
	ulong now = jiffies;

	/* Wrapped around */
	if (time_before(now, stats->rx_jiffies)) {
		stats->rx_jiffies = now;
		return;
	}

	/* Update the rate once in two seconds */
	if ((now - stats->rx_jiffies) < 2 * HZ)
		return;

	stats->rx_rate = be_calc_rate(stats->rx_bytes - stats->rx_bytes_prev,
				now - stats->rx_jiffies);
	stats->rx_jiffies = now;
	stats->rx_bytes_prev = stats->rx_bytes;
}

static void be_rx_stats_update(struct be_rx_obj *rxo,
		struct be_rx_compl_info *rxcp)
{
	struct be_rx_stats *stats = &rxo->stats;

	stats->rx_compl++;
	stats->rx_frags += rxcp->num_rcvd;
	stats->rx_bytes += rxcp->pkt_size;
	stats->rx_pkts++;
	if (rxcp->pkt_type == BE_MULTICAST_PACKET)
		stats->rx_mcast_pkts++;
	if (rxcp->err)
		stats->rxcp_err++;
}

static inline bool csum_passed(struct be_rx_compl_info *rxcp)
{
	/* L4 checksum is not reliable for non TCP/UDP packets.
	 * Also ignore ipcksm for ipv6 pkts */
	return (rxcp->tcpf || rxcp->udpf) && rxcp->l4_csum &&
				(rxcp->ip_csum || rxcp->ipv6);
}

static struct be_rx_page_info *
get_rx_page_info(struct be_adapter *adapter,
		struct be_rx_obj *rxo,
		u16 frag_idx)
{
	struct be_rx_page_info *rx_page_info;
	struct be_queue_info *rxq = &rxo->q;

	rx_page_info = &rxo->page_info_tbl[frag_idx];
	BUG_ON(!rx_page_info->page);

	if (rx_page_info->last_page_user) {
		dma_unmap_page(&adapter->pdev->dev,
			       dma_unmap_addr(rx_page_info, bus),
			       adapter->big_page_size, DMA_FROM_DEVICE);
		rx_page_info->last_page_user = false;
	}

	atomic_dec(&rxq->used);
	return rx_page_info;
}

/* Throwaway the data in the Rx completion */
static void be_rx_compl_discard(struct be_adapter *adapter,
		struct be_rx_obj *rxo,
		struct be_rx_compl_info *rxcp)
{
	struct be_queue_info *rxq = &rxo->q;
	struct be_rx_page_info *page_info;
	u16 i, num_rcvd = rxcp->num_rcvd;

	for (i = 0; i < num_rcvd; i++) {
		page_info = get_rx_page_info(adapter, rxo, rxcp->rxq_idx);
		put_page(page_info->page);
		memset(page_info, 0, sizeof(*page_info));
		index_inc(&rxcp->rxq_idx, rxq->len);
	}
}

/*
 * skb_fill_rx_data forms a complete skb for an ether frame
 * indicated by rxcp.
 */
static void skb_fill_rx_data(struct be_adapter *adapter, struct be_rx_obj *rxo,
			struct sk_buff *skb, struct be_rx_compl_info *rxcp)
{
	struct be_queue_info *rxq = &rxo->q;
	struct be_rx_page_info *page_info;
	u16 i, j;
	u16 hdr_len, curr_frag_len, remaining;
	u8 *start;

	page_info = get_rx_page_info(adapter, rxo, rxcp->rxq_idx);
	start = page_address(page_info->page) + page_info->page_offset;
	prefetch(start);

	/* Copy data in the first descriptor of this completion */
	curr_frag_len = min(rxcp->pkt_size, rx_frag_size);

	/* Copy the header portion into skb_data */
	hdr_len = min(BE_HDR_LEN, curr_frag_len);
	memcpy(skb->data, start, hdr_len);
	skb->len = curr_frag_len;
	if (curr_frag_len <= BE_HDR_LEN) { /* tiny packet */
		/* Complete packet has now been moved to data */
		put_page(page_info->page);
		skb->data_len = 0;
		skb->tail += curr_frag_len;
	} else {
		skb_shinfo(skb)->nr_frags = 1;
		skb_shinfo(skb)->frags[0].page = page_info->page;
		skb_shinfo(skb)->frags[0].page_offset =
					page_info->page_offset + hdr_len;
		skb_shinfo(skb)->frags[0].size = curr_frag_len - hdr_len;
		skb->data_len = curr_frag_len - hdr_len;
		skb->tail += hdr_len;
	}
	page_info->page = NULL;

	if (rxcp->pkt_size <= rx_frag_size) {
		BUG_ON(rxcp->num_rcvd != 1);
		return;
	}

	/* More frags present for this completion */
	index_inc(&rxcp->rxq_idx, rxq->len);
	remaining = rxcp->pkt_size - curr_frag_len;
	for (i = 1, j = 0; i < rxcp->num_rcvd; i++) {
		page_info = get_rx_page_info(adapter, rxo, rxcp->rxq_idx);
		curr_frag_len = min(remaining, rx_frag_size);

		/* Coalesce all frags from the same physical page in one slot */
		if (page_info->page_offset == 0) {
			/* Fresh page */
			j++;
			skb_shinfo(skb)->frags[j].page = page_info->page;
			skb_shinfo(skb)->frags[j].page_offset =
							page_info->page_offset;
			skb_shinfo(skb)->frags[j].size = 0;
			skb_shinfo(skb)->nr_frags++;
		} else {
			put_page(page_info->page);
		}

		skb_shinfo(skb)->frags[j].size += curr_frag_len;
		skb->len += curr_frag_len;
		skb->data_len += curr_frag_len;

		remaining -= curr_frag_len;
		index_inc(&rxcp->rxq_idx, rxq->len);
		page_info->page = NULL;
	}
	BUG_ON(j > MAX_SKB_FRAGS);
}

/* Process the RX completion indicated by rxcp when GRO is disabled */
static void be_rx_compl_process(struct be_adapter *adapter,
			struct be_rx_obj *rxo,
			struct be_rx_compl_info *rxcp)
{
	struct net_device *netdev = adapter->netdev;
	struct sk_buff *skb;

	skb = netdev_alloc_skb_ip_align(netdev, BE_HDR_LEN);
	if (unlikely(!skb)) {
		if (net_ratelimit())
			dev_warn(&adapter->pdev->dev, "skb alloc failed\n");
		be_rx_compl_discard(adapter, rxo, rxcp);
		return;
	}

	skb_fill_rx_data(adapter, rxo, skb, rxcp);

	if (likely((netdev->features & NETIF_F_RXCSUM) && csum_passed(rxcp)))
		skb->ip_summed = CHECKSUM_UNNECESSARY;
	else
		skb_checksum_none_assert(skb);

	skb->truesize = skb->len + sizeof(struct sk_buff);
	skb->protocol = eth_type_trans(skb, netdev);
	if (adapter->netdev->features & NETIF_F_RXHASH)
		skb->rxhash = rxcp->rss_hash;

	if (unlikely(rxcp->vlanf)) {
		if (!adapter->vlan_grp || adapter->vlans_added == 0) {
			kfree_skb(skb);
			return;
		}
		vlan_hwaccel_receive_skb(skb, adapter->vlan_grp, rxcp->vid);
	} else {
		netif_receive_skb(skb);
	}
}

/* Process the RX completion indicated by rxcp when GRO is enabled */
static void be_rx_compl_process_gro(struct be_adapter *adapter,
		struct be_rx_obj *rxo,
		struct be_rx_compl_info *rxcp)
{
	struct be_rx_page_info *page_info;
	struct sk_buff *skb = NULL;
	struct be_queue_info *rxq = &rxo->q;
	struct be_eq_obj *eq_obj = &rxo->rx_eq;
	u16 remaining, curr_frag_len;
	u16 i, j;

	skb = napi_get_frags(&eq_obj->napi);
	if (!skb) {
		be_rx_compl_discard(adapter, rxo, rxcp);
		return;
	}

	remaining = rxcp->pkt_size;
	for (i = 0, j = -1; i < rxcp->num_rcvd; i++) {
		page_info = get_rx_page_info(adapter, rxo, rxcp->rxq_idx);

		curr_frag_len = min(remaining, rx_frag_size);

		/* Coalesce all frags from the same physical page in one slot */
		if (i == 0 || page_info->page_offset == 0) {
			/* First frag or Fresh page */
			j++;
			skb_shinfo(skb)->frags[j].page = page_info->page;
			skb_shinfo(skb)->frags[j].page_offset =
							page_info->page_offset;
			skb_shinfo(skb)->frags[j].size = 0;
		} else {
			put_page(page_info->page);
		}
		skb_shinfo(skb)->frags[j].size += curr_frag_len;

		remaining -= curr_frag_len;
		index_inc(&rxcp->rxq_idx, rxq->len);
		memset(page_info, 0, sizeof(*page_info));
	}
	BUG_ON(j > MAX_SKB_FRAGS);

	skb_shinfo(skb)->nr_frags = j + 1;
	skb->len = rxcp->pkt_size;
	skb->data_len = rxcp->pkt_size;
	skb->truesize += rxcp->pkt_size;
	skb->ip_summed = CHECKSUM_UNNECESSARY;
	if (adapter->netdev->features & NETIF_F_RXHASH)
		skb->rxhash = rxcp->rss_hash;

	if (likely(!rxcp->vlanf))
		napi_gro_frags(&eq_obj->napi);
	else
		vlan_gro_frags(&eq_obj->napi, adapter->vlan_grp, rxcp->vid);
}

static void be_parse_rx_compl_v1(struct be_adapter *adapter,
				struct be_eth_rx_compl *compl,
				struct be_rx_compl_info *rxcp)
{
	rxcp->pkt_size =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v1, pktsize, compl);
	rxcp->vlanf = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, vtp, compl);
	rxcp->err = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, err, compl);
	rxcp->tcpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, tcpf, compl);
	rxcp->udpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, udpf, compl);
	rxcp->ip_csum =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v1, ipcksm, compl);
	rxcp->l4_csum =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v1, l4_cksm, compl);
	rxcp->ipv6 =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v1, ip_version, compl);
	rxcp->rxq_idx =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v1, fragndx, compl);
	rxcp->num_rcvd =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v1, numfrags, compl);
	rxcp->pkt_type =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v1, cast_enc, compl);
	rxcp->rss_hash =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v1, rsshash, compl);
	if (rxcp->vlanf) {
		rxcp->vtm = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, vtm,
					  compl);
		rxcp->vid = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, vlan_tag,
					  compl);
	}
}

static void be_parse_rx_compl_v0(struct be_adapter *adapter,
				struct be_eth_rx_compl *compl,
				struct be_rx_compl_info *rxcp)
{
	rxcp->pkt_size =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v0, pktsize, compl);
	rxcp->vlanf = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, vtp, compl);
	rxcp->err = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, err, compl);
	rxcp->tcpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, tcpf, compl);
	rxcp->udpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, udpf, compl);
	rxcp->ip_csum =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v0, ipcksm, compl);
	rxcp->l4_csum =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v0, l4_cksm, compl);
	rxcp->ipv6 =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v0, ip_version, compl);
	rxcp->rxq_idx =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v0, fragndx, compl);
	rxcp->num_rcvd =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v0, numfrags, compl);
	rxcp->pkt_type =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v0, cast_enc, compl);
	rxcp->rss_hash =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v0, rsshash, compl);
	if (rxcp->vlanf) {
		rxcp->vtm = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, vtm,
					  compl);
		rxcp->vid = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, vlan_tag,
					  compl);
	}
}

static struct be_rx_compl_info *be_rx_compl_get(struct be_rx_obj *rxo)
{
	struct be_eth_rx_compl *compl = queue_tail_node(&rxo->cq);
	struct be_rx_compl_info *rxcp = &rxo->rxcp;
	struct be_adapter *adapter = rxo->adapter;

	/* For checking the valid bit it is Ok to use either definition as the
	 * valid bit is at the same position in both v0 and v1 Rx compl */
	if (compl->dw[offsetof(struct amap_eth_rx_compl_v1, valid) / 32] == 0)
		return NULL;

	rmb();
	be_dws_le_to_cpu(compl, sizeof(*compl));

	if (adapter->be3_native)
		be_parse_rx_compl_v1(adapter, compl, rxcp);
	else
		be_parse_rx_compl_v0(adapter, compl, rxcp);

	if (rxcp->vlanf) {
		/* vlanf could be wrongly set in some cards.
		 * ignore if vtm is not set */
		if ((adapter->function_mode & 0x400) && !rxcp->vtm)
			rxcp->vlanf = 0;

		if (!lancer_chip(adapter))
			rxcp->vid = swab16(rxcp->vid);

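		/* A tag equal to the port's default vid (pvid) that the host
		 * never configured appears to be hw/firmware-inserted; hide
		 * it from the stack. */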
		if ((adapter->pvid == rxcp->vid) &&
			!adapter->vlan_tag[rxcp->vid])
			rxcp->vlanf = 0;
	}

	/* As the compl has been parsed, reset it; we won't touch it again */
	compl->dw[offsetof(struct amap_eth_rx_compl_v1, valid) / 32] = 0;

	queue_tail_inc(&rxo->cq);
	return rxcp;
}

static inline struct page *be_alloc_pages(u32 size, gfp_t gfp)
{
	u32 order = get_order(size);

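	/* Make higher-order allocations compound pages so per-fragment
	 * get_page()/put_page() calls pin the head page correctly. */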
	if (order > 0)
		gfp |= __GFP_COMP;
	return alloc_pages(gfp, order);
}

/*
 * Allocate a page, split it to fragments of size rx_frag_size and post as
 * receive buffers to BE
 */
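/* A sketch of the layout (values assumed, not taken from this file): with
 * rx_frag_size = 2048 and a 16K big_page_size, one page yields eight RX
 * fragments sharing a single DMA mapping; last_page_user marks the final
 * fragment so get_rx_page_info() unmaps the page exactly once. */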
static void be_post_rx_frags(struct be_rx_obj *rxo, gfp_t gfp)
{
	struct be_adapter *adapter = rxo->adapter;
	struct be_rx_page_info *page_info_tbl = rxo->page_info_tbl;
	struct be_rx_page_info *page_info = NULL, *prev_page_info = NULL;
	struct be_queue_info *rxq = &rxo->q;
	struct page *pagep = NULL;
	struct be_eth_rx_d *rxd;
	u64 page_dmaaddr = 0, frag_dmaaddr;
	u32 posted, page_offset = 0;

	page_info = &rxo->page_info_tbl[rxq->head];
	for (posted = 0; posted < MAX_RX_POST && !page_info->page; posted++) {
		if (!pagep) {
			pagep = be_alloc_pages(adapter->big_page_size, gfp);
			if (unlikely(!pagep)) {
				rxo->stats.rx_post_fail++;
				break;
			}
			page_dmaaddr = dma_map_page(&adapter->pdev->dev, pagep,
						    0, adapter->big_page_size,
						    DMA_FROM_DEVICE);
			page_info->page_offset = 0;
		} else {
			get_page(pagep);
			page_info->page_offset = page_offset + rx_frag_size;
		}
		page_offset = page_info->page_offset;
		page_info->page = pagep;
		dma_unmap_addr_set(page_info, bus, page_dmaaddr);
		frag_dmaaddr = page_dmaaddr + page_info->page_offset;

		rxd = queue_head_node(rxq);
		rxd->fragpa_lo = cpu_to_le32(frag_dmaaddr & 0xFFFFFFFF);
		rxd->fragpa_hi = cpu_to_le32(upper_32_bits(frag_dmaaddr));

		/* Any space left in the current big page for another frag? */
		if ((page_offset + rx_frag_size + rx_frag_size) >
					adapter->big_page_size) {
			pagep = NULL;
			page_info->last_page_user = true;
		}

		prev_page_info = page_info;
		queue_head_inc(rxq);
		page_info = &page_info_tbl[rxq->head];
	}
	if (pagep)
		prev_page_info->last_page_user = true;

	if (posted) {
		atomic_add(posted, &rxq->used);
		be_rxq_notify(adapter, rxq->id, posted);
	} else if (atomic_read(&rxq->used) == 0) {
		/* Let be_worker replenish when memory is available */
		rxo->rx_post_starved = true;
	}
}
1258
static struct be_eth_tx_compl *be_tx_compl_get(struct be_queue_info *tx_cq)
{
	struct be_eth_tx_compl *txcp = queue_tail_node(tx_cq);

	if (txcp->dw[offsetof(struct amap_eth_tx_compl, valid) / 32] == 0)
		return NULL;

	rmb();
	be_dws_le_to_cpu(txcp, sizeof(*txcp));

	txcp->dw[offsetof(struct amap_eth_tx_compl, valid) / 32] = 0;

	queue_tail_inc(tx_cq);
	return txcp;
}

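/* Reclaim the wrbs of one sent skb: starting at the queue tail, skip the
 * header wrb, unmap each fragment up to last_index (the wrb index reported
 * by the TX completion) and free the skb.
 */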
static void be_tx_compl_process(struct be_adapter *adapter, u16 last_index)
{
	struct be_queue_info *txq = &adapter->tx_obj.q;
	struct be_eth_wrb *wrb;
	struct sk_buff **sent_skbs = adapter->tx_obj.sent_skb_list;
	struct sk_buff *sent_skb;
	u16 cur_index, num_wrbs = 1; /* account for hdr wrb */
	bool unmap_skb_hdr = true;

	sent_skb = sent_skbs[txq->tail];
	BUG_ON(!sent_skb);
	sent_skbs[txq->tail] = NULL;

	/* skip header wrb */
	queue_tail_inc(txq);

	do {
		cur_index = txq->tail;
		wrb = queue_tail_node(txq);
		unmap_tx_frag(&adapter->pdev->dev, wrb,
			      (unmap_skb_hdr && skb_headlen(sent_skb)));
		unmap_skb_hdr = false;

		num_wrbs++;
		queue_tail_inc(txq);
	} while (cur_index != last_index);

	atomic_sub(num_wrbs, &txq->used);

	kfree_skb(sent_skb);
}

static inline struct be_eq_entry *event_get(struct be_eq_obj *eq_obj)
{
	struct be_eq_entry *eqe = queue_tail_node(&eq_obj->q);

	if (!eqe->evt)
		return NULL;

	rmb();
	eqe->evt = le32_to_cpu(eqe->evt);
	queue_tail_inc(&eq_obj->q);
	return eqe;
}

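/* Drain the event queue and re-arm it with the number of entries consumed;
 * NAPI is scheduled only when real events were found.
 */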
static int event_handle(struct be_adapter *adapter,
			struct be_eq_obj *eq_obj)
{
	struct be_eq_entry *eqe;
	u16 num = 0;

	while ((eqe = event_get(eq_obj)) != NULL) {
		eqe->evt = 0;
		num++;
	}

	/* Deal with any spurious interrupts that come
	 * without events
	 */
	be_eq_notify(adapter, eq_obj->q.id, true, true, num);
	if (num)
		napi_schedule(&eq_obj->napi);

	return num;
}

/* Just read and notify events without processing them.
 * Used at the time of destroying event queues */
static void be_eq_clean(struct be_adapter *adapter,
			struct be_eq_obj *eq_obj)
{
	struct be_eq_entry *eqe;
	u16 num = 0;

	while ((eqe = event_get(eq_obj)) != NULL) {
		eqe->evt = 0;
		num++;
	}

	if (num)
		be_eq_notify(adapter, eq_obj->q.id, false, true, num);
}

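/* Flush an RX queue on teardown: discard any completions still pending on
 * the CQ, then drop the page refs of posted buffers the hardware never used.
 */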
static void be_rx_q_clean(struct be_adapter *adapter, struct be_rx_obj *rxo)
{
	struct be_rx_page_info *page_info;
	struct be_queue_info *rxq = &rxo->q;
	struct be_queue_info *rx_cq = &rxo->cq;
	struct be_rx_compl_info *rxcp;
	u16 tail;

	/* First cleanup pending rx completions */
	while ((rxcp = be_rx_compl_get(rxo)) != NULL) {
		be_rx_compl_discard(adapter, rxo, rxcp);
		be_cq_notify(adapter, rx_cq->id, false, 1);
	}

	/* Then free posted rx buffers that were not used */
	tail = (rxq->head + rxq->len - atomic_read(&rxq->used)) % rxq->len;
	for (; atomic_read(&rxq->used) > 0; index_inc(&tail, rxq->len)) {
		page_info = get_rx_page_info(adapter, rxo, tail);
		put_page(page_info->page);
		memset(page_info, 0, sizeof(*page_info));
	}
	BUG_ON(atomic_read(&rxq->used));
}

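/* Reap TX completions until the queue drains or 200ms elapse; wrbs still
 * outstanding after that will never complete, so unmap and free their skbs
 * by hand.
 */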
static void be_tx_compl_clean(struct be_adapter *adapter)
{
	struct be_queue_info *tx_cq = &adapter->tx_obj.cq;
	struct be_queue_info *txq = &adapter->tx_obj.q;
	struct be_eth_tx_compl *txcp;
	u16 end_idx, cmpl = 0, timeo = 0;
	struct sk_buff **sent_skbs = adapter->tx_obj.sent_skb_list;
	struct sk_buff *sent_skb;
	bool dummy_wrb;

	/* Wait for a max of 200ms for all the tx-completions to arrive. */
	do {
		while ((txcp = be_tx_compl_get(tx_cq))) {
			end_idx = AMAP_GET_BITS(struct amap_eth_tx_compl,
					wrb_index, txcp);
			be_tx_compl_process(adapter, end_idx);
			cmpl++;
		}
		if (cmpl) {
			be_cq_notify(adapter, tx_cq->id, false, cmpl);
			cmpl = 0;
		}

		if (atomic_read(&txq->used) == 0 || ++timeo > 200)
			break;

		mdelay(1);
	} while (true);

	if (atomic_read(&txq->used))
		dev_err(&adapter->pdev->dev, "%d pending tx-completions\n",
			atomic_read(&txq->used));

	/* free posted tx for which compls will never arrive */
	while (atomic_read(&txq->used)) {
		sent_skb = sent_skbs[txq->tail];
		end_idx = txq->tail;
		index_adv(&end_idx,
			wrb_cnt_for_skb(adapter, sent_skb, &dummy_wrb) - 1,
			txq->len);
		be_tx_compl_process(adapter, end_idx);
	}
}

static void be_mcc_queues_destroy(struct be_adapter *adapter)
{
	struct be_queue_info *q;

	q = &adapter->mcc_obj.q;
	if (q->created)
		be_cmd_q_destroy(adapter, q, QTYPE_MCCQ);
	be_queue_free(adapter, q);

	q = &adapter->mcc_obj.cq;
	if (q->created)
		be_cmd_q_destroy(adapter, q, QTYPE_CQ);
	be_queue_free(adapter, q);
}

/* Must be called only after TX qs are created as MCC shares TX EQ */
static int be_mcc_queues_create(struct be_adapter *adapter)
{
	struct be_queue_info *q, *cq;

	/* Alloc MCC compl queue */
	cq = &adapter->mcc_obj.cq;
	if (be_queue_alloc(adapter, cq, MCC_CQ_LEN,
			sizeof(struct be_mcc_compl)))
		goto err;

	/* Ask BE to create MCC compl queue; share TX's eq */
	if (be_cmd_cq_create(adapter, cq, &adapter->tx_eq.q, false, true, 0))
		goto mcc_cq_free;

	/* Alloc MCC queue */
	q = &adapter->mcc_obj.q;
	if (be_queue_alloc(adapter, q, MCC_Q_LEN, sizeof(struct be_mcc_wrb)))
		goto mcc_cq_destroy;

	/* Ask BE to create MCC queue */
	if (be_cmd_mccq_create(adapter, q, cq))
		goto mcc_q_free;

	return 0;

mcc_q_free:
	be_queue_free(adapter, q);
mcc_cq_destroy:
	be_cmd_q_destroy(adapter, cq, QTYPE_CQ);
mcc_cq_free:
	be_queue_free(adapter, cq);
err:
	return -1;
}

static void be_tx_queues_destroy(struct be_adapter *adapter)
{
	struct be_queue_info *q;

	q = &adapter->tx_obj.q;
	if (q->created)
		be_cmd_q_destroy(adapter, q, QTYPE_TXQ);
	be_queue_free(adapter, q);

	q = &adapter->tx_obj.cq;
	if (q->created)
		be_cmd_q_destroy(adapter, q, QTYPE_CQ);
	be_queue_free(adapter, q);

	/* Clear any residual events */
	be_eq_clean(adapter, &adapter->tx_eq);

	q = &adapter->tx_eq.q;
	if (q->created)
		be_cmd_q_destroy(adapter, q, QTYPE_EQ);
	be_queue_free(adapter, q);
}

static int be_tx_queues_create(struct be_adapter *adapter)
{
	struct be_queue_info *eq, *q, *cq;

	adapter->tx_eq.max_eqd = 0;
	adapter->tx_eq.min_eqd = 0;
	adapter->tx_eq.cur_eqd = 96;
	adapter->tx_eq.enable_aic = false;
	/* Alloc Tx Event queue */
	eq = &adapter->tx_eq.q;
	if (be_queue_alloc(adapter, eq, EVNT_Q_LEN, sizeof(struct be_eq_entry)))
		return -1;

	/* Ask BE to create Tx Event queue */
	if (be_cmd_eq_create(adapter, eq, adapter->tx_eq.cur_eqd))
		goto tx_eq_free;

	adapter->tx_eq.eq_idx = adapter->eq_next_idx++;

	/* Alloc TX eth compl queue */
	cq = &adapter->tx_obj.cq;
	if (be_queue_alloc(adapter, cq, TX_CQ_LEN,
			sizeof(struct be_eth_tx_compl)))
		goto tx_eq_destroy;

	/* Ask BE to create Tx eth compl queue */
	if (be_cmd_cq_create(adapter, cq, eq, false, false, 3))
		goto tx_cq_free;

	/* Alloc TX eth queue */
	q = &adapter->tx_obj.q;
	if (be_queue_alloc(adapter, q, TX_Q_LEN, sizeof(struct be_eth_wrb)))
		goto tx_cq_destroy;

	/* Ask BE to create Tx eth queue */
	if (be_cmd_txq_create(adapter, q, cq))
		goto tx_q_free;
	return 0;

tx_q_free:
	be_queue_free(adapter, q);
tx_cq_destroy:
	be_cmd_q_destroy(adapter, cq, QTYPE_CQ);
tx_cq_free:
	be_queue_free(adapter, cq);
tx_eq_destroy:
	be_cmd_q_destroy(adapter, eq, QTYPE_EQ);
tx_eq_free:
	be_queue_free(adapter, eq);
	return -1;
}

static void be_rx_queues_destroy(struct be_adapter *adapter)
{
	struct be_queue_info *q;
	struct be_rx_obj *rxo;
	int i;

	for_all_rx_queues(adapter, rxo, i) {
		q = &rxo->q;
		if (q->created) {
			be_cmd_q_destroy(adapter, q, QTYPE_RXQ);
			/* After the rxq is invalidated, wait for a grace time
			 * of 1ms for all dma to end and the flush compl to
			 * arrive
			 */
			mdelay(1);
			be_rx_q_clean(adapter, rxo);
		}
		be_queue_free(adapter, q);

		q = &rxo->cq;
		if (q->created)
			be_cmd_q_destroy(adapter, q, QTYPE_CQ);
		be_queue_free(adapter, q);

		/* Clear any residual events */
		q = &rxo->rx_eq.q;
		if (q->created) {
			be_eq_clean(adapter, &rxo->rx_eq);
			be_cmd_q_destroy(adapter, q, QTYPE_EQ);
		}
		be_queue_free(adapter, q);
	}
}

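/* Multiple RX rings (RSS) are requested only when the multi_rxq module
 * param is set, the function advertises the RSS capability, SR-IOV is off
 * and the 0x400 function-mode bit is clear; otherwise a single default
 * queue is used.
 */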
static u32 be_num_rxqs_want(struct be_adapter *adapter)
{
	if (multi_rxq && (adapter->function_caps & BE_FUNCTION_CAPS_RSS) &&
		!adapter->sriov_enabled && !(adapter->function_mode & 0x400)) {
		return 1 + MAX_RSS_QS; /* one default non-RSS queue */
	} else {
		dev_warn(&adapter->pdev->dev,
			"No support for multiple RX queues\n");
		return 1;
	}
}

static int be_rx_queues_create(struct be_adapter *adapter)
{
	struct be_queue_info *eq, *q, *cq;
	struct be_rx_obj *rxo;
	int rc, i;

	adapter->num_rx_qs = min(be_num_rxqs_want(adapter),
				msix_enabled(adapter) ?
					adapter->num_msix_vec - 1 : 1);
	if (adapter->num_rx_qs != MAX_RX_QS)
		dev_warn(&adapter->pdev->dev,
			"Can create only %d RX queues", adapter->num_rx_qs);

	adapter->big_page_size = (1 << get_order(rx_frag_size)) * PAGE_SIZE;
	for_all_rx_queues(adapter, rxo, i) {
		rxo->adapter = adapter;
		rxo->rx_eq.max_eqd = BE_MAX_EQD;
		rxo->rx_eq.enable_aic = true;

		/* EQ */
		eq = &rxo->rx_eq.q;
		rc = be_queue_alloc(adapter, eq, EVNT_Q_LEN,
					sizeof(struct be_eq_entry));
		if (rc)
			goto err;

		rc = be_cmd_eq_create(adapter, eq, rxo->rx_eq.cur_eqd);
		if (rc)
			goto err;

		rxo->rx_eq.eq_idx = adapter->eq_next_idx++;

		/* CQ */
		cq = &rxo->cq;
		rc = be_queue_alloc(adapter, cq, RX_CQ_LEN,
				sizeof(struct be_eth_rx_compl));
		if (rc)
			goto err;

		rc = be_cmd_cq_create(adapter, cq, eq, false, false, 3);
		if (rc)
			goto err;
		/* Rx Q */
		q = &rxo->q;
		rc = be_queue_alloc(adapter, q, RX_Q_LEN,
				sizeof(struct be_eth_rx_d));
		if (rc)
			goto err;

		rc = be_cmd_rxq_create(adapter, q, cq->id, rx_frag_size,
			BE_MAX_JUMBO_FRAME_SIZE, adapter->if_handle,
			(i > 0) ? 1 : 0/* rss enable */, &rxo->rss_id);
		if (rc)
			goto err;
	}

	if (be_multi_rxq(adapter)) {
		u8 rsstable[MAX_RSS_QS];

		for_all_rss_queues(adapter, rxo, i)
			rsstable[i] = rxo->rss_id;

		rc = be_cmd_rss_config(adapter, rsstable,
			adapter->num_rx_qs - 1);
		if (rc)
			goto err;
	}

	return 0;
err:
	be_rx_queues_destroy(adapter);
	return -1;
}

static bool event_peek(struct be_eq_obj *eq_obj)
{
	struct be_eq_entry *eqe = queue_tail_node(&eq_obj->q);
	if (!eqe->evt)
		return false;
	else
		return true;
}

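/* INTx handler. On Lancer, pending events are found by peeking at each EQ
 * directly; on BE2/BE3 the CEV ISR register identifies the EQs that fired.
 */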
static irqreturn_t be_intx(int irq, void *dev)
{
	struct be_adapter *adapter = dev;
	struct be_rx_obj *rxo;
	int isr, i, tx = 0, rx = 0;

	if (lancer_chip(adapter)) {
		if (event_peek(&adapter->tx_eq))
			tx = event_handle(adapter, &adapter->tx_eq);
		for_all_rx_queues(adapter, rxo, i) {
			if (event_peek(&rxo->rx_eq))
				rx |= event_handle(adapter, &rxo->rx_eq);
		}

		if (!(tx || rx))
			return IRQ_NONE;

	} else {
		isr = ioread32(adapter->csr + CEV_ISR0_OFFSET +
			(adapter->tx_eq.q.id / 8) * CEV_ISR_SIZE);
		if (!isr)
			return IRQ_NONE;

		if ((1 << adapter->tx_eq.eq_idx & isr))
			event_handle(adapter, &adapter->tx_eq);

		for_all_rx_queues(adapter, rxo, i) {
			if ((1 << rxo->rx_eq.eq_idx & isr))
				event_handle(adapter, &rxo->rx_eq);
		}
	}

	return IRQ_HANDLED;
}

static irqreturn_t be_msix_rx(int irq, void *dev)
{
	struct be_rx_obj *rxo = dev;
	struct be_adapter *adapter = rxo->adapter;

	event_handle(adapter, &rxo->rx_eq);

	return IRQ_HANDLED;
}

static irqreturn_t be_msix_tx_mcc(int irq, void *dev)
{
	struct be_adapter *adapter = dev;

	event_handle(adapter, &adapter->tx_eq);

	return IRQ_HANDLED;
}

static inline bool do_gro(struct be_rx_compl_info *rxcp)
{
	return (rxcp->tcpf && !rxcp->err) ? true : false;
}

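/* NAPI poll for an RX ring: reap up to 'budget' completions, refill the
 * ring when it runs low and re-arm the CQ (rearm is requested only when
 * the budget was not exhausted).
 */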
static int be_poll_rx(struct napi_struct *napi, int budget)
{
	struct be_eq_obj *rx_eq = container_of(napi, struct be_eq_obj, napi);
	struct be_rx_obj *rxo = container_of(rx_eq, struct be_rx_obj, rx_eq);
	struct be_adapter *adapter = rxo->adapter;
	struct be_queue_info *rx_cq = &rxo->cq;
	struct be_rx_compl_info *rxcp;
	u32 work_done;

	rxo->stats.rx_polls++;
	for (work_done = 0; work_done < budget; work_done++) {
		rxcp = be_rx_compl_get(rxo);
		if (!rxcp)
			break;

		/* Ignore flush completions */
		if (rxcp->num_rcvd) {
			if (do_gro(rxcp))
				be_rx_compl_process_gro(adapter, rxo, rxcp);
			else
				be_rx_compl_process(adapter, rxo, rxcp);
		}
		be_rx_stats_update(rxo, rxcp);
	}

	/* Refill the queue */
	if (atomic_read(&rxo->q.used) < RX_FRAGS_REFILL_WM)
		be_post_rx_frags(rxo, GFP_ATOMIC);

	/* All consumed */
	if (work_done < budget) {
		napi_complete(napi);
		be_cq_notify(adapter, rx_cq->id, true, work_done);
	} else {
		/* More to be consumed; continue with interrupts disabled */
		be_cq_notify(adapter, rx_cq->id, false, work_done);
	}
	return work_done;
}

/* As TX and MCC share the same EQ check for both TX and MCC completions.
 * For TX/MCC we don't honour budget; consume everything
 */
static int be_poll_tx_mcc(struct napi_struct *napi, int budget)
{
	struct be_eq_obj *tx_eq = container_of(napi, struct be_eq_obj, napi);
	struct be_adapter *adapter =
		container_of(tx_eq, struct be_adapter, tx_eq);
	struct be_queue_info *txq = &adapter->tx_obj.q;
	struct be_queue_info *tx_cq = &adapter->tx_obj.cq;
	struct be_eth_tx_compl *txcp;
	int tx_compl = 0, mcc_compl, status = 0;
	u16 end_idx;

	while ((txcp = be_tx_compl_get(tx_cq))) {
		end_idx = AMAP_GET_BITS(struct amap_eth_tx_compl,
				wrb_index, txcp);
		be_tx_compl_process(adapter, end_idx);
		tx_compl++;
	}

	mcc_compl = be_process_mcc(adapter, &status);

	napi_complete(napi);

	if (mcc_compl) {
		struct be_mcc_obj *mcc_obj = &adapter->mcc_obj;
		be_cq_notify(adapter, mcc_obj->cq.id, true, mcc_compl);
	}

	if (tx_compl) {
		be_cq_notify(adapter, adapter->tx_obj.cq.id, true, tx_compl);

		/* As Tx wrbs have been freed up, wake up netdev queue if
		 * it was stopped due to lack of tx wrbs.
		 */
		if (netif_queue_stopped(adapter->netdev) &&
			atomic_read(&txq->used) < txq->len / 2) {
			netif_wake_queue(adapter->netdev);
		}

		tx_stats(adapter)->be_tx_events++;
		tx_stats(adapter)->be_tx_compl += tx_compl;
	}

	return 1;
}

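/* Read the UE (unrecoverable error) status words from PCI config space,
 * mask off the bits firmware wants ignored and log a description of every
 * bit that remains set.
 */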
void be_detect_dump_ue(struct be_adapter *adapter)
{
	u32 ue_status_lo, ue_status_hi, ue_status_lo_mask, ue_status_hi_mask;
	u32 i;

	pci_read_config_dword(adapter->pdev,
				PCICFG_UE_STATUS_LOW, &ue_status_lo);
	pci_read_config_dword(adapter->pdev,
				PCICFG_UE_STATUS_HIGH, &ue_status_hi);
	pci_read_config_dword(adapter->pdev,
				PCICFG_UE_STATUS_LOW_MASK, &ue_status_lo_mask);
	pci_read_config_dword(adapter->pdev,
				PCICFG_UE_STATUS_HI_MASK, &ue_status_hi_mask);

	ue_status_lo = (ue_status_lo & (~ue_status_lo_mask));
	ue_status_hi = (ue_status_hi & (~ue_status_hi_mask));

	if (ue_status_lo || ue_status_hi) {
		adapter->ue_detected = true;
		adapter->eeh_err = true;
		dev_err(&adapter->pdev->dev, "UE Detected!!\n");
	}

	if (ue_status_lo) {
		for (i = 0; ue_status_lo; ue_status_lo >>= 1, i++) {
			if (ue_status_lo & 1)
				dev_err(&adapter->pdev->dev,
					"UE: %s bit set\n", ue_status_low_desc[i]);
		}
	}
	if (ue_status_hi) {
		for (i = 0; ue_status_hi; ue_status_hi >>= 1, i++) {
			if (ue_status_hi & 1)
				dev_err(&adapter->pdev->dev,
					"UE: %s bit set\n", ue_status_hi_desc[i]);
		}
	}
}

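/* Periodic worker, rescheduled every second: checks for UEs, reaps MCC
 * completions while interrupts are off, refreshes stats and rates, adapts
 * the RX EQ delay and replenishes any RX ring that starved for memory.
 */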
static void be_worker(struct work_struct *work)
{
	struct be_adapter *adapter =
		container_of(work, struct be_adapter, work.work);
	struct be_rx_obj *rxo;
	int i;

	if (!adapter->ue_detected && !lancer_chip(adapter))
		be_detect_dump_ue(adapter);

	/* when interrupts are not yet enabled, just reap any pending
	 * mcc completions */
	if (!netif_running(adapter->netdev)) {
		int mcc_compl, status = 0;

		mcc_compl = be_process_mcc(adapter, &status);

		if (mcc_compl) {
			struct be_mcc_obj *mcc_obj = &adapter->mcc_obj;
			be_cq_notify(adapter, mcc_obj->cq.id, false, mcc_compl);
		}

		goto reschedule;
	}

	if (!adapter->stats_cmd_sent)
		be_cmd_get_stats(adapter, &adapter->stats_cmd);

	be_tx_rate_update(adapter);

	for_all_rx_queues(adapter, rxo, i) {
		be_rx_rate_update(rxo);
		be_rx_eqd_update(adapter, rxo);

		if (rxo->rx_post_starved) {
			rxo->rx_post_starved = false;
			be_post_rx_frags(rxo, GFP_KERNEL);
		}
	}

reschedule:
	schedule_delayed_work(&adapter->work, msecs_to_jiffies(1000));
}

static void be_msix_disable(struct be_adapter *adapter)
{
	if (msix_enabled(adapter)) {
		pci_disable_msix(adapter->pdev);
		adapter->num_msix_vec = 0;
	}
}

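/* Ask for one MSI-x vector per desired RX ring plus one shared by TX and
 * MCC; if the kernel offers fewer, retry with the offered count as long as
 * it meets the two-vector minimum.
 */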
static void be_msix_enable(struct be_adapter *adapter)
{
#define BE_MIN_MSIX_VECTORS	(1 + 1) /* Rx + Tx */
	int i, status, num_vec;

	num_vec = be_num_rxqs_want(adapter) + 1;

	for (i = 0; i < num_vec; i++)
		adapter->msix_entries[i].entry = i;

	status = pci_enable_msix(adapter->pdev, adapter->msix_entries, num_vec);
	if (status == 0) {
		goto done;
	} else if (status >= BE_MIN_MSIX_VECTORS) {
		num_vec = status;
		if (pci_enable_msix(adapter->pdev, adapter->msix_entries,
				num_vec) == 0)
			goto done;
	}
	return;
done:
	adapter->num_msix_vec = num_vec;
	return;
}

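/* Enable SR-IOV when this is a PF and the num_vfs module param is set,
 * capping the request at the TotalVFs count reported by the SR-IOV
 * capability.
 */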
static void be_sriov_enable(struct be_adapter *adapter)
{
	be_check_sriov_fn_type(adapter);
#ifdef CONFIG_PCI_IOV
	if (be_physfn(adapter) && num_vfs) {
		int status, pos;
		u16 nvfs;

		pos = pci_find_ext_capability(adapter->pdev,
						PCI_EXT_CAP_ID_SRIOV);
		pci_read_config_word(adapter->pdev,
					pos + PCI_SRIOV_TOTAL_VF, &nvfs);

		if (num_vfs > nvfs) {
			dev_info(&adapter->pdev->dev,
				"Device supports %d VFs and not %d\n",
				nvfs, num_vfs);
			num_vfs = nvfs;
		}

		status = pci_enable_sriov(adapter->pdev, num_vfs);
		adapter->sriov_enabled = status ? false : true;
	}
#endif
}

static void be_sriov_disable(struct be_adapter *adapter)
{
#ifdef CONFIG_PCI_IOV
	if (adapter->sriov_enabled) {
		pci_disable_sriov(adapter->pdev);
		adapter->sriov_enabled = false;
	}
#endif
}

static inline int be_msix_vec_get(struct be_adapter *adapter,
			struct be_eq_obj *eq_obj)
{
	return adapter->msix_entries[eq_obj->eq_idx].vector;
}

static int be_request_irq(struct be_adapter *adapter,
		struct be_eq_obj *eq_obj,
		void *handler, char *desc, void *context)
{
	struct net_device *netdev = adapter->netdev;
	int vec;

	sprintf(eq_obj->desc, "%s-%s", netdev->name, desc);
	vec = be_msix_vec_get(adapter, eq_obj);
	return request_irq(vec, handler, 0, eq_obj->desc, context);
}

static void be_free_irq(struct be_adapter *adapter, struct be_eq_obj *eq_obj,
			void *context)
{
	int vec = be_msix_vec_get(adapter, eq_obj);
	free_irq(vec, context);
}

static int be_msix_register(struct be_adapter *adapter)
{
	struct be_rx_obj *rxo;
	int status, i;
	char qname[10];

	status = be_request_irq(adapter, &adapter->tx_eq, be_msix_tx_mcc, "tx",
				adapter);
	if (status)
		goto err;

	for_all_rx_queues(adapter, rxo, i) {
		sprintf(qname, "rxq%d", i);
		status = be_request_irq(adapter, &rxo->rx_eq, be_msix_rx,
				qname, rxo);
		if (status)
			goto err_msix;
	}

	return 0;

err_msix:
	be_free_irq(adapter, &adapter->tx_eq, adapter);

	for (i--, rxo = &adapter->rx_obj[i]; i >= 0; i--, rxo--)
		be_free_irq(adapter, &rxo->rx_eq, rxo);

err:
	dev_warn(&adapter->pdev->dev,
		"MSIX Request IRQ failed - err %d\n", status);
	be_msix_disable(adapter);
	return status;
}

static int be_irq_register(struct be_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	int status;

	if (msix_enabled(adapter)) {
		status = be_msix_register(adapter);
		if (status == 0)
			goto done;
		/* INTx is not supported for VF */
		if (!be_physfn(adapter))
			return status;
	}

	/* INTx */
	netdev->irq = adapter->pdev->irq;
	status = request_irq(netdev->irq, be_intx, IRQF_SHARED, netdev->name,
			adapter);
	if (status) {
		dev_err(&adapter->pdev->dev,
			"INTx request IRQ failed - err %d\n", status);
		return status;
	}
done:
	adapter->isr_registered = true;
	return 0;
}

static void be_irq_unregister(struct be_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	struct be_rx_obj *rxo;
	int i;

	if (!adapter->isr_registered)
		return;

	/* INTx */
	if (!msix_enabled(adapter)) {
		free_irq(netdev->irq, adapter);
		goto done;
	}

	/* MSIx */
	be_free_irq(adapter, &adapter->tx_eq, adapter);

	for_all_rx_queues(adapter, rxo, i)
		be_free_irq(adapter, &rxo->rx_eq, rxo);

done:
	adapter->isr_registered = false;
}

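/* ndo_stop: disable async MCC processing and interrupts, quiesce NAPI,
 * flush the CQs on Lancer, unhook the IRQs and then wait for all in-flight
 * TX completions so every sent skb is freed.
 */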
static int be_close(struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_rx_obj *rxo;
	struct be_eq_obj *tx_eq = &adapter->tx_eq;
	int vec, i;

	be_async_mcc_disable(adapter);

	netif_carrier_off(netdev);
	adapter->link_up = false;

	if (!lancer_chip(adapter))
		be_intr_set(adapter, false);

	for_all_rx_queues(adapter, rxo, i)
		napi_disable(&rxo->rx_eq.napi);

	napi_disable(&tx_eq->napi);

	if (lancer_chip(adapter)) {
		be_cq_notify(adapter, adapter->tx_obj.cq.id, false, 0);
		be_cq_notify(adapter, adapter->mcc_obj.cq.id, false, 0);
		for_all_rx_queues(adapter, rxo, i)
			be_cq_notify(adapter, rxo->cq.id, false, 0);
	}

	if (msix_enabled(adapter)) {
		vec = be_msix_vec_get(adapter, tx_eq);
		synchronize_irq(vec);

		for_all_rx_queues(adapter, rxo, i) {
			vec = be_msix_vec_get(adapter, &rxo->rx_eq);
			synchronize_irq(vec);
		}
	} else {
		synchronize_irq(netdev->irq);
	}
	be_irq_unregister(adapter);

	/* Wait for all pending tx completions to arrive so that
	 * all tx skbs are freed.
	 */
	be_tx_compl_clean(adapter);

	return 0;
}

static int be_open(struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_eq_obj *tx_eq = &adapter->tx_eq;
	struct be_rx_obj *rxo;
	bool link_up;
	int status, i;
	u8 mac_speed;
	u16 link_speed;

	for_all_rx_queues(adapter, rxo, i) {
		be_post_rx_frags(rxo, GFP_KERNEL);
		napi_enable(&rxo->rx_eq.napi);
	}
	napi_enable(&tx_eq->napi);

	be_irq_register(adapter);

	if (!lancer_chip(adapter))
		be_intr_set(adapter, true);

	/* The evt queues are created in unarmed state; arm them */
	for_all_rx_queues(adapter, rxo, i) {
		be_eq_notify(adapter, rxo->rx_eq.q.id, true, false, 0);
		be_cq_notify(adapter, rxo->cq.id, true, 0);
	}
	be_eq_notify(adapter, tx_eq->q.id, true, false, 0);

	/* Now that interrupts are on we can process async mcc */
	be_async_mcc_enable(adapter);

	status = be_cmd_link_status_query(adapter, &link_up, &mac_speed,
			&link_speed, 0);
	if (status)
		goto err;
	be_link_status_update(adapter, link_up);

	if (be_physfn(adapter)) {
		status = be_vid_config(adapter, false, 0);
		if (status)
			goto err;

		status = be_cmd_set_flow_control(adapter,
				adapter->tx_fc, adapter->rx_fc);
		if (status)
			goto err;
	}

	return 0;
err:
	be_close(adapter->netdev);
	return -EIO;
}

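/* Program (or clear) the magic-packet WoL filter in firmware and mark the
 * PCI device as wake-capable for the D3hot/D3cold states accordingly.
 */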
static int be_setup_wol(struct be_adapter *adapter, bool enable)
{
	struct be_dma_mem cmd;
	int status = 0;
	u8 mac[ETH_ALEN];

	memset(mac, 0, ETH_ALEN);

	cmd.size = sizeof(struct be_cmd_req_acpi_wol_magic_config);
	cmd.va = dma_alloc_coherent(&adapter->pdev->dev, cmd.size, &cmd.dma,
				    GFP_KERNEL);
	if (cmd.va == NULL)
		return -1;
	memset(cmd.va, 0, cmd.size);

	if (enable) {
		status = pci_write_config_dword(adapter->pdev,
			PCICFG_PM_CONTROL_OFFSET, PCICFG_PM_CONTROL_MASK);
		if (status) {
			dev_err(&adapter->pdev->dev,
				"Could not enable Wake-on-lan\n");
			dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va,
					  cmd.dma);
			return status;
		}
		status = be_cmd_enable_magic_wol(adapter,
				adapter->netdev->dev_addr, &cmd);
		pci_enable_wake(adapter->pdev, PCI_D3hot, 1);
		pci_enable_wake(adapter->pdev, PCI_D3cold, 1);
	} else {
		status = be_cmd_enable_magic_wol(adapter, mac, &cmd);
		pci_enable_wake(adapter->pdev, PCI_D3hot, 0);
		pci_enable_wake(adapter->pdev, PCI_D3cold, 0);
	}

	dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va, cmd.dma);
	return status;
}

/*
 * Generate a seed MAC address from the PF MAC Address using jhash.
 * MAC Addresses for VFs are assigned incrementally starting from the seed.
 * These addresses are programmed in the ASIC by the PF and the VF driver
 * queries for the MAC address during its probe.
 */
static inline int be_vf_eth_addr_config(struct be_adapter *adapter)
{
	u32 vf = 0;
	int status = 0;
	u8 mac[ETH_ALEN];

	be_vf_eth_addr_generate(adapter, mac);

	for (vf = 0; vf < num_vfs; vf++) {
		status = be_cmd_pmac_add(adapter, mac,
					adapter->vf_cfg[vf].vf_if_handle,
					&adapter->vf_cfg[vf].vf_pmac_id,
					vf + 1);
		if (status)
			dev_err(&adapter->pdev->dev,
				"Mac address add failed for VF %d\n", vf);
		else
			memcpy(adapter->vf_cfg[vf].vf_mac_addr, mac, ETH_ALEN);

		mac[5] += 1;
	}
	return status;
}

static inline void be_vf_eth_addr_rem(struct be_adapter *adapter)
{
	u32 vf;

	for (vf = 0; vf < num_vfs; vf++) {
		if (adapter->vf_cfg[vf].vf_pmac_id != BE_INVALID_PMAC_ID)
			be_cmd_pmac_del(adapter,
					adapter->vf_cfg[vf].vf_if_handle,
					adapter->vf_cfg[vf].vf_pmac_id, vf + 1);
	}
}

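/* Bring up the control path: create the interface(s) (the PF's own, plus
 * one per VF when SR-IOV is enabled), then the TX, RX and MCC queue sets,
 * unwinding in reverse order on any failure.
 */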
static int be_setup(struct be_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	u32 cap_flags, en_flags, vf = 0;
	int status;
	u8 mac[ETH_ALEN];

	cap_flags = en_flags = BE_IF_FLAGS_UNTAGGED |
				BE_IF_FLAGS_BROADCAST |
				BE_IF_FLAGS_MULTICAST;

	if (be_physfn(adapter)) {
		cap_flags |= BE_IF_FLAGS_MCAST_PROMISCUOUS |
				BE_IF_FLAGS_PROMISCUOUS |
				BE_IF_FLAGS_PASS_L3L4_ERRORS;
		en_flags |= BE_IF_FLAGS_PASS_L3L4_ERRORS;

		if (adapter->function_caps & BE_FUNCTION_CAPS_RSS) {
			cap_flags |= BE_IF_FLAGS_RSS;
			en_flags |= BE_IF_FLAGS_RSS;
		}
	}

	status = be_cmd_if_create(adapter, cap_flags, en_flags,
			netdev->dev_addr, false/* pmac_invalid */,
			&adapter->if_handle, &adapter->pmac_id, 0);
	if (status != 0)
		goto do_none;

	if (be_physfn(adapter)) {
		if (adapter->sriov_enabled) {
			while (vf < num_vfs) {
				cap_flags = en_flags = BE_IF_FLAGS_UNTAGGED |
						BE_IF_FLAGS_BROADCAST;
				status = be_cmd_if_create(adapter, cap_flags,
					en_flags, mac, true,
					&adapter->vf_cfg[vf].vf_if_handle,
					NULL, vf+1);
				if (status) {
					dev_err(&adapter->pdev->dev,
					"Interface Create failed for VF %d\n",
					vf);
					goto if_destroy;
				}
				adapter->vf_cfg[vf].vf_pmac_id =
						BE_INVALID_PMAC_ID;
				vf++;
			}
		}
	} else {
		status = be_cmd_mac_addr_query(adapter, mac,
			MAC_ADDRESS_TYPE_NETWORK, false, adapter->if_handle);
		if (!status) {
			memcpy(adapter->netdev->dev_addr, mac, ETH_ALEN);
			memcpy(adapter->netdev->perm_addr, mac, ETH_ALEN);
		}
	}

	status = be_tx_queues_create(adapter);
	if (status != 0)
		goto if_destroy;

	status = be_rx_queues_create(adapter);
	if (status != 0)
		goto tx_qs_destroy;

	status = be_mcc_queues_create(adapter);
	if (status != 0)
		goto rx_qs_destroy;

	adapter->link_speed = -1;

	return 0;

rx_qs_destroy:
	be_rx_queues_destroy(adapter);
tx_qs_destroy:
	be_tx_queues_destroy(adapter);
if_destroy:
	if (be_physfn(adapter) && adapter->sriov_enabled)
		for (vf = 0; vf < num_vfs; vf++)
			if (adapter->vf_cfg[vf].vf_if_handle)
				be_cmd_if_destroy(adapter,
					adapter->vf_cfg[vf].vf_if_handle,
					vf + 1);
	be_cmd_if_destroy(adapter, adapter->if_handle, 0);
do_none:
	return status;
}

static int be_clear(struct be_adapter *adapter)
{
	int vf;

	if (be_physfn(adapter) && adapter->sriov_enabled)
		be_vf_eth_addr_rem(adapter);

	be_mcc_queues_destroy(adapter);
	be_rx_queues_destroy(adapter);
	be_tx_queues_destroy(adapter);
	adapter->eq_next_idx = 0;

	if (be_physfn(adapter) && adapter->sriov_enabled)
		for (vf = 0; vf < num_vfs; vf++)
			if (adapter->vf_cfg[vf].vf_if_handle)
				be_cmd_if_destroy(adapter,
					adapter->vf_cfg[vf].vf_if_handle,
					vf + 1);

	be_cmd_if_destroy(adapter, adapter->if_handle, 0);

	/* tell fw we're done with firing cmds */
	be_cmd_fw_clean(adapter);
	return 0;
}

#define FW_FILE_HDR_SIGN	"ServerEngines Corp. "
static bool be_flash_redboot(struct be_adapter *adapter,
			const u8 *p, u32 img_start, int image_size,
			int hdr_size)
{
	u32 crc_offset;
	u8 flashed_crc[4];
	int status;

	crc_offset = hdr_size + img_start + image_size - 4;

	p += crc_offset;

	status = be_cmd_get_flash_crc(adapter, flashed_crc,
			(image_size - 4));
	if (status) {
		dev_err(&adapter->pdev->dev,
		"could not get crc from flash, not flashing redboot\n");
		return false;
	}

	/* update redboot only if crc does not match */
	if (!memcmp(flashed_crc, p, 4))
		return false;
	else
		return true;
}

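/* Walk the per-generation component table and flash each image section in
 * 32KB chunks: intermediate chunks use FLASHROM_OPER_SAVE, the final chunk
 * FLASHROM_OPER_FLASH to commit the image.
 */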
Ajit Khaparde3f0d4562010-02-09 01:30:35 +00002422static int be_flash_data(struct be_adapter *adapter,
Ajit Khaparde84517482009-09-04 03:12:16 +00002423 const struct firmware *fw,
Ajit Khaparde3f0d4562010-02-09 01:30:35 +00002424 struct be_dma_mem *flash_cmd, int num_of_images)
2425
Ajit Khaparde84517482009-09-04 03:12:16 +00002426{
Ajit Khaparde3f0d4562010-02-09 01:30:35 +00002427 int status = 0, i, filehdr_size = 0;
2428 u32 total_bytes = 0, flash_op;
Ajit Khaparde84517482009-09-04 03:12:16 +00002429 int num_bytes;
2430 const u8 *p = fw->data;
2431 struct be_cmd_write_flashrom *req = flash_cmd->va;
Joe Perches215faf92010-12-21 02:16:10 -08002432 const struct flash_comp *pflashcomp;
Sarveshwar Bandi9fe96932010-03-02 22:37:28 +00002433 int num_comp;
Ajit Khaparde84517482009-09-04 03:12:16 +00002434
Joe Perches215faf92010-12-21 02:16:10 -08002435 static const struct flash_comp gen3_flash_types[9] = {
Ajit Khaparde3f0d4562010-02-09 01:30:35 +00002436 { FLASH_iSCSI_PRIMARY_IMAGE_START_g3, IMG_TYPE_ISCSI_ACTIVE,
2437 FLASH_IMAGE_MAX_SIZE_g3},
2438 { FLASH_REDBOOT_START_g3, IMG_TYPE_REDBOOT,
2439 FLASH_REDBOOT_IMAGE_MAX_SIZE_g3},
2440 { FLASH_iSCSI_BIOS_START_g3, IMG_TYPE_BIOS,
2441 FLASH_BIOS_IMAGE_MAX_SIZE_g3},
2442 { FLASH_PXE_BIOS_START_g3, IMG_TYPE_PXE_BIOS,
2443 FLASH_BIOS_IMAGE_MAX_SIZE_g3},
2444 { FLASH_FCoE_BIOS_START_g3, IMG_TYPE_FCOE_BIOS,
2445 FLASH_BIOS_IMAGE_MAX_SIZE_g3},
2446 { FLASH_iSCSI_BACKUP_IMAGE_START_g3, IMG_TYPE_ISCSI_BACKUP,
2447 FLASH_IMAGE_MAX_SIZE_g3},
2448 { FLASH_FCoE_PRIMARY_IMAGE_START_g3, IMG_TYPE_FCOE_FW_ACTIVE,
2449 FLASH_IMAGE_MAX_SIZE_g3},
2450 { FLASH_FCoE_BACKUP_IMAGE_START_g3, IMG_TYPE_FCOE_FW_BACKUP,
Sarveshwar Bandi9fe96932010-03-02 22:37:28 +00002451 FLASH_IMAGE_MAX_SIZE_g3},
2452 { FLASH_NCSI_START_g3, IMG_TYPE_NCSI_FW,
2453 FLASH_NCSI_IMAGE_MAX_SIZE_g3}
Ajit Khaparde3f0d4562010-02-09 01:30:35 +00002454 };
Joe Perches215faf92010-12-21 02:16:10 -08002455 static const struct flash_comp gen2_flash_types[8] = {
Ajit Khaparde3f0d4562010-02-09 01:30:35 +00002456 { FLASH_iSCSI_PRIMARY_IMAGE_START_g2, IMG_TYPE_ISCSI_ACTIVE,
2457 FLASH_IMAGE_MAX_SIZE_g2},
2458 { FLASH_REDBOOT_START_g2, IMG_TYPE_REDBOOT,
2459 FLASH_REDBOOT_IMAGE_MAX_SIZE_g2},
2460 { FLASH_iSCSI_BIOS_START_g2, IMG_TYPE_BIOS,
2461 FLASH_BIOS_IMAGE_MAX_SIZE_g2},
2462 { FLASH_PXE_BIOS_START_g2, IMG_TYPE_PXE_BIOS,
2463 FLASH_BIOS_IMAGE_MAX_SIZE_g2},
2464 { FLASH_FCoE_BIOS_START_g2, IMG_TYPE_FCOE_BIOS,
2465 FLASH_BIOS_IMAGE_MAX_SIZE_g2},
2466 { FLASH_iSCSI_BACKUP_IMAGE_START_g2, IMG_TYPE_ISCSI_BACKUP,
2467 FLASH_IMAGE_MAX_SIZE_g2},
2468 { FLASH_FCoE_PRIMARY_IMAGE_START_g2, IMG_TYPE_FCOE_FW_ACTIVE,
2469 FLASH_IMAGE_MAX_SIZE_g2},
2470 { FLASH_FCoE_BACKUP_IMAGE_START_g2, IMG_TYPE_FCOE_FW_BACKUP,
2471 FLASH_IMAGE_MAX_SIZE_g2}
2472 };
2473
2474 if (adapter->generation == BE_GEN3) {
2475 pflashcomp = gen3_flash_types;
2476 filehdr_size = sizeof(struct flash_file_hdr_g3);
Joe Perches215faf92010-12-21 02:16:10 -08002477 num_comp = ARRAY_SIZE(gen3_flash_types);
Ajit Khaparde3f0d4562010-02-09 01:30:35 +00002478 } else {
2479 pflashcomp = gen2_flash_types;
2480 filehdr_size = sizeof(struct flash_file_hdr_g2);
Joe Perches215faf92010-12-21 02:16:10 -08002481 num_comp = ARRAY_SIZE(gen2_flash_types);
Ajit Khaparde84517482009-09-04 03:12:16 +00002482 }
Sarveshwar Bandi9fe96932010-03-02 22:37:28 +00002483 for (i = 0; i < num_comp; i++) {
2484 if ((pflashcomp[i].optype == IMG_TYPE_NCSI_FW) &&
2485 memcmp(adapter->fw_ver, "3.102.148.0", 11) < 0)
2486 continue;
Ajit Khaparde3f0d4562010-02-09 01:30:35 +00002487 if ((pflashcomp[i].optype == IMG_TYPE_REDBOOT) &&
2488 (!be_flash_redboot(adapter, fw->data,
Ajit Khapardefae21a42011-02-11 13:37:42 +00002489 pflashcomp[i].offset, pflashcomp[i].size, filehdr_size +
2490 (num_of_images * sizeof(struct image_hdr)))))
Ajit Khaparde3f0d4562010-02-09 01:30:35 +00002491 continue;
2492 p = fw->data;
2493 p += filehdr_size + pflashcomp[i].offset
2494 + (num_of_images * sizeof(struct image_hdr));
2495 if (p + pflashcomp[i].size > fw->data + fw->size)
Ajit Khaparde84517482009-09-04 03:12:16 +00002496 return -1;
Ajit Khaparde3f0d4562010-02-09 01:30:35 +00002497 total_bytes = pflashcomp[i].size;
2498 while (total_bytes) {
2499 if (total_bytes > 32*1024)
2500 num_bytes = 32*1024;
2501 else
2502 num_bytes = total_bytes;
2503 total_bytes -= num_bytes;
Ajit Khaparde84517482009-09-04 03:12:16 +00002504
Ajit Khaparde3f0d4562010-02-09 01:30:35 +00002505 if (!total_bytes)
2506 flash_op = FLASHROM_OPER_FLASH;
2507 else
2508 flash_op = FLASHROM_OPER_SAVE;
2509 memcpy(req->params.data_buf, p, num_bytes);
2510 p += num_bytes;
2511 status = be_cmd_write_flashrom(adapter, flash_cmd,
2512 pflashcomp[i].optype, flash_op, num_bytes);
2513 if (status) {
2514 dev_err(&adapter->pdev->dev,
2515 "cmd to write to flash rom failed.\n");
2516 return -1;
2517 }
2518 yield();
Ajit Khaparde84517482009-09-04 03:12:16 +00002519 }
Ajit Khaparde84517482009-09-04 03:12:16 +00002520 }
Ajit Khaparde84517482009-09-04 03:12:16 +00002521 return 0;
2522}
2523
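/*
 * Infer the UFI image generation from the first character of the build
 * string in the flash file header: '3' => GEN3 UFI, '2' => GEN2.
 * Returns 0 if the generation cannot be determined.
 */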
Ajit Khaparde3f0d4562010-02-09 01:30:35 +00002524static int get_ufigen_type(struct flash_file_hdr_g2 *fhdr)
2525{
2526 if (fhdr == NULL)
2527 return 0;
2528 if (fhdr->build[0] == '3')
2529 return BE_GEN3;
2530 else if (fhdr->build[0] == '2')
2531 return BE_GEN2;
2532 else
2533 return 0;
2534}
2535
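/*
 * Firmware flashing entry point, reached via the ethtool flash hook.
 * Fetches the UFI file with request_firmware(), allocates a DMA-coherent
 * flash command buffer (command header plus 32KB of data), checks that the
 * UFI generation matches the adapter generation, and then flashes the
 * matching image(s). A typical invocation from userspace (the interface
 * name here is illustrative) would be:
 *
 *	ethtool -f eth0 firmware.ufi
 */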
Ajit Khaparde84517482009-09-04 03:12:16 +00002536int be_load_fw(struct be_adapter *adapter, u8 *func)
2537{
2538 char fw_file[ETHTOOL_FLASH_MAX_FILENAME];
2539 const struct firmware *fw;
Ajit Khaparde3f0d4562010-02-09 01:30:35 +00002540 struct flash_file_hdr_g2 *fhdr;
2541 struct flash_file_hdr_g3 *fhdr3;
2542 struct image_hdr *img_hdr_ptr = NULL;
Ajit Khaparde84517482009-09-04 03:12:16 +00002543 struct be_dma_mem flash_cmd;
Ajit Khaparde8b93b712010-03-31 01:57:10 +00002544 int status, i = 0, num_imgs = 0;
Ajit Khaparde84517482009-09-04 03:12:16 +00002545 const u8 *p;
Ajit Khaparde84517482009-09-04 03:12:16 +00002546
Sarveshwar Bandid9efd2a2010-11-18 23:44:45 +00002547 if (!netif_running(adapter->netdev)) {
2548 dev_err(&adapter->pdev->dev,
2549 "Firmware load not allowed (interface is down)\n");
2550 return -EPERM;
2551 }
2552
Ajit Khaparde84517482009-09-04 03:12:16 +00002553 strcpy(fw_file, func);
2554
2555 status = request_firmware(&fw, fw_file, &adapter->pdev->dev);
2556 if (status)
2557 goto fw_exit;
2558
2559 p = fw->data;
Ajit Khaparde3f0d4562010-02-09 01:30:35 +00002560 fhdr = (struct flash_file_hdr_g2 *) p;
Ajit Khaparde84517482009-09-04 03:12:16 +00002561 dev_info(&adapter->pdev->dev, "Flashing firmware file %s\n", fw_file);
2562
Ajit Khaparde84517482009-09-04 03:12:16 +00002563 flash_cmd.size = sizeof(struct be_cmd_write_flashrom) + 32*1024;
Ivan Vecera2b7bceb2011-02-02 08:05:12 +00002564 flash_cmd.va = dma_alloc_coherent(&adapter->pdev->dev, flash_cmd.size,
2565 &flash_cmd.dma, GFP_KERNEL);
Ajit Khaparde84517482009-09-04 03:12:16 +00002566 if (!flash_cmd.va) {
2567 status = -ENOMEM;
2568 dev_err(&adapter->pdev->dev,
2569 "Memory allocation failure while flashing\n");
2570 goto fw_exit;
2571 }
2572
Ajit Khaparde3f0d4562010-02-09 01:30:35 +00002573 if ((adapter->generation == BE_GEN3) &&
2574 (get_ufigen_type(fhdr) == BE_GEN3)) {
2575 fhdr3 = (struct flash_file_hdr_g3 *) fw->data;
Ajit Khaparde8b93b712010-03-31 01:57:10 +00002576 num_imgs = le32_to_cpu(fhdr3->num_imgs);
2577 for (i = 0; i < num_imgs; i++) {
Ajit Khaparde3f0d4562010-02-09 01:30:35 +00002578 img_hdr_ptr = (struct image_hdr *) (fw->data +
2579 (sizeof(struct flash_file_hdr_g3) +
Ajit Khaparde8b93b712010-03-31 01:57:10 +00002580 i * sizeof(struct image_hdr)));
2581 if (le32_to_cpu(img_hdr_ptr->imageid) == 1)
2582 status = be_flash_data(adapter, fw, &flash_cmd,
2583 num_imgs);
Ajit Khaparde3f0d4562010-02-09 01:30:35 +00002584 }
2585 } else if ((adapter->generation == BE_GEN2) &&
2586 (get_ufigen_type(fhdr) == BE_GEN2)) {
2587 status = be_flash_data(adapter, fw, &flash_cmd, 0);
2588 } else {
2589 dev_err(&adapter->pdev->dev,
2590 "UFI and Interface are not compatible for flashing\n");
2591 		status = -EINVAL;
Ajit Khaparde84517482009-09-04 03:12:16 +00002592 }
2593
Ivan Vecera2b7bceb2011-02-02 08:05:12 +00002594 dma_free_coherent(&adapter->pdev->dev, flash_cmd.size, flash_cmd.va,
2595 flash_cmd.dma);
Ajit Khaparde84517482009-09-04 03:12:16 +00002596 if (status) {
2597 dev_err(&adapter->pdev->dev, "Firmware load error\n");
2598 goto fw_exit;
2599 }
2600
André Goddard Rosaaf901ca2009-11-14 13:09:05 -02002601 dev_info(&adapter->pdev->dev, "Firmware flashed successfully\n");
Ajit Khaparde84517482009-09-04 03:12:16 +00002602
2603fw_exit:
2604 release_firmware(fw);
2605 return status;
2606}
2607
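/*
 * Netdev callbacks; the ndo_set_vf_* and ndo_get_vf_config entries back
 * the SR-IOV VF management interface (e.g. "ip link set ... vf N ...").
 */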
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002608static struct net_device_ops be_netdev_ops = {
2609 .ndo_open = be_open,
2610 .ndo_stop = be_close,
2611 .ndo_start_xmit = be_xmit,
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002612 .ndo_set_rx_mode = be_set_multicast_list,
2613 .ndo_set_mac_address = be_mac_addr_set,
2614 .ndo_change_mtu = be_change_mtu,
2615 .ndo_validate_addr = eth_validate_addr,
2616 .ndo_vlan_rx_register = be_vlan_register,
2617 .ndo_vlan_rx_add_vid = be_vlan_add_vid,
2618 .ndo_vlan_rx_kill_vid = be_vlan_rem_vid,
Ajit Khaparde64600ea2010-07-23 01:50:34 +00002619 .ndo_set_vf_mac = be_set_vf_mac,
Ajit Khaparde1da87b72010-07-23 01:51:22 +00002620 .ndo_set_vf_vlan = be_set_vf_vlan,
Ajit Khapardee1d18732010-07-23 01:52:13 +00002621 .ndo_set_vf_tx_rate = be_set_vf_tx_rate,
Ajit Khaparde64600ea2010-07-23 01:50:34 +00002622 .ndo_get_vf_config = be_get_vf_config
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002623};
2624
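/*
 * Initialize netdev state: advertise offload features (checksumming, TSO,
 * VLAN offload and, with multiple RX queues, RX hashing), default the
 * Rx/Tx flow control to enabled, and register one NAPI context per RX
 * queue plus one for the TX/MCC event queue.
 */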
2625static void be_netdev_init(struct net_device *netdev)
2626{
2627 struct be_adapter *adapter = netdev_priv(netdev);
Sathya Perla3abcded2010-10-03 22:12:27 -07002628 struct be_rx_obj *rxo;
2629 int i;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002630
Michał Mirosław6332c8d2011-04-07 02:43:48 +00002631 netdev->hw_features |= NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6 |
Michał Mirosław8b8ddc62011-04-08 02:38:47 +00002632 NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_RXCSUM |
2633 NETIF_F_HW_VLAN_TX;
2634 if (be_multi_rxq(adapter))
2635 netdev->hw_features |= NETIF_F_RXHASH;
Michał Mirosław6332c8d2011-04-07 02:43:48 +00002636
2637 netdev->features |= netdev->hw_features |
Michał Mirosław8b8ddc62011-04-08 02:38:47 +00002638 NETIF_F_HW_VLAN_RX | NETIF_F_HW_VLAN_FILTER;
Ajit Khaparde4b972912011-04-06 18:07:43 +00002639
Michał Mirosław79032642010-11-30 06:38:00 +00002640 netdev->vlan_features |= NETIF_F_SG | NETIF_F_TSO |
2641 NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;
Ajit Khaparde51c59872009-11-29 17:54:54 +00002642
Sathya Perlafe6d2a32010-11-21 23:25:50 +00002643 if (lancer_chip(adapter))
2644 netdev->vlan_features |= NETIF_F_TSO6;
2645
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002646 netdev->flags |= IFF_MULTICAST;
2647
Ajit Khaparde9e90c962009-11-06 02:06:59 +00002648 /* Default settings for Rx and Tx flow control */
2649 adapter->rx_fc = true;
2650 adapter->tx_fc = true;
2651
Ajit Khapardec190e3c2009-09-04 03:12:29 +00002652 netif_set_gso_max_size(netdev, 65535);
2653
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002654 BE_SET_NETDEV_OPS(netdev, &be_netdev_ops);
2655
2656 SET_ETHTOOL_OPS(netdev, &be_ethtool_ops);
2657
Sathya Perla3abcded2010-10-03 22:12:27 -07002658 for_all_rx_queues(adapter, rxo, i)
2659 netif_napi_add(netdev, &rxo->rx_eq.napi, be_poll_rx,
2660 BE_NAPI_WEIGHT);
2661
Sathya Perla5fb379e2009-06-18 00:02:59 +00002662 netif_napi_add(netdev, &adapter->tx_eq.napi, be_poll_tx_mcc,
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002663 BE_NAPI_WEIGHT);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002664}
2665
2666static void be_unmap_pci_bars(struct be_adapter *adapter)
2667{
Sathya Perla8788fdc2009-07-27 22:52:03 +00002668 if (adapter->csr)
2669 iounmap(adapter->csr);
2670 if (adapter->db)
2671 iounmap(adapter->db);
Sarveshwar Bandiba343c72010-03-31 02:56:12 +00002672 if (adapter->pcicfg && be_physfn(adapter))
Sathya Perla8788fdc2009-07-27 22:52:03 +00002673 iounmap(adapter->pcicfg);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002674}
2675
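/*
 * Map the PCI BARs this function needs. Lancer exposes only a doorbell
 * region in BAR 0. On BE2/BE3, the PF maps the CSR space (BAR 2), the
 * doorbell BAR (4, or 0 for a GEN3 VF) and a pcicfg region (BAR 1 on GEN2,
 * BAR 0 on GEN3); a VF instead derives its pcicfg space from a fixed
 * offset within the doorbell BAR.
 */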
2676static int be_map_pci_bars(struct be_adapter *adapter)
2677{
2678 u8 __iomem *addr;
Sarveshwar Bandiba343c72010-03-31 02:56:12 +00002679 int pcicfg_reg, db_reg;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002680
Sathya Perlafe6d2a32010-11-21 23:25:50 +00002681 if (lancer_chip(adapter)) {
2682 addr = ioremap_nocache(pci_resource_start(adapter->pdev, 0),
2683 pci_resource_len(adapter->pdev, 0));
2684 if (addr == NULL)
2685 return -ENOMEM;
2686 adapter->db = addr;
2687 return 0;
2688 }
2689
Sarveshwar Bandiba343c72010-03-31 02:56:12 +00002690 if (be_physfn(adapter)) {
2691 addr = ioremap_nocache(pci_resource_start(adapter->pdev, 2),
2692 pci_resource_len(adapter->pdev, 2));
2693 if (addr == NULL)
2694 return -ENOMEM;
2695 adapter->csr = addr;
2696 }
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002697
Sarveshwar Bandiba343c72010-03-31 02:56:12 +00002698 if (adapter->generation == BE_GEN2) {
2699 pcicfg_reg = 1;
2700 db_reg = 4;
2701 } else {
2702 pcicfg_reg = 0;
2703 if (be_physfn(adapter))
2704 db_reg = 4;
2705 else
2706 db_reg = 0;
2707 }
2708 addr = ioremap_nocache(pci_resource_start(adapter->pdev, db_reg),
2709 pci_resource_len(adapter->pdev, db_reg));
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002710 if (addr == NULL)
2711 goto pci_map_err;
Sathya Perla8788fdc2009-07-27 22:52:03 +00002712 adapter->db = addr;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002713
Sarveshwar Bandiba343c72010-03-31 02:56:12 +00002714 if (be_physfn(adapter)) {
2715 addr = ioremap_nocache(
2716 pci_resource_start(adapter->pdev, pcicfg_reg),
2717 pci_resource_len(adapter->pdev, pcicfg_reg));
2718 if (addr == NULL)
2719 goto pci_map_err;
2720 adapter->pcicfg = addr;
2721 } else
2722 adapter->pcicfg = adapter->db + SRIOV_VF_PCICFG_OFFSET;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002723
2724 return 0;
2725pci_map_err:
2726 be_unmap_pci_bars(adapter);
2727 return -ENOMEM;
2728}
2729
2730
2731static void be_ctrl_cleanup(struct be_adapter *adapter)
2732{
Sathya Perla8788fdc2009-07-27 22:52:03 +00002733 struct be_dma_mem *mem = &adapter->mbox_mem_alloced;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002734
2735 be_unmap_pci_bars(adapter);
2736
2737 if (mem->va)
Ivan Vecera2b7bceb2011-02-02 08:05:12 +00002738 dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
2739 mem->dma);
Sathya Perlae7b909a2009-11-22 22:01:10 +00002740
2741 mem = &adapter->mc_cmd_mem;
2742 if (mem->va)
Ivan Vecera2b7bceb2011-02-02 08:05:12 +00002743 dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
2744 mem->dma);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002745}
2746
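/*
 * One-time control-path setup: map the BARs, allocate the mailbox DMA
 * memory (aligned to a 16-byte boundary via PTR_ALIGN) and the multicast
 * command buffer, and initialize the mailbox/MCC locks and the flash
 * completion.
 */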
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002747static int be_ctrl_init(struct be_adapter *adapter)
2748{
Sathya Perla8788fdc2009-07-27 22:52:03 +00002749 struct be_dma_mem *mbox_mem_alloc = &adapter->mbox_mem_alloced;
2750 struct be_dma_mem *mbox_mem_align = &adapter->mbox_mem;
Sathya Perlae7b909a2009-11-22 22:01:10 +00002751 struct be_dma_mem *mc_cmd_mem = &adapter->mc_cmd_mem;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002752 int status;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002753
2754 status = be_map_pci_bars(adapter);
2755 if (status)
Sathya Perlae7b909a2009-11-22 22:01:10 +00002756 goto done;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002757
2758 mbox_mem_alloc->size = sizeof(struct be_mcc_mailbox) + 16;
Ivan Vecera2b7bceb2011-02-02 08:05:12 +00002759 mbox_mem_alloc->va = dma_alloc_coherent(&adapter->pdev->dev,
2760 mbox_mem_alloc->size,
2761 &mbox_mem_alloc->dma,
2762 GFP_KERNEL);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002763 if (!mbox_mem_alloc->va) {
Sathya Perlae7b909a2009-11-22 22:01:10 +00002764 status = -ENOMEM;
2765 goto unmap_pci_bars;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002766 }
Sathya Perlae7b909a2009-11-22 22:01:10 +00002767
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002768 mbox_mem_align->size = sizeof(struct be_mcc_mailbox);
2769 mbox_mem_align->va = PTR_ALIGN(mbox_mem_alloc->va, 16);
2770 mbox_mem_align->dma = PTR_ALIGN(mbox_mem_alloc->dma, 16);
2771 memset(mbox_mem_align->va, 0, sizeof(struct be_mcc_mailbox));
Sathya Perlae7b909a2009-11-22 22:01:10 +00002772
2773 mc_cmd_mem->size = sizeof(struct be_cmd_req_mcast_mac_config);
Ivan Vecera2b7bceb2011-02-02 08:05:12 +00002774 mc_cmd_mem->va = dma_alloc_coherent(&adapter->pdev->dev,
2775 mc_cmd_mem->size, &mc_cmd_mem->dma,
2776 GFP_KERNEL);
Sathya Perlae7b909a2009-11-22 22:01:10 +00002777 if (mc_cmd_mem->va == NULL) {
2778 status = -ENOMEM;
2779 goto free_mbox;
2780 }
2781 memset(mc_cmd_mem->va, 0, mc_cmd_mem->size);
2782
Ivan Vecera29849612010-12-14 05:43:19 +00002783 mutex_init(&adapter->mbox_lock);
Sathya Perla8788fdc2009-07-27 22:52:03 +00002784 spin_lock_init(&adapter->mcc_lock);
2785 spin_lock_init(&adapter->mcc_cq_lock);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002786
Sarveshwar Bandidd131e72010-05-25 16:16:32 -07002787 init_completion(&adapter->flash_compl);
Sathya Perlacf588472010-02-14 21:22:01 +00002788 pci_save_state(adapter->pdev);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002789 return 0;
Sathya Perlae7b909a2009-11-22 22:01:10 +00002790
2791free_mbox:
Ivan Vecera2b7bceb2011-02-02 08:05:12 +00002792 dma_free_coherent(&adapter->pdev->dev, mbox_mem_alloc->size,
2793 mbox_mem_alloc->va, mbox_mem_alloc->dma);
Sathya Perlae7b909a2009-11-22 22:01:10 +00002794
2795unmap_pci_bars:
2796 be_unmap_pci_bars(adapter);
2797
2798done:
2799 return status;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002800}
2801
2802static void be_stats_cleanup(struct be_adapter *adapter)
2803{
Sathya Perla3abcded2010-10-03 22:12:27 -07002804 struct be_dma_mem *cmd = &adapter->stats_cmd;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002805
2806 if (cmd->va)
Ivan Vecera2b7bceb2011-02-02 08:05:12 +00002807 dma_free_coherent(&adapter->pdev->dev, cmd->size,
2808 cmd->va, cmd->dma);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002809}
2810
2811static int be_stats_init(struct be_adapter *adapter)
2812{
Sathya Perla3abcded2010-10-03 22:12:27 -07002813 struct be_dma_mem *cmd = &adapter->stats_cmd;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002814
2815 cmd->size = sizeof(struct be_cmd_req_get_stats);
Ivan Vecera2b7bceb2011-02-02 08:05:12 +00002816 cmd->va = dma_alloc_coherent(&adapter->pdev->dev, cmd->size, &cmd->dma,
2817 GFP_KERNEL);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002818 if (cmd->va == NULL)
2819 		return -ENOMEM;
David S. Millerd291b9a2010-01-28 21:36:21 -08002820 memset(cmd->va, 0, cmd->size);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002821 return 0;
2822}
2823
2824static void __devexit be_remove(struct pci_dev *pdev)
2825{
2826 struct be_adapter *adapter = pci_get_drvdata(pdev);
Sathya Perla8d56ff12009-11-22 22:02:26 +00002827
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002828 if (!adapter)
2829 return;
2830
Somnath Koturf203af72010-10-25 23:01:03 +00002831 cancel_delayed_work_sync(&adapter->work);
2832
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002833 unregister_netdev(adapter->netdev);
2834
Sathya Perla5fb379e2009-06-18 00:02:59 +00002835 be_clear(adapter);
2836
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002837 be_stats_cleanup(adapter);
2838
2839 be_ctrl_cleanup(adapter);
2840
Ajit Khaparde48f5a192011-04-06 18:08:30 +00002841 kfree(adapter->vf_cfg);
Sarveshwar Bandiba343c72010-03-31 02:56:12 +00002842 be_sriov_disable(adapter);
2843
Sathya Perla8d56ff12009-11-22 22:02:26 +00002844 be_msix_disable(adapter);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002845
2846 pci_set_drvdata(pdev, NULL);
2847 pci_release_regions(pdev);
2848 pci_disable_device(pdev);
2849
2850 free_netdev(adapter->netdev);
2851}
2852
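/*
 * Read the adapter configuration from firmware: FW version, port number
 * and function mode/capabilities. Only the PF queries its permanent MAC
 * address here; a VF has its address assigned later by the PF. Also sizes
 * the VLAN table and fetches the controller attributes.
 */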
Sathya Perla2243e2e2009-11-22 22:02:03 +00002853static int be_get_config(struct be_adapter *adapter)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002854{
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002855 int status;
Sathya Perla2243e2e2009-11-22 22:02:03 +00002856 u8 mac[ETH_ALEN];
Sathya Perla43a04fdc2009-10-14 20:21:17 +00002857
Sathya Perla8788fdc2009-07-27 22:52:03 +00002858 status = be_cmd_get_fw_ver(adapter, adapter->fw_ver);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002859 if (status)
2860 return status;
2861
Sathya Perla3abcded2010-10-03 22:12:27 -07002862 status = be_cmd_query_fw_cfg(adapter, &adapter->port_num,
2863 &adapter->function_mode, &adapter->function_caps);
Sathya Perla2243e2e2009-11-22 22:02:03 +00002864 if (status)
2865 return status;
2866
2867 memset(mac, 0, ETH_ALEN);
Sarveshwar Bandiba343c72010-03-31 02:56:12 +00002868
2869 if (be_physfn(adapter)) {
2870 status = be_cmd_mac_addr_query(adapter, mac,
Sathya Perla2243e2e2009-11-22 22:02:03 +00002871 MAC_ADDRESS_TYPE_NETWORK, true /*permanent */, 0);
Ajit Khapardeca9e4982009-11-29 17:56:26 +00002872
Sarveshwar Bandiba343c72010-03-31 02:56:12 +00002873 if (status)
2874 return status;
Ajit Khapardeca9e4982009-11-29 17:56:26 +00002875
Sarveshwar Bandiba343c72010-03-31 02:56:12 +00002876 if (!is_valid_ether_addr(mac))
2877 return -EADDRNOTAVAIL;
2878
2879 memcpy(adapter->netdev->dev_addr, mac, ETH_ALEN);
2880 memcpy(adapter->netdev->perm_addr, mac, ETH_ALEN);
2881 }
Sathya Perla2243e2e2009-11-22 22:02:03 +00002882
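	/*
	 * Bit 0x400 of function_mode presumably flags FLEX10/multi-channel
	 * operation, where this function gets only a quarter of the
	 * supported VLAN entries.
	 */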
Ajit Khaparde3486be22010-07-23 02:04:54 +00002883 if (adapter->function_mode & 0x400)
Ajit Khaparde82903e42010-02-09 01:34:57 +00002884 adapter->max_vlans = BE_NUM_VLANS_SUPPORTED/4;
2885 else
2886 adapter->max_vlans = BE_NUM_VLANS_SUPPORTED;
2887
Ajit Khaparde9e1453c2011-02-20 11:42:22 +00002888 status = be_cmd_get_cntl_attributes(adapter);
2889 if (status)
2890 return status;
2891
Sathya Perla2e588f82011-03-11 02:49:26 +00002892 be_cmd_check_native_mode(adapter);
Sathya Perla2243e2e2009-11-22 22:02:03 +00002893 return 0;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002894}
2895
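/*
 * Classify the adapter generation from the PCI device ID. For
 * OC_DEVICE_ID3 (Lancer), additionally validate the SLI_INTF register and
 * record the SLI family; VFs are not supported on that device.
 */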
Sathya Perlafe6d2a32010-11-21 23:25:50 +00002896static int be_dev_family_check(struct be_adapter *adapter)
2897{
2898 struct pci_dev *pdev = adapter->pdev;
2899 u32 sli_intf = 0, if_type;
2900
2901 switch (pdev->device) {
2902 case BE_DEVICE_ID1:
2903 case OC_DEVICE_ID1:
2904 adapter->generation = BE_GEN2;
2905 break;
2906 case BE_DEVICE_ID2:
2907 case OC_DEVICE_ID2:
2908 adapter->generation = BE_GEN3;
2909 break;
2910 case OC_DEVICE_ID3:
2911 pci_read_config_dword(pdev, SLI_INTF_REG_OFFSET, &sli_intf);
2912 if_type = (sli_intf & SLI_INTF_IF_TYPE_MASK) >>
2913 SLI_INTF_IF_TYPE_SHIFT;
2914
2915 if (((sli_intf & SLI_INTF_VALID_MASK) != SLI_INTF_VALID) ||
2916 if_type != 0x02) {
2917 			dev_err(&pdev->dev, "SLI_INTF register value is not valid\n");
2918 return -EINVAL;
2919 }
2920 if (num_vfs > 0) {
2921 dev_err(&pdev->dev, "VFs not supported\n");
2922 return -EINVAL;
2923 }
2924 adapter->sli_family = ((sli_intf & SLI_INTF_FAMILY_MASK) >>
2925 SLI_INTF_FAMILY_SHIFT);
2926 adapter->generation = BE_GEN3;
2927 break;
2928 default:
2929 adapter->generation = 0;
2930 }
2931 return 0;
2932}
2933
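/*
 * Poll the SLIPORT status register until the ready bit is set, checking
 * every 20ms for up to 500 iterations (roughly 10 seconds) before timing
 * out.
 */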
Padmanabh Ratnakar37eed1c2011-03-07 03:08:36 +00002934static int lancer_wait_ready(struct be_adapter *adapter)
2935{
2936#define SLIPORT_READY_TIMEOUT 500
2937 u32 sliport_status;
2938 int status = 0, i;
2939
2940 for (i = 0; i < SLIPORT_READY_TIMEOUT; i++) {
2941 sliport_status = ioread32(adapter->db + SLIPORT_STATUS_OFFSET);
2942 if (sliport_status & SLIPORT_STATUS_RDY_MASK)
2943 break;
2944
2945 msleep(20);
2946 }
2947
2948 if (i == SLIPORT_READY_TIMEOUT)
2949 status = -1;
2950
2951 return status;
2952}
2953
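/*
 * If the SLI port reports an error that is marked as recoverable by reset,
 * trigger the reset through SLIPORT_CONTROL and wait for the port to come
 * back ready; an error without the reset-needed flag (or vice versa) is
 * treated as fatal.
 */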
2954static int lancer_test_and_set_rdy_state(struct be_adapter *adapter)
2955{
2956 int status;
2957 u32 sliport_status, err, reset_needed;
2958 status = lancer_wait_ready(adapter);
2959 if (!status) {
2960 sliport_status = ioread32(adapter->db + SLIPORT_STATUS_OFFSET);
2961 err = sliport_status & SLIPORT_STATUS_ERR_MASK;
2962 reset_needed = sliport_status & SLIPORT_STATUS_RN_MASK;
2963 if (err && reset_needed) {
2964 iowrite32(SLI_PORT_CONTROL_IP_MASK,
2965 adapter->db + SLIPORT_CONTROL_OFFSET);
2966
2967 			/* check if the adapter has corrected the error */
2968 status = lancer_wait_ready(adapter);
2969 sliport_status = ioread32(adapter->db +
2970 SLIPORT_STATUS_OFFSET);
2971 sliport_status &= (SLIPORT_STATUS_ERR_MASK |
2972 SLIPORT_STATUS_RN_MASK);
2973 if (status || sliport_status)
2974 status = -1;
2975 } else if (err || reset_needed) {
2976 status = -1;
2977 }
2978 }
2979 return status;
2980}
2981
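/*
 * PCI probe: enable the device, allocate the netdev, determine the chip
 * generation, set the DMA mask (64-bit preferred, 32-bit fallback), enable
 * SR-IOV when requested, bring up the control path and firmware, and
 * register the netdev. On a PF with SR-IOV enabled, VF MAC addresses and
 * link speeds are configured before the worker is first scheduled.
 */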
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002982static int __devinit be_probe(struct pci_dev *pdev,
2983 const struct pci_device_id *pdev_id)
2984{
2985 int status = 0;
2986 struct be_adapter *adapter;
2987 struct net_device *netdev;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002988
2989 status = pci_enable_device(pdev);
2990 if (status)
2991 goto do_none;
2992
2993 status = pci_request_regions(pdev, DRV_NAME);
2994 if (status)
2995 goto disable_dev;
2996 pci_set_master(pdev);
2997
2998 netdev = alloc_etherdev(sizeof(struct be_adapter));
2999 if (netdev == NULL) {
3000 status = -ENOMEM;
3001 goto rel_reg;
3002 }
3003 adapter = netdev_priv(netdev);
3004 adapter->pdev = pdev;
3005 pci_set_drvdata(pdev, adapter);
Sathya Perlafe6d2a32010-11-21 23:25:50 +00003006
3007 status = be_dev_family_check(adapter);
Sathya Perla63657b92010-12-01 01:02:28 +00003008 if (status)
Sathya Perlafe6d2a32010-11-21 23:25:50 +00003009 goto free_netdev;
3010
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003011 adapter->netdev = netdev;
Sathya Perla2243e2e2009-11-22 22:02:03 +00003012 SET_NETDEV_DEV(netdev, &pdev->dev);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003013
Ivan Vecera2b7bceb2011-02-02 08:05:12 +00003014 status = dma_set_mask(&pdev->dev, DMA_BIT_MASK(64));
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003015 if (!status) {
3016 netdev->features |= NETIF_F_HIGHDMA;
3017 } else {
Ivan Vecera2b7bceb2011-02-02 08:05:12 +00003018 status = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003019 if (status) {
3020 			dev_err(&pdev->dev, "Could not set PCI DMA mask\n");
3021 goto free_netdev;
3022 }
3023 }
3024
Sarveshwar Bandiba343c72010-03-31 02:56:12 +00003025 be_sriov_enable(adapter);
Ajit Khaparde48f5a192011-04-06 18:08:30 +00003026 if (adapter->sriov_enabled) {
3027 adapter->vf_cfg = kcalloc(num_vfs,
3028 sizeof(struct be_vf_cfg), GFP_KERNEL);
3029
3030 if (!adapter->vf_cfg)
3031 goto free_netdev;
3032 }
Sarveshwar Bandiba343c72010-03-31 02:56:12 +00003033
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003034 status = be_ctrl_init(adapter);
3035 if (status)
Ajit Khaparde48f5a192011-04-06 18:08:30 +00003036 goto free_vf_cfg;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003037
Padmanabh Ratnakar37eed1c2011-03-07 03:08:36 +00003038 if (lancer_chip(adapter)) {
3039 status = lancer_test_and_set_rdy_state(adapter);
3040 if (status) {
3041 			dev_err(&pdev->dev, "Adapter in non-recoverable error state\n");
Ajit Khaparde48f5a192011-04-06 18:08:30 +00003042 goto ctrl_clean;
Padmanabh Ratnakar37eed1c2011-03-07 03:08:36 +00003043 }
3044 }
3045
Sathya Perla2243e2e2009-11-22 22:02:03 +00003046 /* sync up with fw's ready state */
Sarveshwar Bandiba343c72010-03-31 02:56:12 +00003047 if (be_physfn(adapter)) {
3048 status = be_cmd_POST(adapter);
3049 if (status)
3050 goto ctrl_clean;
Sarveshwar Bandiba343c72010-03-31 02:56:12 +00003051 }
Sathya Perla2243e2e2009-11-22 22:02:03 +00003052
3053 /* tell fw we're ready to fire cmds */
3054 status = be_cmd_fw_init(adapter);
3055 if (status)
3056 goto ctrl_clean;
3057
Ajit Khapardea4b4dfa2011-02-11 13:36:57 +00003058 status = be_cmd_reset_function(adapter);
3059 if (status)
3060 goto ctrl_clean;
Sarveshwar Bandi556ae192010-05-24 18:38:25 -07003061
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003062 status = be_stats_init(adapter);
3063 if (status)
3064 goto ctrl_clean;
3065
Sathya Perla2243e2e2009-11-22 22:02:03 +00003066 status = be_get_config(adapter);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003067 if (status)
3068 goto stats_clean;
3069
Sathya Perla3abcded2010-10-03 22:12:27 -07003070 be_msix_enable(adapter);
3071
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003072 INIT_DELAYED_WORK(&adapter->work, be_worker);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003073
Sathya Perla5fb379e2009-06-18 00:02:59 +00003074 status = be_setup(adapter);
3075 if (status)
Sathya Perla3abcded2010-10-03 22:12:27 -07003076 goto msix_disable;
Sathya Perla2243e2e2009-11-22 22:02:03 +00003077
Sathya Perla3abcded2010-10-03 22:12:27 -07003078 be_netdev_init(netdev);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003079 status = register_netdev(netdev);
3080 if (status != 0)
Sathya Perla5fb379e2009-06-18 00:02:59 +00003081 goto unsetup;
Somnath Kotur63a76942010-10-25 01:11:10 +00003082 netif_carrier_off(netdev);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003083
Ajit Khapardee6319362011-02-11 13:35:41 +00003084 if (be_physfn(adapter) && adapter->sriov_enabled) {
Ajit Khaparded0381c42011-04-19 12:11:55 +00003085 u8 mac_speed;
3086 bool link_up;
3087 u16 vf, lnk_speed;
3088
Ajit Khapardee6319362011-02-11 13:35:41 +00003089 status = be_vf_eth_addr_config(adapter);
3090 if (status)
3091 goto unreg_netdev;
Ajit Khaparded0381c42011-04-19 12:11:55 +00003092
3093 for (vf = 0; vf < num_vfs; vf++) {
3094 status = be_cmd_link_status_query(adapter, &link_up,
3095 &mac_speed, &lnk_speed, vf + 1);
3096 if (!status)
3097 adapter->vf_cfg[vf].vf_tx_rate = lnk_speed * 10;
3098 else
3099 goto unreg_netdev;
3100 }
Ajit Khapardee6319362011-02-11 13:35:41 +00003101 }
3102
Ajit Khapardec4ca2372009-05-18 15:38:55 -07003103 dev_info(&pdev->dev, "%s port %d\n", nic_name(pdev), adapter->port_num);
Somnath Koturf203af72010-10-25 23:01:03 +00003104 schedule_delayed_work(&adapter->work, msecs_to_jiffies(100));
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003105 return 0;
3106
Ajit Khapardee6319362011-02-11 13:35:41 +00003107unreg_netdev:
3108 unregister_netdev(netdev);
Sathya Perla5fb379e2009-06-18 00:02:59 +00003109unsetup:
3110 be_clear(adapter);
Sathya Perla3abcded2010-10-03 22:12:27 -07003111msix_disable:
3112 be_msix_disable(adapter);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003113stats_clean:
3114 be_stats_cleanup(adapter);
3115ctrl_clean:
3116 be_ctrl_cleanup(adapter);
Ajit Khaparde48f5a192011-04-06 18:08:30 +00003117free_vf_cfg:
3118 kfree(adapter->vf_cfg);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003119free_netdev:
Sarveshwar Bandiba343c72010-03-31 02:56:12 +00003120 be_sriov_disable(adapter);
Sathya Perlafe6d2a32010-11-21 23:25:50 +00003121 free_netdev(netdev);
Sathya Perla8d56ff12009-11-22 22:02:26 +00003122 pci_set_drvdata(pdev, NULL);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003123rel_reg:
3124 pci_release_regions(pdev);
3125disable_dev:
3126 pci_disable_device(pdev);
3127do_none:
Ajit Khapardec4ca2372009-05-18 15:38:55 -07003128 dev_err(&pdev->dev, "%s initialization failed\n", nic_name(pdev));
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003129 return status;
3130}
3131
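/*
 * Power-management suspend: stop the worker, optionally arm wake-on-LAN,
 * detach and close the interface, tear down rings and MSI-X, and put the
 * device into the requested low-power state; be_resume() reverses these
 * steps.
 */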
3132static int be_suspend(struct pci_dev *pdev, pm_message_t state)
3133{
3134 struct be_adapter *adapter = pci_get_drvdata(pdev);
3135 struct net_device *netdev = adapter->netdev;
3136
Ajit Khapardea4ca0552011-02-11 13:38:03 +00003137 cancel_delayed_work_sync(&adapter->work);
Ajit Khaparde71d8d1b2009-12-03 06:16:59 +00003138 if (adapter->wol)
3139 be_setup_wol(adapter, true);
3140
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003141 netif_device_detach(netdev);
3142 if (netif_running(netdev)) {
3143 rtnl_lock();
3144 be_close(netdev);
3145 rtnl_unlock();
3146 }
Ajit Khaparde9e90c962009-11-06 02:06:59 +00003147 be_cmd_get_flow_control(adapter, &adapter->tx_fc, &adapter->rx_fc);
Sarveshwar Bandi9b0365f2009-08-12 21:01:29 +00003148 be_clear(adapter);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003149
Ajit Khapardea4ca0552011-02-11 13:38:03 +00003150 be_msix_disable(adapter);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003151 pci_save_state(pdev);
3152 pci_disable_device(pdev);
3153 pci_set_power_state(pdev, pci_choose_state(pdev, state));
3154 return 0;
3155}
3156
3157static int be_resume(struct pci_dev *pdev)
3158{
3159 int status = 0;
3160 struct be_adapter *adapter = pci_get_drvdata(pdev);
3161 struct net_device *netdev = adapter->netdev;
3162
3163 netif_device_detach(netdev);
3164
3165 status = pci_enable_device(pdev);
3166 if (status)
3167 return status;
3168
3169 pci_set_power_state(pdev, 0);
3170 pci_restore_state(pdev);
3171
Ajit Khapardea4ca0552011-02-11 13:38:03 +00003172 be_msix_enable(adapter);
Sathya Perla2243e2e2009-11-22 22:02:03 +00003173 /* tell fw we're ready to fire cmds */
3174 status = be_cmd_fw_init(adapter);
3175 if (status)
3176 return status;
3177
Sarveshwar Bandi9b0365f2009-08-12 21:01:29 +00003178 be_setup(adapter);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003179 if (netif_running(netdev)) {
3180 rtnl_lock();
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003181 be_open(netdev);
3182 rtnl_unlock();
3183 }
3184 netif_device_attach(netdev);
Ajit Khaparde71d8d1b2009-12-03 06:16:59 +00003185
3186 if (adapter->wol)
3187 be_setup_wol(adapter, false);
Ajit Khapardea4ca0552011-02-11 13:38:03 +00003188
3189 schedule_delayed_work(&adapter->work, msecs_to_jiffies(100));
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003190 return 0;
3191}
3192
Sathya Perla82456b02010-02-17 01:35:37 +00003193/*
3194 * A Function Level Reset (FLR) will stop BE from DMAing any data.
3195 */
3196static void be_shutdown(struct pci_dev *pdev)
3197{
3198 struct be_adapter *adapter = pci_get_drvdata(pdev);
Sathya Perla82456b02010-02-17 01:35:37 +00003199
Ajit Khaparde2d5d4152011-04-06 05:53:13 +00003200 if (!adapter)
3201 return;
Sathya Perla82456b02010-02-17 01:35:37 +00003202
Sathya Perla0f4a6822011-03-21 20:49:28 +00003203 cancel_delayed_work_sync(&adapter->work);
Ajit Khapardea4ca0552011-02-11 13:38:03 +00003204
Ajit Khaparde2d5d4152011-04-06 05:53:13 +00003205 netif_device_detach(adapter->netdev);
Sathya Perla82456b02010-02-17 01:35:37 +00003206
Sathya Perla82456b02010-02-17 01:35:37 +00003207 if (adapter->wol)
3208 be_setup_wol(adapter, true);
3209
Ajit Khaparde57841862011-04-06 18:08:43 +00003210 be_cmd_reset_function(adapter);
3211
Sathya Perla82456b02010-02-17 01:35:37 +00003212 pci_disable_device(pdev);
Sathya Perla82456b02010-02-17 01:35:37 +00003213}
3214
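/*
 * PCI EEH (error recovery) callbacks: on a detected channel error the
 * interface is detached and torn down; slot_reset re-enables the device
 * and waits for firmware POST; resume re-initializes the rings and
 * re-attaches the interface.
 */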
Sathya Perlacf588472010-02-14 21:22:01 +00003215static pci_ers_result_t be_eeh_err_detected(struct pci_dev *pdev,
3216 pci_channel_state_t state)
3217{
3218 struct be_adapter *adapter = pci_get_drvdata(pdev);
3219 struct net_device *netdev = adapter->netdev;
3220
3221 dev_err(&adapter->pdev->dev, "EEH error detected\n");
3222
3223 adapter->eeh_err = true;
3224
3225 netif_device_detach(netdev);
3226
3227 if (netif_running(netdev)) {
3228 rtnl_lock();
3229 be_close(netdev);
3230 rtnl_unlock();
3231 }
3232 be_clear(adapter);
3233
3234 if (state == pci_channel_io_perm_failure)
3235 return PCI_ERS_RESULT_DISCONNECT;
3236
3237 pci_disable_device(pdev);
3238
3239 return PCI_ERS_RESULT_NEED_RESET;
3240}
3241
3242static pci_ers_result_t be_eeh_reset(struct pci_dev *pdev)
3243{
3244 struct be_adapter *adapter = pci_get_drvdata(pdev);
3245 int status;
3246
3247 dev_info(&adapter->pdev->dev, "EEH reset\n");
3248 adapter->eeh_err = false;
3249
3250 status = pci_enable_device(pdev);
3251 if (status)
3252 return PCI_ERS_RESULT_DISCONNECT;
3253
3254 pci_set_master(pdev);
3255 pci_set_power_state(pdev, 0);
3256 pci_restore_state(pdev);
3257
3258 /* Check if card is ok and fw is ready */
3259 status = be_cmd_POST(adapter);
3260 if (status)
3261 return PCI_ERS_RESULT_DISCONNECT;
3262
3263 return PCI_ERS_RESULT_RECOVERED;
3264}
3265
3266static void be_eeh_resume(struct pci_dev *pdev)
3267{
3268 int status = 0;
3269 struct be_adapter *adapter = pci_get_drvdata(pdev);
3270 struct net_device *netdev = adapter->netdev;
3271
3272 dev_info(&adapter->pdev->dev, "EEH resume\n");
3273
3274 pci_save_state(pdev);
3275
3276 /* tell fw we're ready to fire cmds */
3277 status = be_cmd_fw_init(adapter);
3278 if (status)
3279 goto err;
3280
3281 status = be_setup(adapter);
3282 if (status)
3283 goto err;
3284
3285 if (netif_running(netdev)) {
3286 status = be_open(netdev);
3287 if (status)
3288 goto err;
3289 }
3290 netif_device_attach(netdev);
3291 return;
3292err:
3293 dev_err(&adapter->pdev->dev, "EEH resume failed\n");
Sathya Perlacf588472010-02-14 21:22:01 +00003294}
3295
3296static struct pci_error_handlers be_eeh_handlers = {
3297 .error_detected = be_eeh_err_detected,
3298 .slot_reset = be_eeh_reset,
3299 .resume = be_eeh_resume,
3300};
3301
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003302static struct pci_driver be_driver = {
3303 .name = DRV_NAME,
3304 .id_table = be_dev_ids,
3305 .probe = be_probe,
3306 .remove = be_remove,
3307 .suspend = be_suspend,
Sathya Perlacf588472010-02-14 21:22:01 +00003308 .resume = be_resume,
Sathya Perla82456b02010-02-17 01:35:37 +00003309 .shutdown = be_shutdown,
Sathya Perlacf588472010-02-14 21:22:01 +00003310 .err_handler = &be_eeh_handlers
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003311};
3312
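/*
 * Module init validates rx_frag_size before registering the PCI driver.
 * Assuming the module is named be2net, a non-default fragment size could
 * be requested at load time with, e.g.:
 *
 *	modprobe be2net rx_frag_size=4096
 */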
3313static int __init be_init_module(void)
3314{
Joe Perches8e95a202009-12-03 07:58:21 +00003315 if (rx_frag_size != 8192 && rx_frag_size != 4096 &&
3316 rx_frag_size != 2048) {
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003317 printk(KERN_WARNING DRV_NAME
3318 " : Module param rx_frag_size must be 2048/4096/8192."
3319 " Using 2048\n");
3320 rx_frag_size = 2048;
3321 }
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003322
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003323 return pci_register_driver(&be_driver);
3324}
3325module_init(be_init_module);
3326
3327static void __exit be_exit_module(void)
3328{
3329 pci_unregister_driver(&be_driver);
3330}
3331module_exit(be_exit_module);