/*
 * Copyright (C) 2005 - 2011 Emulex
 * All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version 2
 * as published by the Free Software Foundation. The full GNU General
 * Public License is included in this distribution in the file called COPYING.
 *
 * Contact Information:
 * linux-drivers@emulex.com
 *
 * Emulex
 * 3333 Susan Street
 * Costa Mesa, CA 92626
 */

#include "be.h"
#include "be_cmds.h"
#include <asm/div64.h>

MODULE_VERSION(DRV_VER);
MODULE_DESCRIPTION(DRV_DESC " " DRV_VER);
MODULE_AUTHOR("ServerEngines Corporation");
MODULE_LICENSE("GPL");

static ushort rx_frag_size = 2048;
static unsigned int num_vfs;
module_param(rx_frag_size, ushort, S_IRUGO);
module_param(num_vfs, uint, S_IRUGO);
MODULE_PARM_DESC(rx_frag_size, "Size of a fragment that holds rcvd data.");
MODULE_PARM_DESC(num_vfs, "Number of PCI VFs to initialize");
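/*
 * Example usage (hypothetical values, not a recommendation): loading the
 * driver with "modprobe be2net rx_frag_size=4096 num_vfs=2" posts 4KB RX
 * fragments and enables two SR-IOV virtual functions. Neither parameter is
 * writable after load (both are S_IRUGO only).
 */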

static bool multi_rxq = true;
module_param(multi_rxq, bool, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(multi_rxq, "Multi Rx Queue support. Enabled by default");

static DEFINE_PCI_DEVICE_TABLE(be_dev_ids) = {
	{ PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID1) },
	{ PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID2) },
	{ PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID1) },
	{ PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID2) },
	{ PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID3) },
	{ 0 }
};
MODULE_DEVICE_TABLE(pci, be_dev_ids);
/* UE Status Low CSR */
static char *ue_status_low_desc[] = {
	"CEV",
	"CTX",
	"DBUF",
	"ERX",
	"Host",
	"MPU",
	"NDMA",
	"PTC",
	"RDMA",
	"RXF",
	"RXIPS",
	"RXULP0",
	"RXULP1",
	"RXULP2",
	"TIM",
	"TPOST",
	"TPRE",
	"TXIPS",
	"TXULP0",
	"TXULP1",
	"UC",
	"WDMA",
	"TXULP2",
	"HOST1",
	"P0_OB_LINK",
	"P1_OB_LINK",
	"HOST_GPIO",
	"MBOX",
	"AXGMAC0",
	"AXGMAC1",
	"JTAG",
	"MPU_INTPEND"
};
/* UE Status High CSR */
static char *ue_status_hi_desc[] = {
	"LPCMEMHOST",
	"MGMT_MAC",
	"PCS0ONLINE",
	"MPU_IRAM",
	"PCS1ONLINE",
	"PCTL0",
	"PCTL1",
	"PMEM",
	"RR",
	"TXPB",
	"RXPP",
	"XAUI",
	"TXP",
	"ARM",
	"IPC",
	"HOST2",
	"HOST3",
	"HOST4",
	"HOST5",
	"HOST6",
	"HOST7",
	"HOST8",
	"HOST9",
	"NETC",
	"Unknown",
	"Unknown",
	"Unknown",
	"Unknown",
	"Unknown",
	"Unknown",
	"Unknown",
	"Unknown"
};

static void be_queue_free(struct be_adapter *adapter, struct be_queue_info *q)
{
	struct be_dma_mem *mem = &q->dma_mem;
	if (mem->va)
		dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
				  mem->dma);
}

static int be_queue_alloc(struct be_adapter *adapter, struct be_queue_info *q,
		u16 len, u16 entry_size)
{
	struct be_dma_mem *mem = &q->dma_mem;

	memset(q, 0, sizeof(*q));
	q->len = len;
	q->entry_size = entry_size;
	mem->size = len * entry_size;
	mem->va = dma_alloc_coherent(&adapter->pdev->dev, mem->size, &mem->dma,
				     GFP_KERNEL);
	if (!mem->va)
		return -1;
	memset(mem->va, 0, mem->size);
	return 0;
}

static void be_intr_set(struct be_adapter *adapter, bool enable)
{
	u8 __iomem *addr = adapter->pcicfg + PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET;
	u32 reg = ioread32(addr);
	u32 enabled = reg & MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;

	if (adapter->eeh_err)
		return;

	if (!enabled && enable)
		reg |= MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
	else if (enabled && !enable)
		reg &= ~MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
	else
		return;

	iowrite32(reg, addr);
}

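/*
 * The four notify helpers below ring the adapter's doorbell registers. Each
 * packs the queue id into the low bits of a 32-bit doorbell word and ORs in
 * the count of entries posted (RQ/TXQ) or events/completions popped and
 * re-armed (EQ/CQ) at their shift positions. The wmb() in the RQ/TXQ
 * variants makes sure the queue entries themselves are visible in memory
 * before the hardware is told to fetch them.
 */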
static void be_rxq_notify(struct be_adapter *adapter, u16 qid, u16 posted)
{
	u32 val = 0;
	val |= qid & DB_RQ_RING_ID_MASK;
	val |= posted << DB_RQ_NUM_POSTED_SHIFT;

	wmb();
	iowrite32(val, adapter->db + DB_RQ_OFFSET);
}

static void be_txq_notify(struct be_adapter *adapter, u16 qid, u16 posted)
{
	u32 val = 0;
	val |= qid & DB_TXULP_RING_ID_MASK;
	val |= (posted & DB_TXULP_NUM_POSTED_MASK) << DB_TXULP_NUM_POSTED_SHIFT;

	wmb();
	iowrite32(val, adapter->db + DB_TXULP1_OFFSET);
}

static void be_eq_notify(struct be_adapter *adapter, u16 qid,
		bool arm, bool clear_int, u16 num_popped)
{
	u32 val = 0;
	val |= qid & DB_EQ_RING_ID_MASK;
	val |= ((qid & DB_EQ_RING_ID_EXT_MASK) <<
			DB_EQ_RING_ID_EXT_MASK_SHIFT);

	if (adapter->eeh_err)
		return;

	if (arm)
		val |= 1 << DB_EQ_REARM_SHIFT;
	if (clear_int)
		val |= 1 << DB_EQ_CLR_SHIFT;
	val |= 1 << DB_EQ_EVNT_SHIFT;
	val |= num_popped << DB_EQ_NUM_POPPED_SHIFT;
	iowrite32(val, adapter->db + DB_EQ_OFFSET);
}

void be_cq_notify(struct be_adapter *adapter, u16 qid, bool arm, u16 num_popped)
{
	u32 val = 0;
	val |= qid & DB_CQ_RING_ID_MASK;
	val |= ((qid & DB_CQ_RING_ID_EXT_MASK) <<
			DB_CQ_RING_ID_EXT_MASK_SHIFT);

	if (adapter->eeh_err)
		return;

	if (arm)
		val |= 1 << DB_CQ_REARM_SHIFT;
	val |= num_popped << DB_CQ_NUM_POPPED_SHIFT;
	iowrite32(val, adapter->db + DB_CQ_OFFSET);
}

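/*
 * Changing the station MAC amounts to replacing the pmac entry on the
 * interface: the old pmac_id is deleted first and the subsequent add
 * refreshes adapter->pmac_id. VFs skip the hardware step entirely, since a
 * VF's MAC is programmed by its PF; only the netdev copy is updated.
 */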
static int be_mac_addr_set(struct net_device *netdev, void *p)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct sockaddr *addr = p;
	int status = 0;

	if (!is_valid_ether_addr(addr->sa_data))
		return -EADDRNOTAVAIL;

	/* MAC addr configuration will be done in hardware for VFs
	 * by their corresponding PFs. Just copy to netdev addr here
	 */
	if (!be_physfn(adapter))
		goto netdev_addr;

	status = be_cmd_pmac_del(adapter, adapter->if_handle,
			adapter->pmac_id, 0);
	if (status)
		return status;

	status = be_cmd_pmac_add(adapter, (u8 *)addr->sa_data,
			adapter->if_handle, &adapter->pmac_id, 0);
netdev_addr:
	if (!status)
		memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);

	return status;
}

void netdev_stats_update(struct be_adapter *adapter)
{
	struct be_hw_stats *hw_stats = hw_stats_from_cmd(adapter->stats_cmd.va);
	struct be_rxf_stats *rxf_stats = &hw_stats->rxf;
	struct be_port_rxf_stats *port_stats =
			&rxf_stats->port[adapter->port_num];
	struct net_device_stats *dev_stats = &adapter->netdev->stats;
	struct be_erx_stats *erx_stats = &hw_stats->erx;
	struct be_rx_obj *rxo;
	int i;

	memset(dev_stats, 0, sizeof(*dev_stats));
	for_all_rx_queues(adapter, rxo, i) {
		dev_stats->rx_packets += rx_stats(rxo)->rx_pkts;
		dev_stats->rx_bytes += rx_stats(rxo)->rx_bytes;
		dev_stats->multicast += rx_stats(rxo)->rx_mcast_pkts;
		/* no space in linux buffers: best possible approximation */
		dev_stats->rx_dropped +=
			erx_stats->rx_drops_no_fragments[rxo->q.id];
	}

	dev_stats->tx_packets = tx_stats(adapter)->be_tx_pkts;
	dev_stats->tx_bytes = tx_stats(adapter)->be_tx_bytes;

	/* bad pkts received */
	dev_stats->rx_errors = port_stats->rx_crc_errors +
		port_stats->rx_alignment_symbol_errors +
		port_stats->rx_in_range_errors +
		port_stats->rx_out_range_errors +
		port_stats->rx_frame_too_long +
		port_stats->rx_dropped_too_small +
		port_stats->rx_dropped_too_short +
		port_stats->rx_dropped_header_too_small +
		port_stats->rx_dropped_tcp_length +
		port_stats->rx_dropped_runt +
		port_stats->rx_tcp_checksum_errs +
		port_stats->rx_ip_checksum_errs +
		port_stats->rx_udp_checksum_errs;

	/* detailed rx errors */
	dev_stats->rx_length_errors = port_stats->rx_in_range_errors +
		port_stats->rx_out_range_errors +
		port_stats->rx_frame_too_long;

	dev_stats->rx_crc_errors = port_stats->rx_crc_errors;

	/* frame alignment errors */
	dev_stats->rx_frame_errors = port_stats->rx_alignment_symbol_errors;

	/* receiver fifo overrun */
	/* drops_no_pbuf is not per i/f; it's per BE card */
	dev_stats->rx_fifo_errors = port_stats->rx_fifo_overflow +
		port_stats->rx_input_fifo_overflow +
		rxf_stats->rx_drops_no_pbuf;
}

void be_link_status_update(struct be_adapter *adapter, bool link_up)
{
	struct net_device *netdev = adapter->netdev;

	/* If link came up or went down */
	if (adapter->link_up != link_up) {
		adapter->link_speed = -1;
		if (link_up) {
			netif_carrier_on(netdev);
			printk(KERN_INFO "%s: Link up\n", netdev->name);
		} else {
			netif_carrier_off(netdev);
			printk(KERN_INFO "%s: Link down\n", netdev->name);
		}
		adapter->link_up = link_up;
	}
}

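/*
 * Adaptive interrupt coalescing for RX: once a second the observed fragment
 * rate is sampled and the EQ delay recomputed as (rx_fps / 110000) << 3,
 * clamped to the EQ's [min_eqd, max_eqd] range and forced to 0 (no delay)
 * at very low rates so a mostly idle link keeps its low latency.
 */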
/* Update the EQ delay in BE based on the RX frags consumed / sec */
static void be_rx_eqd_update(struct be_adapter *adapter, struct be_rx_obj *rxo)
{
	struct be_eq_obj *rx_eq = &rxo->rx_eq;
	struct be_rx_stats *stats = &rxo->stats;
	ulong now = jiffies;
	u32 eqd;

	if (!rx_eq->enable_aic)
		return;

	/* Wrapped around */
	if (time_before(now, stats->rx_fps_jiffies)) {
		stats->rx_fps_jiffies = now;
		return;
	}

	/* Update once a second */
	if ((now - stats->rx_fps_jiffies) < HZ)
		return;

	stats->rx_fps = (stats->rx_frags - stats->prev_rx_frags) /
			((now - stats->rx_fps_jiffies) / HZ);

	stats->rx_fps_jiffies = now;
	stats->prev_rx_frags = stats->rx_frags;
	eqd = stats->rx_fps / 110000;
	eqd = eqd << 3;
	if (eqd > rx_eq->max_eqd)
		eqd = rx_eq->max_eqd;
	if (eqd < rx_eq->min_eqd)
		eqd = rx_eq->min_eqd;
	if (eqd < 10)
		eqd = 0;
	if (eqd != rx_eq->cur_eqd)
		be_cmd_modify_eqd(adapter, rx_eq->q.id, eqd);

	rx_eq->cur_eqd = eqd;
}

static u32 be_calc_rate(u64 bytes, unsigned long ticks)
{
	u64 rate = bytes;

	do_div(rate, ticks / HZ);	/* bytes/sec */
	rate <<= 3;			/* bytes/sec -> bits/sec */
	do_div(rate, 1000000ul);	/* Mbits/sec */

	return rate;
}

static void be_tx_rate_update(struct be_adapter *adapter)
{
	struct be_tx_stats *stats = tx_stats(adapter);
	ulong now = jiffies;

	/* Wrapped around? */
	if (time_before(now, stats->be_tx_jiffies)) {
		stats->be_tx_jiffies = now;
		return;
	}

	/* Update tx rate once in two seconds */
	if ((now - stats->be_tx_jiffies) > 2 * HZ) {
		stats->be_tx_rate = be_calc_rate(stats->be_tx_bytes
						  - stats->be_tx_bytes_prev,
						 now - stats->be_tx_jiffies);
		stats->be_tx_jiffies = now;
		stats->be_tx_bytes_prev = stats->be_tx_bytes;
	}
}

static void be_tx_stats_update(struct be_adapter *adapter,
			u32 wrb_cnt, u32 copied, u32 gso_segs, bool stopped)
{
	struct be_tx_stats *stats = tx_stats(adapter);
	stats->be_tx_reqs++;
	stats->be_tx_wrbs += wrb_cnt;
	stats->be_tx_bytes += copied;
	stats->be_tx_pkts += (gso_segs ? gso_segs : 1);
	if (stopped)
		stats->be_tx_stops++;
}

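/*
 * A TX request needs one WRB for the header, one for the linear part of the
 * skb (if any) and one per page fragment. On non-Lancer chips a dummy WRB
 * is appended whenever that total is odd, presumably a hardware requirement
 * for an even WRB count per request; *dummy tells the caller to post it.
 */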
/* Determine number of WRB entries needed to xmit data in an skb */
static u32 wrb_cnt_for_skb(struct be_adapter *adapter, struct sk_buff *skb,
			   bool *dummy)
{
	int cnt = (skb->len > skb->data_len);

	cnt += skb_shinfo(skb)->nr_frags;

	/* to account for hdr wrb */
	cnt++;
	if (lancer_chip(adapter) || !(cnt & 1)) {
		*dummy = false;
	} else {
		/* add a dummy to make it an even num */
		cnt++;
		*dummy = true;
	}
	BUG_ON(cnt > BE_MAX_TX_FRAG_COUNT);
	return cnt;
}

static inline void wrb_fill(struct be_eth_wrb *wrb, u64 addr, int len)
{
	wrb->frag_pa_hi = upper_32_bits(addr);
	wrb->frag_pa_lo = addr & 0xFFFFFFFF;
	wrb->frag_len = len & ETH_WRB_FRAG_LEN_MASK;
}

static void wrb_fill_hdr(struct be_adapter *adapter, struct be_eth_hdr_wrb *hdr,
		struct sk_buff *skb, u32 wrb_cnt, u32 len)
{
	u8 vlan_prio = 0;
	u16 vlan_tag = 0;

	memset(hdr, 0, sizeof(*hdr));

	AMAP_SET_BITS(struct amap_eth_hdr_wrb, crc, hdr, 1);

	if (skb_is_gso(skb)) {
		AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso, hdr, 1);
		AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso_mss,
			hdr, skb_shinfo(skb)->gso_size);
		if (skb_is_gso_v6(skb) && !lancer_chip(adapter))
			AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso6, hdr, 1);
		if (lancer_chip(adapter) && adapter->sli_family ==
						LANCER_A0_SLI_FAMILY) {
			AMAP_SET_BITS(struct amap_eth_hdr_wrb, ipcs, hdr, 1);
			if (is_tcp_pkt(skb))
				AMAP_SET_BITS(struct amap_eth_hdr_wrb,
								tcpcs, hdr, 1);
			else if (is_udp_pkt(skb))
				AMAP_SET_BITS(struct amap_eth_hdr_wrb,
								udpcs, hdr, 1);
		}
	} else if (skb->ip_summed == CHECKSUM_PARTIAL) {
		if (is_tcp_pkt(skb))
			AMAP_SET_BITS(struct amap_eth_hdr_wrb, tcpcs, hdr, 1);
		else if (is_udp_pkt(skb))
			AMAP_SET_BITS(struct amap_eth_hdr_wrb, udpcs, hdr, 1);
	}

	if (adapter->vlan_grp && vlan_tx_tag_present(skb)) {
		AMAP_SET_BITS(struct amap_eth_hdr_wrb, vlan, hdr, 1);
		vlan_tag = vlan_tx_tag_get(skb);
		vlan_prio = (vlan_tag & VLAN_PRIO_MASK) >> VLAN_PRIO_SHIFT;
		/* If vlan priority provided by OS is NOT in available bmap */
		if (!(adapter->vlan_prio_bmap & (1 << vlan_prio)))
			vlan_tag = (vlan_tag & ~VLAN_PRIO_MASK) |
					adapter->recommended_prio;
		AMAP_SET_BITS(struct amap_eth_hdr_wrb, vlan_tag, hdr, vlan_tag);
	}

	AMAP_SET_BITS(struct amap_eth_hdr_wrb, event, hdr, 1);
	AMAP_SET_BITS(struct amap_eth_hdr_wrb, complete, hdr, 1);
	AMAP_SET_BITS(struct amap_eth_hdr_wrb, num_wrb, hdr, wrb_cnt);
	AMAP_SET_BITS(struct amap_eth_hdr_wrb, len, hdr, len);
}

static void unmap_tx_frag(struct device *dev, struct be_eth_wrb *wrb,
		bool unmap_single)
{
	dma_addr_t dma;

	be_dws_le_to_cpu(wrb, sizeof(*wrb));

	dma = (u64)wrb->frag_pa_hi << 32 | (u64)wrb->frag_pa_lo;
	if (wrb->frag_len) {
		if (unmap_single)
			dma_unmap_single(dev, dma, wrb->frag_len,
					 DMA_TO_DEVICE);
		else
			dma_unmap_page(dev, dma, wrb->frag_len, DMA_TO_DEVICE);
	}
}

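/*
 * Map an skb into TX WRBs: the linear part (if any) via dma_map_single()
 * and each page fragment via dma_map_page(). On a mapping failure the queue
 * head is rewound to map_head and every WRB filled so far is unmapped
 * again, leaving the TX queue exactly as it was found.
 */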
static int make_tx_wrbs(struct be_adapter *adapter,
		struct sk_buff *skb, u32 wrb_cnt, bool dummy_wrb)
{
	dma_addr_t busaddr;
	int i, copied = 0;
	struct device *dev = &adapter->pdev->dev;
	struct sk_buff *first_skb = skb;
	struct be_queue_info *txq = &adapter->tx_obj.q;
	struct be_eth_wrb *wrb;
	struct be_eth_hdr_wrb *hdr;
	bool map_single = false;
	u16 map_head;

	hdr = queue_head_node(txq);
	queue_head_inc(txq);
	map_head = txq->head;

	if (skb->len > skb->data_len) {
		int len = skb_headlen(skb);
		busaddr = dma_map_single(dev, skb->data, len, DMA_TO_DEVICE);
		if (dma_mapping_error(dev, busaddr))
			goto dma_err;
		map_single = true;
		wrb = queue_head_node(txq);
		wrb_fill(wrb, busaddr, len);
		be_dws_cpu_to_le(wrb, sizeof(*wrb));
		queue_head_inc(txq);
		copied += len;
	}

	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		struct skb_frag_struct *frag =
			&skb_shinfo(skb)->frags[i];
		busaddr = dma_map_page(dev, frag->page, frag->page_offset,
				       frag->size, DMA_TO_DEVICE);
		if (dma_mapping_error(dev, busaddr))
			goto dma_err;
		wrb = queue_head_node(txq);
		wrb_fill(wrb, busaddr, frag->size);
		be_dws_cpu_to_le(wrb, sizeof(*wrb));
		queue_head_inc(txq);
		copied += frag->size;
	}

	if (dummy_wrb) {
		wrb = queue_head_node(txq);
		wrb_fill(wrb, 0, 0);
		be_dws_cpu_to_le(wrb, sizeof(*wrb));
		queue_head_inc(txq);
	}

	wrb_fill_hdr(adapter, hdr, first_skb, wrb_cnt, copied);
	be_dws_cpu_to_le(hdr, sizeof(*hdr));

	return copied;
dma_err:
	txq->head = map_head;
	while (copied) {
		wrb = queue_head_node(txq);
		unmap_tx_frag(dev, wrb, map_single);
		map_single = false;
		copied -= wrb->frag_len;
		queue_head_inc(txq);
	}
	return 0;
}

static netdev_tx_t be_xmit(struct sk_buff *skb,
			struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_tx_obj *tx_obj = &adapter->tx_obj;
	struct be_queue_info *txq = &tx_obj->q;
	u32 wrb_cnt = 0, copied = 0;
	u32 start = txq->head;
	bool dummy_wrb, stopped = false;

	wrb_cnt = wrb_cnt_for_skb(adapter, skb, &dummy_wrb);

	copied = make_tx_wrbs(adapter, skb, wrb_cnt, dummy_wrb);
	if (copied) {
		/* record the sent skb in the sent_skb table */
		BUG_ON(tx_obj->sent_skb_list[start]);
		tx_obj->sent_skb_list[start] = skb;

		/* Ensure txq has space for the next skb; else stop the queue
		 * *BEFORE* ringing the tx doorbell, so that we serialize the
		 * tx compls of the current transmit which'll wake up the queue
		 */
		atomic_add(wrb_cnt, &txq->used);
		if ((BE_MAX_TX_FRAG_COUNT + atomic_read(&txq->used)) >=
								txq->len) {
			netif_stop_queue(netdev);
			stopped = true;
		}

		be_txq_notify(adapter, txq->id, wrb_cnt);

		be_tx_stats_update(adapter, wrb_cnt, copied,
				skb_shinfo(skb)->gso_segs, stopped);
	} else {
		txq->head = start;
		dev_kfree_skb_any(skb);
	}
	return NETDEV_TX_OK;
}

static int be_change_mtu(struct net_device *netdev, int new_mtu)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	if (new_mtu < BE_MIN_MTU ||
			new_mtu > (BE_MAX_JUMBO_FRAME_SIZE -
					(ETH_HLEN + ETH_FCS_LEN))) {
		dev_info(&adapter->pdev->dev,
			"MTU must be between %d and %d bytes\n",
			BE_MIN_MTU,
			(BE_MAX_JUMBO_FRAME_SIZE - (ETH_HLEN + ETH_FCS_LEN)));
		return -EINVAL;
	}
	dev_info(&adapter->pdev->dev, "MTU changed from %d to %d bytes\n",
			netdev->mtu, new_mtu);
	netdev->mtu = new_mtu;
	return 0;
}

/*
 * A max of 64 (BE_NUM_VLANS_SUPPORTED) vlans can be configured in BE.
 * If the user configures more, place BE in vlan promiscuous mode.
 */
static int be_vid_config(struct be_adapter *adapter, bool vf, u32 vf_num)
{
	u16 vtag[BE_NUM_VLANS_SUPPORTED];
	u16 ntags = 0, i;
	int status = 0;
	u32 if_handle;

	if (vf) {
		if_handle = adapter->vf_cfg[vf_num].vf_if_handle;
		vtag[0] = cpu_to_le16(adapter->vf_cfg[vf_num].vf_vlan_tag);
		status = be_cmd_vlan_config(adapter, if_handle, vtag, 1, 1, 0);
	}

	if (adapter->vlans_added <= adapter->max_vlans) {
		/* Construct VLAN Table to give to HW */
		for (i = 0; i < VLAN_N_VID; i++) {
			if (adapter->vlan_tag[i]) {
				vtag[ntags] = cpu_to_le16(i);
				ntags++;
			}
		}
		status = be_cmd_vlan_config(adapter, adapter->if_handle,
					vtag, ntags, 1, 0);
	} else {
		status = be_cmd_vlan_config(adapter, adapter->if_handle,
					NULL, 0, 1, 1);
	}

	return status;
}

static void be_vlan_register(struct net_device *netdev, struct vlan_group *grp)
{
	struct be_adapter *adapter = netdev_priv(netdev);

	adapter->vlan_grp = grp;
}

static void be_vlan_add_vid(struct net_device *netdev, u16 vid)
{
	struct be_adapter *adapter = netdev_priv(netdev);

	adapter->vlans_added++;
	if (!be_physfn(adapter))
		return;

	adapter->vlan_tag[vid] = 1;
	if (adapter->vlans_added <= (adapter->max_vlans + 1))
		be_vid_config(adapter, false, 0);
}

static void be_vlan_rem_vid(struct net_device *netdev, u16 vid)
{
	struct be_adapter *adapter = netdev_priv(netdev);

	adapter->vlans_added--;
	vlan_group_set_device(adapter->vlan_grp, vid, NULL);

	if (!be_physfn(adapter))
		return;

	adapter->vlan_tag[vid] = 0;
	if (adapter->vlans_added <= adapter->max_vlans)
		be_vid_config(adapter, false, 0);
}

static void be_set_multicast_list(struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);

	if (netdev->flags & IFF_PROMISC) {
		be_cmd_promiscuous_config(adapter, adapter->port_num, 1);
		adapter->promiscuous = true;
		goto done;
	}

	/* BE was previously in promiscuous mode; disable it */
	if (adapter->promiscuous) {
		adapter->promiscuous = false;
		be_cmd_promiscuous_config(adapter, adapter->port_num, 0);
	}

	/* Enable multicast promisc if num configured exceeds what we support */
	if (netdev->flags & IFF_ALLMULTI ||
	    netdev_mc_count(netdev) > BE_MAX_MC) {
		be_cmd_multicast_set(adapter, adapter->if_handle, NULL,
				&adapter->mc_cmd_mem);
		goto done;
	}

	be_cmd_multicast_set(adapter, adapter->if_handle, netdev,
		&adapter->mc_cmd_mem);
done:
	return;
}

static int be_set_vf_mac(struct net_device *netdev, int vf, u8 *mac)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	int status;

	if (!adapter->sriov_enabled)
		return -EPERM;

	if (!is_valid_ether_addr(mac) || (vf >= num_vfs))
		return -EINVAL;

	if (adapter->vf_cfg[vf].vf_pmac_id != BE_INVALID_PMAC_ID)
		status = be_cmd_pmac_del(adapter,
					adapter->vf_cfg[vf].vf_if_handle,
					adapter->vf_cfg[vf].vf_pmac_id, vf + 1);

	status = be_cmd_pmac_add(adapter, mac,
				adapter->vf_cfg[vf].vf_if_handle,
				&adapter->vf_cfg[vf].vf_pmac_id, vf + 1);

	if (status)
		dev_err(&adapter->pdev->dev, "MAC %pM set on VF %d Failed\n",
			mac, vf);
	else
		memcpy(adapter->vf_cfg[vf].vf_mac_addr, mac, ETH_ALEN);

	return status;
}

static int be_get_vf_config(struct net_device *netdev, int vf,
			struct ifla_vf_info *vi)
{
	struct be_adapter *adapter = netdev_priv(netdev);

	if (!adapter->sriov_enabled)
		return -EPERM;

	if (vf >= num_vfs)
		return -EINVAL;

	vi->vf = vf;
	vi->tx_rate = adapter->vf_cfg[vf].vf_tx_rate;
	vi->vlan = adapter->vf_cfg[vf].vf_vlan_tag;
	vi->qos = 0;
	memcpy(&vi->mac, adapter->vf_cfg[vf].vf_mac_addr, ETH_ALEN);

	return 0;
}

static int be_set_vf_vlan(struct net_device *netdev,
			int vf, u16 vlan, u8 qos)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	int status = 0;

	if (!adapter->sriov_enabled)
		return -EPERM;

	if ((vf >= num_vfs) || (vlan > 4095))
		return -EINVAL;

	if (vlan) {
		adapter->vf_cfg[vf].vf_vlan_tag = vlan;
		adapter->vlans_added++;
	} else {
		adapter->vf_cfg[vf].vf_vlan_tag = 0;
		adapter->vlans_added--;
	}

	status = be_vid_config(adapter, true, vf);

	if (status)
		dev_info(&adapter->pdev->dev,
				"VLAN %d config on VF %d failed\n", vlan, vf);
	return status;
}

static int be_set_vf_tx_rate(struct net_device *netdev,
			int vf, int rate)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	int status = 0;

	if (!adapter->sriov_enabled)
		return -EPERM;

	if ((vf >= num_vfs) || (rate < 0))
		return -EINVAL;

	if (rate > 10000)
		rate = 10000;

	adapter->vf_cfg[vf].vf_tx_rate = rate;
	status = be_cmd_set_qos(adapter, rate / 10, vf + 1);

	if (status)
		dev_info(&adapter->pdev->dev,
				"tx rate %d on VF %d failed\n", rate, vf);
	return status;
}

static void be_rx_rate_update(struct be_rx_obj *rxo)
{
	struct be_rx_stats *stats = &rxo->stats;
	ulong now = jiffies;

	/* Wrapped around */
	if (time_before(now, stats->rx_jiffies)) {
		stats->rx_jiffies = now;
		return;
	}

	/* Update the rate once in two seconds */
	if ((now - stats->rx_jiffies) < 2 * HZ)
		return;

	stats->rx_rate = be_calc_rate(stats->rx_bytes - stats->rx_bytes_prev,
				now - stats->rx_jiffies);
	stats->rx_jiffies = now;
	stats->rx_bytes_prev = stats->rx_bytes;
}

static void be_rx_stats_update(struct be_rx_obj *rxo,
		struct be_rx_compl_info *rxcp)
{
	struct be_rx_stats *stats = &rxo->stats;

	stats->rx_compl++;
	stats->rx_frags += rxcp->num_rcvd;
	stats->rx_bytes += rxcp->pkt_size;
	stats->rx_pkts++;
	if (rxcp->pkt_type == BE_MULTICAST_PACKET)
		stats->rx_mcast_pkts++;
	if (rxcp->err)
		stats->rxcp_err++;
}

static inline bool csum_passed(struct be_rx_compl_info *rxcp)
{
	/* L4 checksum is not reliable for non TCP/UDP packets.
	 * Also ignore ipcksm for ipv6 pkts */
	return (rxcp->tcpf || rxcp->udpf) && rxcp->l4_csum &&
				(rxcp->ip_csum || rxcp->ipv6);
}

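/*
 * One big page backs several rx_frag_size-sized RX frags. The page's DMA
 * mapping is torn down only when the frag flagged last_page_user is
 * consumed, i.e. when the last fragment carved out of that page has come
 * back from hardware.
 */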
static struct be_rx_page_info *
get_rx_page_info(struct be_adapter *adapter,
		struct be_rx_obj *rxo,
		u16 frag_idx)
{
	struct be_rx_page_info *rx_page_info;
	struct be_queue_info *rxq = &rxo->q;

	rx_page_info = &rxo->page_info_tbl[frag_idx];
	BUG_ON(!rx_page_info->page);

	if (rx_page_info->last_page_user) {
		dma_unmap_page(&adapter->pdev->dev,
			       dma_unmap_addr(rx_page_info, bus),
			       adapter->big_page_size, DMA_FROM_DEVICE);
		rx_page_info->last_page_user = false;
	}

	atomic_dec(&rxq->used);
	return rx_page_info;
}

/* Throw away the data in the Rx completion */
static void be_rx_compl_discard(struct be_adapter *adapter,
		struct be_rx_obj *rxo,
		struct be_rx_compl_info *rxcp)
{
	struct be_queue_info *rxq = &rxo->q;
	struct be_rx_page_info *page_info;
	u16 i, num_rcvd = rxcp->num_rcvd;

	for (i = 0; i < num_rcvd; i++) {
		page_info = get_rx_page_info(adapter, rxo, rxcp->rxq_idx);
		put_page(page_info->page);
		memset(page_info, 0, sizeof(*page_info));
		index_inc(&rxcp->rxq_idx, rxq->len);
	}
}

/*
 * skb_fill_rx_data forms a complete skb for an ether frame
 * indicated by rxcp.
 */
static void skb_fill_rx_data(struct be_adapter *adapter, struct be_rx_obj *rxo,
			struct sk_buff *skb, struct be_rx_compl_info *rxcp)
{
	struct be_queue_info *rxq = &rxo->q;
	struct be_rx_page_info *page_info;
	u16 i, j;
	u16 hdr_len, curr_frag_len, remaining;
	u8 *start;

	page_info = get_rx_page_info(adapter, rxo, rxcp->rxq_idx);
	start = page_address(page_info->page) + page_info->page_offset;
	prefetch(start);

	/* Copy data in the first descriptor of this completion */
	curr_frag_len = min(rxcp->pkt_size, rx_frag_size);

	/* Copy the header portion into skb_data */
	hdr_len = min(BE_HDR_LEN, curr_frag_len);
	memcpy(skb->data, start, hdr_len);
	skb->len = curr_frag_len;
	if (curr_frag_len <= BE_HDR_LEN) { /* tiny packet */
		/* Complete packet has now been moved to data */
		put_page(page_info->page);
		skb->data_len = 0;
		skb->tail += curr_frag_len;
	} else {
		skb_shinfo(skb)->nr_frags = 1;
		skb_shinfo(skb)->frags[0].page = page_info->page;
		skb_shinfo(skb)->frags[0].page_offset =
					page_info->page_offset + hdr_len;
		skb_shinfo(skb)->frags[0].size = curr_frag_len - hdr_len;
		skb->data_len = curr_frag_len - hdr_len;
		skb->tail += hdr_len;
	}
	page_info->page = NULL;

	if (rxcp->pkt_size <= rx_frag_size) {
		BUG_ON(rxcp->num_rcvd != 1);
		return;
	}

	/* More frags present for this completion */
	index_inc(&rxcp->rxq_idx, rxq->len);
	remaining = rxcp->pkt_size - curr_frag_len;
	for (i = 1, j = 0; i < rxcp->num_rcvd; i++) {
		page_info = get_rx_page_info(adapter, rxo, rxcp->rxq_idx);
		curr_frag_len = min(remaining, rx_frag_size);

		/* Coalesce all frags from the same physical page in one slot */
		if (page_info->page_offset == 0) {
			/* Fresh page */
			j++;
			skb_shinfo(skb)->frags[j].page = page_info->page;
			skb_shinfo(skb)->frags[j].page_offset =
							page_info->page_offset;
			skb_shinfo(skb)->frags[j].size = 0;
			skb_shinfo(skb)->nr_frags++;
		} else {
			put_page(page_info->page);
		}

		skb_shinfo(skb)->frags[j].size += curr_frag_len;
		skb->len += curr_frag_len;
		skb->data_len += curr_frag_len;

		remaining -= curr_frag_len;
		index_inc(&rxcp->rxq_idx, rxq->len);
		page_info->page = NULL;
	}
	BUG_ON(j > MAX_SKB_FRAGS);
}

/* Process the RX completion indicated by rxcp when GRO is disabled */
static void be_rx_compl_process(struct be_adapter *adapter,
			struct be_rx_obj *rxo,
			struct be_rx_compl_info *rxcp)
{
	struct sk_buff *skb;

	skb = netdev_alloc_skb_ip_align(adapter->netdev, BE_HDR_LEN);
	if (unlikely(!skb)) {
		if (net_ratelimit())
			dev_warn(&adapter->pdev->dev, "skb alloc failed\n");
		be_rx_compl_discard(adapter, rxo, rxcp);
		return;
	}

	skb_fill_rx_data(adapter, rxo, skb, rxcp);

	if (likely(adapter->rx_csum && csum_passed(rxcp)))
		skb->ip_summed = CHECKSUM_UNNECESSARY;
	else
		skb_checksum_none_assert(skb);

	skb->truesize = skb->len + sizeof(struct sk_buff);
	skb->protocol = eth_type_trans(skb, adapter->netdev);
	if (adapter->netdev->features & NETIF_F_RXHASH)
		skb->rxhash = rxcp->rss_hash;

	if (unlikely(rxcp->vlanf)) {
		if (!adapter->vlan_grp || adapter->vlans_added == 0) {
			kfree_skb(skb);
			return;
		}
		vlan_hwaccel_receive_skb(skb, adapter->vlan_grp, rxcp->vid);
	} else {
		netif_receive_skb(skb);
	}
}

/* Process the RX completion indicated by rxcp when GRO is enabled */
static void be_rx_compl_process_gro(struct be_adapter *adapter,
		struct be_rx_obj *rxo,
		struct be_rx_compl_info *rxcp)
{
	struct be_rx_page_info *page_info;
	struct sk_buff *skb = NULL;
	struct be_queue_info *rxq = &rxo->q;
	struct be_eq_obj *eq_obj = &rxo->rx_eq;
	u16 remaining, curr_frag_len;
	u16 i, j;

	skb = napi_get_frags(&eq_obj->napi);
	if (!skb) {
		be_rx_compl_discard(adapter, rxo, rxcp);
		return;
	}

	remaining = rxcp->pkt_size;
	for (i = 0, j = -1; i < rxcp->num_rcvd; i++) {
		page_info = get_rx_page_info(adapter, rxo, rxcp->rxq_idx);

		curr_frag_len = min(remaining, rx_frag_size);

		/* Coalesce all frags from the same physical page in one slot */
		if (i == 0 || page_info->page_offset == 0) {
			/* First frag or Fresh page */
			j++;
			skb_shinfo(skb)->frags[j].page = page_info->page;
			skb_shinfo(skb)->frags[j].page_offset =
							page_info->page_offset;
			skb_shinfo(skb)->frags[j].size = 0;
		} else {
			put_page(page_info->page);
		}
		skb_shinfo(skb)->frags[j].size += curr_frag_len;

		remaining -= curr_frag_len;
		index_inc(&rxcp->rxq_idx, rxq->len);
		memset(page_info, 0, sizeof(*page_info));
	}
	BUG_ON(j > MAX_SKB_FRAGS);

	skb_shinfo(skb)->nr_frags = j + 1;
	skb->len = rxcp->pkt_size;
	skb->data_len = rxcp->pkt_size;
	skb->truesize += rxcp->pkt_size;
	skb->ip_summed = CHECKSUM_UNNECESSARY;
	if (adapter->netdev->features & NETIF_F_RXHASH)
		skb->rxhash = rxcp->rss_hash;

	if (likely(!rxcp->vlanf))
		napi_gro_frags(&eq_obj->napi);
	else
		vlan_gro_frags(&eq_obj->napi, adapter->vlan_grp, rxcp->vid);
}

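/*
 * BE3 in native mode reports RX completions in the v1 layout; everything
 * else uses v0. Both parsers below fill the same hardware-independent
 * be_rx_compl_info, so the rest of the RX path never needs to know which
 * completion format arrived.
 */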
static void be_parse_rx_compl_v1(struct be_adapter *adapter,
				struct be_eth_rx_compl *compl,
				struct be_rx_compl_info *rxcp)
{
	rxcp->pkt_size =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v1, pktsize, compl);
	rxcp->vlanf = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, vtp, compl);
	rxcp->err = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, err, compl);
	rxcp->tcpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, tcpf, compl);
	rxcp->udpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, udpf, compl);
	rxcp->ip_csum =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v1, ipcksm, compl);
	rxcp->l4_csum =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v1, l4_cksm, compl);
	rxcp->ipv6 =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v1, ip_version, compl);
	rxcp->rxq_idx =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v1, fragndx, compl);
	rxcp->num_rcvd =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v1, numfrags, compl);
	rxcp->pkt_type =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v1, cast_enc, compl);
	rxcp->rss_hash =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v1, rsshash, compl);
	if (rxcp->vlanf) {
		rxcp->vtm = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, vtm,
					  compl);
		rxcp->vid = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, vlan_tag,
					  compl);
	}
}

static void be_parse_rx_compl_v0(struct be_adapter *adapter,
				struct be_eth_rx_compl *compl,
				struct be_rx_compl_info *rxcp)
{
	rxcp->pkt_size =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v0, pktsize, compl);
	rxcp->vlanf = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, vtp, compl);
	rxcp->err = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, err, compl);
	rxcp->tcpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, tcpf, compl);
	rxcp->udpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, udpf, compl);
	rxcp->ip_csum =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v0, ipcksm, compl);
	rxcp->l4_csum =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v0, l4_cksm, compl);
	rxcp->ipv6 =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v0, ip_version, compl);
	rxcp->rxq_idx =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v0, fragndx, compl);
	rxcp->num_rcvd =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v0, numfrags, compl);
	rxcp->pkt_type =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v0, cast_enc, compl);
	rxcp->rss_hash =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v0, rsshash, compl);
	if (rxcp->vlanf) {
		rxcp->vtm = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, vtm,
					  compl);
		rxcp->vid = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, vlan_tag,
					  compl);
	}
}

static struct be_rx_compl_info *be_rx_compl_get(struct be_rx_obj *rxo)
{
	struct be_eth_rx_compl *compl = queue_tail_node(&rxo->cq);
	struct be_rx_compl_info *rxcp = &rxo->rxcp;
	struct be_adapter *adapter = rxo->adapter;

	/* For checking the valid bit it is OK to use either definition as the
	 * valid bit is at the same position in both v0 and v1 Rx compl */
	if (compl->dw[offsetof(struct amap_eth_rx_compl_v1, valid) / 32] == 0)
		return NULL;

	rmb();
	be_dws_le_to_cpu(compl, sizeof(*compl));

	if (adapter->be3_native)
		be_parse_rx_compl_v1(adapter, compl, rxcp);
	else
		be_parse_rx_compl_v0(adapter, compl, rxcp);

	if (rxcp->vlanf) {
		/* vlanf could be wrongly set in some cards.
		 * ignore if vtm is not set */
		if ((adapter->function_mode & 0x400) && !rxcp->vtm)
			rxcp->vlanf = 0;

		if (!lancer_chip(adapter))
			rxcp->vid = swab16(rxcp->vid);

		if ((adapter->pvid == rxcp->vid) &&
			!adapter->vlan_tag[rxcp->vid])
			rxcp->vlanf = 0;
	}

	/* As the compl has been parsed, reset it; we won't touch it again */
	compl->dw[offsetof(struct amap_eth_rx_compl_v1, valid) / 32] = 0;

	queue_tail_inc(&rxo->cq);
	return rxcp;
}

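/*
 * Pages above order 0 are allocated with __GFP_COMP (compound pages) so
 * that the per-frag get_page()/put_page() calls operate on the head page's
 * refcount; this is what lets one big page safely back several RX buffers.
 */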
static inline struct page *be_alloc_pages(u32 size, gfp_t gfp)
{
	u32 order = get_order(size);

	if (order > 0)
		gfp |= __GFP_COMP;
	return alloc_pages(gfp, order);
}

1195/*
1196 * Allocate a page, split it to fragments of size rx_frag_size and post as
1197 * receive buffers to BE
1198 */
static void be_post_rx_frags(struct be_rx_obj *rxo, gfp_t gfp)
{
	struct be_adapter *adapter = rxo->adapter;
	struct be_rx_page_info *page_info_tbl = rxo->page_info_tbl;
	struct be_rx_page_info *page_info = NULL, *prev_page_info = NULL;
	struct be_queue_info *rxq = &rxo->q;
	struct page *pagep = NULL;
	struct be_eth_rx_d *rxd;
	u64 page_dmaaddr = 0, frag_dmaaddr;
	u32 posted, page_offset = 0;

	page_info = &rxo->page_info_tbl[rxq->head];
	for (posted = 0; posted < MAX_RX_POST && !page_info->page; posted++) {
		if (!pagep) {
			pagep = be_alloc_pages(adapter->big_page_size, gfp);
			if (unlikely(!pagep)) {
				rxo->stats.rx_post_fail++;
				break;
			}
			page_dmaaddr = dma_map_page(&adapter->pdev->dev, pagep,
						    0, adapter->big_page_size,
						    DMA_FROM_DEVICE);
			page_info->page_offset = 0;
		} else {
			get_page(pagep);
			page_info->page_offset = page_offset + rx_frag_size;
		}
		page_offset = page_info->page_offset;
		page_info->page = pagep;
		dma_unmap_addr_set(page_info, bus, page_dmaaddr);
		frag_dmaaddr = page_dmaaddr + page_info->page_offset;

		rxd = queue_head_node(rxq);
		rxd->fragpa_lo = cpu_to_le32(frag_dmaaddr & 0xFFFFFFFF);
		rxd->fragpa_hi = cpu_to_le32(upper_32_bits(frag_dmaaddr));

		/* Any space left in the current big page for another frag? */
		if ((page_offset + rx_frag_size + rx_frag_size) >
					adapter->big_page_size) {
			pagep = NULL;
			page_info->last_page_user = true;
		}

		prev_page_info = page_info;
		queue_head_inc(rxq);
		page_info = &page_info_tbl[rxq->head];
	}
	if (pagep)
		prev_page_info->last_page_user = true;

	if (posted) {
		atomic_add(posted, &rxq->used);
		be_rxq_notify(adapter, rxq->id, posted);
	} else if (atomic_read(&rxq->used) == 0) {
		/* Let be_worker replenish when memory is available */
		rxo->rx_post_starved = true;
	}
}

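/* A completion entry is valid only once its 'valid' dword is non-zero.
 * The rmb() below ensures the rest of the entry is read only after the
 * valid dword has been observed, i.e. after the HW has finished DMAing
 * the entry. The event queue entries below use the same pattern.
 */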
static struct be_eth_tx_compl *be_tx_compl_get(struct be_queue_info *tx_cq)
{
	struct be_eth_tx_compl *txcp = queue_tail_node(tx_cq);

	if (txcp->dw[offsetof(struct amap_eth_tx_compl, valid) / 32] == 0)
		return NULL;

	rmb();
	be_dws_le_to_cpu(txcp, sizeof(*txcp));

	txcp->dw[offsetof(struct amap_eth_tx_compl, valid) / 32] = 0;

	queue_tail_inc(tx_cq);
	return txcp;
}

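/* Reclaim the wrbs of one transmitted skb: starting at the header wrb at
 * txq->tail, walk the ring up to last_index, unmapping each fragment and
 * finally freeing the skb itself. txq->used is reduced by the number of
 * wrbs reclaimed, including the header wrb.
 */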
static void be_tx_compl_process(struct be_adapter *adapter, u16 last_index)
{
	struct be_queue_info *txq = &adapter->tx_obj.q;
	struct be_eth_wrb *wrb;
	struct sk_buff **sent_skbs = adapter->tx_obj.sent_skb_list;
	struct sk_buff *sent_skb;
	u16 cur_index, num_wrbs = 1; /* account for hdr wrb */
	bool unmap_skb_hdr = true;

	sent_skb = sent_skbs[txq->tail];
	BUG_ON(!sent_skb);
	sent_skbs[txq->tail] = NULL;

	/* skip header wrb */
	queue_tail_inc(txq);

	do {
		cur_index = txq->tail;
		wrb = queue_tail_node(txq);
		unmap_tx_frag(&adapter->pdev->dev, wrb,
			      (unmap_skb_hdr && skb_headlen(sent_skb)));
		unmap_skb_hdr = false;

		num_wrbs++;
		queue_tail_inc(txq);
	} while (cur_index != last_index);

	atomic_sub(num_wrbs, &txq->used);

	kfree_skb(sent_skb);
}

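/* Pop the next valid entry off an event queue. An entry with evt == 0 has
 * not been written by HW yet. Consumed entries are handed back to HW in
 * one batch via be_eq_notify(), which also rearms the EQ.
 */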
static inline struct be_eq_entry *event_get(struct be_eq_obj *eq_obj)
{
	struct be_eq_entry *eqe = queue_tail_node(&eq_obj->q);

	if (!eqe->evt)
		return NULL;

	rmb();
	eqe->evt = le32_to_cpu(eqe->evt);
	queue_tail_inc(&eq_obj->q);
	return eqe;
}

static int event_handle(struct be_adapter *adapter,
			struct be_eq_obj *eq_obj)
{
	struct be_eq_entry *eqe;
	u16 num = 0;

	while ((eqe = event_get(eq_obj)) != NULL) {
		eqe->evt = 0;
		num++;
	}

	/* Deal with any spurious interrupts that come
	 * without events
	 */
	be_eq_notify(adapter, eq_obj->q.id, true, true, num);
	if (num)
		napi_schedule(&eq_obj->napi);

	return num;
}

/* Just read and notify events without processing them.
 * Used at the time of destroying event queues */
static void be_eq_clean(struct be_adapter *adapter,
			struct be_eq_obj *eq_obj)
{
	struct be_eq_entry *eqe;
	u16 num = 0;

	while ((eqe = event_get(eq_obj)) != NULL) {
		eqe->evt = 0;
		num++;
	}

	if (num)
		be_eq_notify(adapter, eq_obj->q.id, false, true, num);
}

static void be_rx_q_clean(struct be_adapter *adapter, struct be_rx_obj *rxo)
{
	struct be_rx_page_info *page_info;
	struct be_queue_info *rxq = &rxo->q;
	struct be_queue_info *rx_cq = &rxo->cq;
	struct be_rx_compl_info *rxcp;
	u16 tail;

	/* First cleanup pending rx completions */
	while ((rxcp = be_rx_compl_get(rxo)) != NULL) {
		be_rx_compl_discard(adapter, rxo, rxcp);
		be_cq_notify(adapter, rx_cq->id, false, 1);
	}

	/* Then free posted rx buffers that were not used */
	tail = (rxq->head + rxq->len - atomic_read(&rxq->used)) % rxq->len;
	for (; atomic_read(&rxq->used) > 0; index_inc(&tail, rxq->len)) {
		page_info = get_rx_page_info(adapter, rxo, tail);
		put_page(page_info->page);
		memset(page_info, 0, sizeof(*page_info));
	}
	BUG_ON(atomic_read(&rxq->used));
}

static void be_tx_compl_clean(struct be_adapter *adapter)
{
	struct be_queue_info *tx_cq = &adapter->tx_obj.cq;
	struct be_queue_info *txq = &adapter->tx_obj.q;
	struct be_eth_tx_compl *txcp;
	u16 end_idx, cmpl = 0, timeo = 0;
	struct sk_buff **sent_skbs = adapter->tx_obj.sent_skb_list;
	struct sk_buff *sent_skb;
	bool dummy_wrb;

	/* Wait for a max of 200ms for all the tx-completions to arrive. */
	do {
		while ((txcp = be_tx_compl_get(tx_cq))) {
			end_idx = AMAP_GET_BITS(struct amap_eth_tx_compl,
					wrb_index, txcp);
			be_tx_compl_process(adapter, end_idx);
			cmpl++;
		}
		if (cmpl) {
			be_cq_notify(adapter, tx_cq->id, false, cmpl);
			cmpl = 0;
		}

		if (atomic_read(&txq->used) == 0 || ++timeo > 200)
			break;

		mdelay(1);
	} while (true);

	if (atomic_read(&txq->used))
		dev_err(&adapter->pdev->dev, "%d pending tx-completions\n",
			atomic_read(&txq->used));

	/* free posted tx for which compls will never arrive */
	while (atomic_read(&txq->used)) {
		sent_skb = sent_skbs[txq->tail];
		end_idx = txq->tail;
		index_adv(&end_idx,
			wrb_cnt_for_skb(adapter, sent_skb, &dummy_wrb) - 1,
			txq->len);
		be_tx_compl_process(adapter, end_idx);
	}
}

static void be_mcc_queues_destroy(struct be_adapter *adapter)
{
	struct be_queue_info *q;

	q = &adapter->mcc_obj.q;
	if (q->created)
		be_cmd_q_destroy(adapter, q, QTYPE_MCCQ);
	be_queue_free(adapter, q);

	q = &adapter->mcc_obj.cq;
	if (q->created)
		be_cmd_q_destroy(adapter, q, QTYPE_CQ);
	be_queue_free(adapter, q);
}

/* Must be called only after TX qs are created as MCC shares TX EQ */
static int be_mcc_queues_create(struct be_adapter *adapter)
{
	struct be_queue_info *q, *cq;

	/* Alloc MCC compl queue */
	cq = &adapter->mcc_obj.cq;
	if (be_queue_alloc(adapter, cq, MCC_CQ_LEN,
			sizeof(struct be_mcc_compl)))
		goto err;

	/* Ask BE to create MCC compl queue; share TX's eq */
	if (be_cmd_cq_create(adapter, cq, &adapter->tx_eq.q, false, true, 0))
		goto mcc_cq_free;

	/* Alloc MCC queue */
	q = &adapter->mcc_obj.q;
	if (be_queue_alloc(adapter, q, MCC_Q_LEN, sizeof(struct be_mcc_wrb)))
		goto mcc_cq_destroy;

	/* Ask BE to create MCC queue */
	if (be_cmd_mccq_create(adapter, q, cq))
		goto mcc_q_free;

	return 0;

mcc_q_free:
	be_queue_free(adapter, q);
mcc_cq_destroy:
	be_cmd_q_destroy(adapter, cq, QTYPE_CQ);
mcc_cq_free:
	be_queue_free(adapter, cq);
err:
	return -1;
}

static void be_tx_queues_destroy(struct be_adapter *adapter)
{
	struct be_queue_info *q;

	q = &adapter->tx_obj.q;
	if (q->created)
		be_cmd_q_destroy(adapter, q, QTYPE_TXQ);
	be_queue_free(adapter, q);

	q = &adapter->tx_obj.cq;
	if (q->created)
		be_cmd_q_destroy(adapter, q, QTYPE_CQ);
	be_queue_free(adapter, q);

	/* Clear any residual events */
	be_eq_clean(adapter, &adapter->tx_eq);

	q = &adapter->tx_eq.q;
	if (q->created)
		be_cmd_q_destroy(adapter, q, QTYPE_EQ);
	be_queue_free(adapter, q);
}

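/* The TX EQ runs with a fixed eq delay (cur_eqd = 96) and adaptive
 * interrupt coalescing disabled (min/max eqd of 0); only the RX EQs use
 * AIC. The EQ -> CQ -> Q creation order below mirrors the teardown above.
 */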
static int be_tx_queues_create(struct be_adapter *adapter)
{
	struct be_queue_info *eq, *q, *cq;

	adapter->tx_eq.max_eqd = 0;
	adapter->tx_eq.min_eqd = 0;
	adapter->tx_eq.cur_eqd = 96;
	adapter->tx_eq.enable_aic = false;
	/* Alloc Tx Event queue */
	eq = &adapter->tx_eq.q;
	if (be_queue_alloc(adapter, eq, EVNT_Q_LEN, sizeof(struct be_eq_entry)))
		return -1;

	/* Ask BE to create Tx Event queue */
	if (be_cmd_eq_create(adapter, eq, adapter->tx_eq.cur_eqd))
		goto tx_eq_free;

	adapter->tx_eq.eq_idx = adapter->eq_next_idx++;

	/* Alloc TX eth compl queue */
	cq = &adapter->tx_obj.cq;
	if (be_queue_alloc(adapter, cq, TX_CQ_LEN,
			sizeof(struct be_eth_tx_compl)))
		goto tx_eq_destroy;

	/* Ask BE to create Tx eth compl queue */
	if (be_cmd_cq_create(adapter, cq, eq, false, false, 3))
		goto tx_cq_free;

	/* Alloc TX eth queue */
	q = &adapter->tx_obj.q;
	if (be_queue_alloc(adapter, q, TX_Q_LEN, sizeof(struct be_eth_wrb)))
		goto tx_cq_destroy;

	/* Ask BE to create Tx eth queue */
	if (be_cmd_txq_create(adapter, q, cq))
		goto tx_q_free;
	return 0;

tx_q_free:
	be_queue_free(adapter, q);
tx_cq_destroy:
	be_cmd_q_destroy(adapter, cq, QTYPE_CQ);
tx_cq_free:
	be_queue_free(adapter, cq);
tx_eq_destroy:
	be_cmd_q_destroy(adapter, eq, QTYPE_EQ);
tx_eq_free:
	be_queue_free(adapter, eq);
	return -1;
}

static void be_rx_queues_destroy(struct be_adapter *adapter)
{
	struct be_queue_info *q;
	struct be_rx_obj *rxo;
	int i;

	for_all_rx_queues(adapter, rxo, i) {
		q = &rxo->q;
		if (q->created) {
			be_cmd_q_destroy(adapter, q, QTYPE_RXQ);
			/* After the rxq is invalidated, wait for a grace time
			 * of 1ms for all dma to end and the flush compl to
			 * arrive
			 */
			mdelay(1);
			be_rx_q_clean(adapter, rxo);
		}
		be_queue_free(adapter, q);

		q = &rxo->cq;
		if (q->created)
			be_cmd_q_destroy(adapter, q, QTYPE_CQ);
		be_queue_free(adapter, q);

		/* Clear any residual events */
		q = &rxo->rx_eq.q;
		if (q->created) {
			be_eq_clean(adapter, &rxo->rx_eq);
			be_cmd_q_destroy(adapter, q, QTYPE_EQ);
		}
		be_queue_free(adapter, q);
	}
}

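/* Multiple (RSS) RX queues are used only when the function is RSS capable,
 * SR-IOV is off and the function is not in FLEX10/multi-channel mode
 * (taking function_mode bit 0x400 to denote that mode); otherwise a single
 * default queue is used.
 */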
static u32 be_num_rxqs_want(struct be_adapter *adapter)
{
	if (multi_rxq && (adapter->function_caps & BE_FUNCTION_CAPS_RSS) &&
		!adapter->sriov_enabled && !(adapter->function_mode & 0x400)) {
		return 1 + MAX_RSS_QS; /* one default non-RSS queue */
	} else {
		dev_warn(&adapter->pdev->dev,
			"No support for multiple RX queues\n");
		return 1;
	}
}

static int be_rx_queues_create(struct be_adapter *adapter)
{
	struct be_queue_info *eq, *q, *cq;
	struct be_rx_obj *rxo;
	int rc, i;

	adapter->num_rx_qs = min(be_num_rxqs_want(adapter),
				msix_enabled(adapter) ?
					adapter->num_msix_vec - 1 : 1);
	if (adapter->num_rx_qs != MAX_RX_QS)
		dev_warn(&adapter->pdev->dev,
			"Can create only %d RX queues\n", adapter->num_rx_qs);

	adapter->big_page_size = (1 << get_order(rx_frag_size)) * PAGE_SIZE;
	for_all_rx_queues(adapter, rxo, i) {
		rxo->adapter = adapter;
		rxo->rx_eq.max_eqd = BE_MAX_EQD;
		rxo->rx_eq.enable_aic = true;

		/* EQ */
		eq = &rxo->rx_eq.q;
		rc = be_queue_alloc(adapter, eq, EVNT_Q_LEN,
					sizeof(struct be_eq_entry));
		if (rc)
			goto err;

		rc = be_cmd_eq_create(adapter, eq, rxo->rx_eq.cur_eqd);
		if (rc)
			goto err;

		rxo->rx_eq.eq_idx = adapter->eq_next_idx++;

		/* CQ */
		cq = &rxo->cq;
		rc = be_queue_alloc(adapter, cq, RX_CQ_LEN,
				sizeof(struct be_eth_rx_compl));
		if (rc)
			goto err;

		rc = be_cmd_cq_create(adapter, cq, eq, false, false, 3);
		if (rc)
			goto err;
		/* Rx Q */
		q = &rxo->q;
		rc = be_queue_alloc(adapter, q, RX_Q_LEN,
				sizeof(struct be_eth_rx_d));
		if (rc)
			goto err;

		rc = be_cmd_rxq_create(adapter, q, cq->id, rx_frag_size,
			BE_MAX_JUMBO_FRAME_SIZE, adapter->if_handle,
			(i > 0) ? 1 : 0/* rss enable */, &rxo->rss_id);
		if (rc)
			goto err;
	}

	if (be_multi_rxq(adapter)) {
		u8 rsstable[MAX_RSS_QS];

		for_all_rss_queues(adapter, rxo, i)
			rsstable[i] = rxo->rss_id;

		rc = be_cmd_rss_config(adapter, rsstable,
			adapter->num_rx_qs - 1);
		if (rc)
			goto err;
	}

	return 0;
err:
	be_rx_queues_destroy(adapter);
	return -1;
}

static bool event_peek(struct be_eq_obj *eq_obj)
{
	struct be_eq_entry *eqe = queue_tail_node(&eq_obj->q);

	return eqe->evt != 0;
}

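/* INTx dispatch. Lancer has no CEV_ISR register, so each EQ is peeked
 * directly. On BE2/BE3 the ISR is read and a per-EQ bit is tested;
 * e.g. with tx_eq.eq_idx == 0, (1 << 0) tests bit 0 of the ISR for a
 * TX/MCC event. Returning IRQ_NONE tells the kernel the (shared)
 * interrupt line was raised by another device.
 */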
static irqreturn_t be_intx(int irq, void *dev)
{
	struct be_adapter *adapter = dev;
	struct be_rx_obj *rxo;
	int isr, i, tx = 0, rx = 0;

	if (lancer_chip(adapter)) {
		if (event_peek(&adapter->tx_eq))
			tx = event_handle(adapter, &adapter->tx_eq);
		for_all_rx_queues(adapter, rxo, i) {
			if (event_peek(&rxo->rx_eq))
				rx |= event_handle(adapter, &rxo->rx_eq);
		}

		if (!(tx || rx))
			return IRQ_NONE;

	} else {
		isr = ioread32(adapter->csr + CEV_ISR0_OFFSET +
			(adapter->tx_eq.q.id / 8) * CEV_ISR_SIZE);
		if (!isr)
			return IRQ_NONE;

		if ((1 << adapter->tx_eq.eq_idx & isr))
			event_handle(adapter, &adapter->tx_eq);

		for_all_rx_queues(adapter, rxo, i) {
			if ((1 << rxo->rx_eq.eq_idx & isr))
				event_handle(adapter, &rxo->rx_eq);
		}
	}

	return IRQ_HANDLED;
}

static irqreturn_t be_msix_rx(int irq, void *dev)
{
	struct be_rx_obj *rxo = dev;
	struct be_adapter *adapter = rxo->adapter;

	event_handle(adapter, &rxo->rx_eq);

	return IRQ_HANDLED;
}

static irqreturn_t be_msix_tx_mcc(int irq, void *dev)
{
	struct be_adapter *adapter = dev;

	event_handle(adapter, &adapter->tx_eq);

	return IRQ_HANDLED;
}

static inline bool do_gro(struct be_rx_compl_info *rxcp)
{
	return rxcp->tcpf && !rxcp->err;
}

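/* NAPI poll handler for an RX queue. Unlike the TX/MCC poll below, the
 * budget is honoured here: when it is exhausted the CQ is notified
 * without rearming, so the poll runs again; once all completions are
 * consumed, napi_complete() is called and the CQ is rearmed.
 */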
static int be_poll_rx(struct napi_struct *napi, int budget)
{
	struct be_eq_obj *rx_eq = container_of(napi, struct be_eq_obj, napi);
	struct be_rx_obj *rxo = container_of(rx_eq, struct be_rx_obj, rx_eq);
	struct be_adapter *adapter = rxo->adapter;
	struct be_queue_info *rx_cq = &rxo->cq;
	struct be_rx_compl_info *rxcp;
	u32 work_done;

	rxo->stats.rx_polls++;
	for (work_done = 0; work_done < budget; work_done++) {
		rxcp = be_rx_compl_get(rxo);
		if (!rxcp)
			break;

		/* Ignore flush completions */
		if (rxcp->num_rcvd) {
			if (do_gro(rxcp))
				be_rx_compl_process_gro(adapter, rxo, rxcp);
			else
				be_rx_compl_process(adapter, rxo, rxcp);
		}
		be_rx_stats_update(rxo, rxcp);
	}

	/* Refill the queue */
	if (atomic_read(&rxo->q.used) < RX_FRAGS_REFILL_WM)
		be_post_rx_frags(rxo, GFP_ATOMIC);

	/* All consumed */
	if (work_done < budget) {
		napi_complete(napi);
		be_cq_notify(adapter, rx_cq->id, true, work_done);
	} else {
		/* More to be consumed; continue with interrupts disabled */
		be_cq_notify(adapter, rx_cq->id, false, work_done);
	}
	return work_done;
}

/* As TX and MCC share the same EQ, check for both TX and MCC completions.
 * For TX/MCC we don't honour budget; consume everything
 */
static int be_poll_tx_mcc(struct napi_struct *napi, int budget)
{
	struct be_eq_obj *tx_eq = container_of(napi, struct be_eq_obj, napi);
	struct be_adapter *adapter =
		container_of(tx_eq, struct be_adapter, tx_eq);
	struct be_queue_info *txq = &adapter->tx_obj.q;
	struct be_queue_info *tx_cq = &adapter->tx_obj.cq;
	struct be_eth_tx_compl *txcp;
	int tx_compl = 0, mcc_compl, status = 0;
	u16 end_idx;

	while ((txcp = be_tx_compl_get(tx_cq))) {
		end_idx = AMAP_GET_BITS(struct amap_eth_tx_compl,
				wrb_index, txcp);
		be_tx_compl_process(adapter, end_idx);
		tx_compl++;
	}

	mcc_compl = be_process_mcc(adapter, &status);

	napi_complete(napi);

	if (mcc_compl) {
		struct be_mcc_obj *mcc_obj = &adapter->mcc_obj;
		be_cq_notify(adapter, mcc_obj->cq.id, true, mcc_compl);
	}

	if (tx_compl) {
		be_cq_notify(adapter, adapter->tx_obj.cq.id, true, tx_compl);

		/* As Tx wrbs have been freed up, wake up netdev queue if
		 * it was stopped due to lack of tx wrbs.
		 */
		if (netif_queue_stopped(adapter->netdev) &&
			atomic_read(&txq->used) < txq->len / 2) {
			netif_wake_queue(adapter->netdev);
		}

		tx_stats(adapter)->be_tx_events++;
		tx_stats(adapter)->be_tx_compl += tx_compl;
	}

	return 1;
}

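/* Check the UE (unrecoverable error) status registers in PCI config space.
 * Bits that are set in the corresponding MASK registers are expected and
 * ignored; any remaining bit indicates an errored HW block, whose name is
 * printed from the ue_status_low_desc/ue_status_hi_desc tables.
 */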
void be_detect_dump_ue(struct be_adapter *adapter)
{
	u32 ue_status_lo, ue_status_hi, ue_status_lo_mask, ue_status_hi_mask;
	u32 i;

	pci_read_config_dword(adapter->pdev,
				PCICFG_UE_STATUS_LOW, &ue_status_lo);
	pci_read_config_dword(adapter->pdev,
				PCICFG_UE_STATUS_HIGH, &ue_status_hi);
	pci_read_config_dword(adapter->pdev,
				PCICFG_UE_STATUS_LOW_MASK, &ue_status_lo_mask);
	pci_read_config_dword(adapter->pdev,
				PCICFG_UE_STATUS_HI_MASK, &ue_status_hi_mask);

	ue_status_lo = (ue_status_lo & (~ue_status_lo_mask));
	ue_status_hi = (ue_status_hi & (~ue_status_hi_mask));

	if (ue_status_lo || ue_status_hi) {
		adapter->ue_detected = true;
		adapter->eeh_err = true;
		dev_err(&adapter->pdev->dev, "UE Detected!!\n");
	}

	if (ue_status_lo) {
		for (i = 0; ue_status_lo; ue_status_lo >>= 1, i++) {
			if (ue_status_lo & 1)
				dev_err(&adapter->pdev->dev,
				"UE: %s bit set\n", ue_status_low_desc[i]);
		}
	}
	if (ue_status_hi) {
		for (i = 0; ue_status_hi; ue_status_hi >>= 1, i++) {
			if (ue_status_hi & 1)
				dev_err(&adapter->pdev->dev,
				"UE: %s bit set\n", ue_status_hi_desc[i]);
		}
	}
}

static void be_worker(struct work_struct *work)
{
	struct be_adapter *adapter =
		container_of(work, struct be_adapter, work.work);
	struct be_rx_obj *rxo;
	int i;

	if (!adapter->ue_detected && !lancer_chip(adapter))
		be_detect_dump_ue(adapter);

	/* when interrupts are not yet enabled, just reap any pending
	 * mcc completions */
	if (!netif_running(adapter->netdev)) {
		int mcc_compl, status = 0;

		mcc_compl = be_process_mcc(adapter, &status);

		if (mcc_compl) {
			struct be_mcc_obj *mcc_obj = &adapter->mcc_obj;
			be_cq_notify(adapter, mcc_obj->cq.id, false, mcc_compl);
		}

		goto reschedule;
	}

	if (!adapter->stats_cmd_sent)
		be_cmd_get_stats(adapter, &adapter->stats_cmd);

	be_tx_rate_update(adapter);

	for_all_rx_queues(adapter, rxo, i) {
		be_rx_rate_update(rxo);
		be_rx_eqd_update(adapter, rxo);

		if (rxo->rx_post_starved) {
			rxo->rx_post_starved = false;
			be_post_rx_frags(rxo, GFP_KERNEL);
		}
	}

reschedule:
	schedule_delayed_work(&adapter->work, msecs_to_jiffies(1000));
}

static void be_msix_disable(struct be_adapter *adapter)
{
	if (msix_enabled(adapter)) {
		pci_disable_msix(adapter->pdev);
		adapter->num_msix_vec = 0;
	}
}

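/* Try to get one MSI-X vector per desired RX queue plus one shared by TX
 * and MCC. A positive return from pci_enable_msix() means only that many
 * vectors are available; retry with the reduced count as long as it meets
 * the BE_MIN_MSIX_VECTORS floor. On failure num_msix_vec stays 0 and the
 * driver falls back to INTx.
 */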
static void be_msix_enable(struct be_adapter *adapter)
{
#define BE_MIN_MSIX_VECTORS	(1 + 1) /* Rx + Tx */
	int i, status, num_vec;

	num_vec = be_num_rxqs_want(adapter) + 1;

	for (i = 0; i < num_vec; i++)
		adapter->msix_entries[i].entry = i;

	status = pci_enable_msix(adapter->pdev, adapter->msix_entries, num_vec);
	if (status == 0) {
		goto done;
	} else if (status >= BE_MIN_MSIX_VECTORS) {
		num_vec = status;
		if (pci_enable_msix(adapter->pdev, adapter->msix_entries,
				num_vec) == 0)
			goto done;
	}
	return;
done:
	adapter->num_msix_vec = num_vec;
	return;
}

static void be_sriov_enable(struct be_adapter *adapter)
{
	be_check_sriov_fn_type(adapter);
#ifdef CONFIG_PCI_IOV
	if (be_physfn(adapter) && num_vfs) {
		int status, pos;
		u16 nvfs;

		pos = pci_find_ext_capability(adapter->pdev,
						PCI_EXT_CAP_ID_SRIOV);
		pci_read_config_word(adapter->pdev,
					pos + PCI_SRIOV_TOTAL_VF, &nvfs);

		if (num_vfs > nvfs) {
			dev_info(&adapter->pdev->dev,
					"Device supports %d VFs and not %d\n",
					nvfs, num_vfs);
			num_vfs = nvfs;
		}

		status = pci_enable_sriov(adapter->pdev, num_vfs);
		adapter->sriov_enabled = status ? false : true;
	}
#endif
}

static void be_sriov_disable(struct be_adapter *adapter)
{
#ifdef CONFIG_PCI_IOV
	if (adapter->sriov_enabled) {
		pci_disable_sriov(adapter->pdev);
		adapter->sriov_enabled = false;
	}
#endif
}

static inline int be_msix_vec_get(struct be_adapter *adapter,
			struct be_eq_obj *eq_obj)
{
	return adapter->msix_entries[eq_obj->eq_idx].vector;
}

static int be_request_irq(struct be_adapter *adapter,
		struct be_eq_obj *eq_obj,
		void *handler, char *desc, void *context)
{
	struct net_device *netdev = adapter->netdev;
	int vec;

	sprintf(eq_obj->desc, "%s-%s", netdev->name, desc);
	vec = be_msix_vec_get(adapter, eq_obj);
	return request_irq(vec, handler, 0, eq_obj->desc, context);
}

static void be_free_irq(struct be_adapter *adapter, struct be_eq_obj *eq_obj,
			void *context)
{
	int vec = be_msix_vec_get(adapter, eq_obj);
	free_irq(vec, context);
}

static int be_msix_register(struct be_adapter *adapter)
{
	struct be_rx_obj *rxo;
	int status, i;
	char qname[10];

	status = be_request_irq(adapter, &adapter->tx_eq, be_msix_tx_mcc, "tx",
				adapter);
	if (status)
		goto err;

	for_all_rx_queues(adapter, rxo, i) {
		sprintf(qname, "rxq%d", i);
		status = be_request_irq(adapter, &rxo->rx_eq, be_msix_rx,
				qname, rxo);
		if (status)
			goto err_msix;
	}

	return 0;

err_msix:
	be_free_irq(adapter, &adapter->tx_eq, adapter);

	for (i--, rxo = &adapter->rx_obj[i]; i >= 0; i--, rxo--)
		be_free_irq(adapter, &rxo->rx_eq, rxo);

err:
	dev_warn(&adapter->pdev->dev,
		"MSIX Request IRQ failed - err %d\n", status);
	be_msix_disable(adapter);
	return status;
}

static int be_irq_register(struct be_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	int status;

	if (msix_enabled(adapter)) {
		status = be_msix_register(adapter);
		if (status == 0)
			goto done;
		/* INTx is not supported for VF */
		if (!be_physfn(adapter))
			return status;
	}

	/* INTx */
	netdev->irq = adapter->pdev->irq;
	status = request_irq(netdev->irq, be_intx, IRQF_SHARED, netdev->name,
			adapter);
	if (status) {
		dev_err(&adapter->pdev->dev,
			"INTx request IRQ failed - err %d\n", status);
		return status;
	}
done:
	adapter->isr_registered = true;
	return 0;
}

static void be_irq_unregister(struct be_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	struct be_rx_obj *rxo;
	int i;

	if (!adapter->isr_registered)
		return;

	/* INTx */
	if (!msix_enabled(adapter)) {
		free_irq(netdev->irq, adapter);
		goto done;
	}

	/* MSIx */
	be_free_irq(adapter, &adapter->tx_eq, adapter);

	for_all_rx_queues(adapter, rxo, i)
		be_free_irq(adapter, &rxo->rx_eq, rxo);

done:
	adapter->isr_registered = false;
}

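/* Quiesce the interface: stop async MCC processing, mask the interrupt
 * (non-Lancer), disable NAPI, sync and free the IRQs, then drain TX so
 * that every posted skb is either completed or forcibly reclaimed before
 * the queues are destroyed.
 */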
static int be_close(struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_rx_obj *rxo;
	struct be_eq_obj *tx_eq = &adapter->tx_eq;
	int vec, i;

	be_async_mcc_disable(adapter);

	netif_carrier_off(netdev);
	adapter->link_up = false;

	if (!lancer_chip(adapter))
		be_intr_set(adapter, false);

	for_all_rx_queues(adapter, rxo, i)
		napi_disable(&rxo->rx_eq.napi);

	napi_disable(&tx_eq->napi);

	if (lancer_chip(adapter)) {
		be_cq_notify(adapter, adapter->tx_obj.cq.id, false, 0);
		be_cq_notify(adapter, adapter->mcc_obj.cq.id, false, 0);
		for_all_rx_queues(adapter, rxo, i)
			be_cq_notify(adapter, rxo->cq.id, false, 0);
	}

	if (msix_enabled(adapter)) {
		vec = be_msix_vec_get(adapter, tx_eq);
		synchronize_irq(vec);

		for_all_rx_queues(adapter, rxo, i) {
			vec = be_msix_vec_get(adapter, &rxo->rx_eq);
			synchronize_irq(vec);
		}
	} else {
		synchronize_irq(netdev->irq);
	}
	be_irq_unregister(adapter);

	/* Wait for all pending tx completions to arrive so that
	 * all tx skbs are freed.
	 */
	be_tx_compl_clean(adapter);

	return 0;
}

static int be_open(struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_eq_obj *tx_eq = &adapter->tx_eq;
	struct be_rx_obj *rxo;
	bool link_up;
	int status, i;
	u8 mac_speed;
	u16 link_speed;

	for_all_rx_queues(adapter, rxo, i) {
		be_post_rx_frags(rxo, GFP_KERNEL);
		napi_enable(&rxo->rx_eq.napi);
	}
	napi_enable(&tx_eq->napi);

	be_irq_register(adapter);

	if (!lancer_chip(adapter))
		be_intr_set(adapter, true);

	/* The evt queues are created in unarmed state; arm them */
	for_all_rx_queues(adapter, rxo, i) {
		be_eq_notify(adapter, rxo->rx_eq.q.id, true, false, 0);
		be_cq_notify(adapter, rxo->cq.id, true, 0);
	}
	be_eq_notify(adapter, tx_eq->q.id, true, false, 0);

	/* Now that interrupts are on we can process async mcc */
	be_async_mcc_enable(adapter);

	status = be_cmd_link_status_query(adapter, &link_up, &mac_speed,
			&link_speed);
	if (status)
		goto err;
	be_link_status_update(adapter, link_up);

	if (be_physfn(adapter)) {
		status = be_vid_config(adapter, false, 0);
		if (status)
			goto err;

		status = be_cmd_set_flow_control(adapter,
				adapter->tx_fc, adapter->rx_fc);
		if (status)
			goto err;
	}

	return 0;
err:
	be_close(adapter->netdev);
	return -EIO;
}

static int be_setup_wol(struct be_adapter *adapter, bool enable)
{
	struct be_dma_mem cmd;
	int status = 0;
	u8 mac[ETH_ALEN];

	memset(mac, 0, ETH_ALEN);

	cmd.size = sizeof(struct be_cmd_req_acpi_wol_magic_config);
	cmd.va = dma_alloc_coherent(&adapter->pdev->dev, cmd.size, &cmd.dma,
				    GFP_KERNEL);
	if (cmd.va == NULL)
		return -1;
	memset(cmd.va, 0, cmd.size);

	if (enable) {
		status = pci_write_config_dword(adapter->pdev,
			PCICFG_PM_CONTROL_OFFSET, PCICFG_PM_CONTROL_MASK);
		if (status) {
			dev_err(&adapter->pdev->dev,
				"Could not enable Wake-on-lan\n");
			dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va,
					  cmd.dma);
			return status;
		}
		status = be_cmd_enable_magic_wol(adapter,
				adapter->netdev->dev_addr, &cmd);
		pci_enable_wake(adapter->pdev, PCI_D3hot, 1);
		pci_enable_wake(adapter->pdev, PCI_D3cold, 1);
	} else {
		status = be_cmd_enable_magic_wol(adapter, mac, &cmd);
		pci_enable_wake(adapter->pdev, PCI_D3hot, 0);
		pci_enable_wake(adapter->pdev, PCI_D3cold, 0);
	}

	dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va, cmd.dma);
	return status;
}

/*
 * Generate a seed MAC address from the PF MAC Address using jhash.
 * MAC addresses for VFs are assigned incrementally starting from the seed.
 * These addresses are programmed in the ASIC by the PF and the VF driver
 * queries for the MAC address during its probe.
 */
static inline int be_vf_eth_addr_config(struct be_adapter *adapter)
{
	u32 vf = 0;
	int status = 0;
	u8 mac[ETH_ALEN];

	be_vf_eth_addr_generate(adapter, mac);

	for (vf = 0; vf < num_vfs; vf++) {
		status = be_cmd_pmac_add(adapter, mac,
					adapter->vf_cfg[vf].vf_if_handle,
					&adapter->vf_cfg[vf].vf_pmac_id,
					vf + 1);
		if (status)
			dev_err(&adapter->pdev->dev,
				"Mac address add failed for VF %d\n", vf);
		else
			memcpy(adapter->vf_cfg[vf].vf_mac_addr, mac, ETH_ALEN);

		mac[5] += 1;
	}
	return status;
}

static inline void be_vf_eth_addr_rem(struct be_adapter *adapter)
{
	u32 vf;

	for (vf = 0; vf < num_vfs; vf++) {
		if (adapter->vf_cfg[vf].vf_pmac_id != BE_INVALID_PMAC_ID)
			be_cmd_pmac_del(adapter,
					adapter->vf_cfg[vf].vf_if_handle,
					adapter->vf_cfg[vf].vf_pmac_id, vf + 1);
	}
}

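/* Bring-up sequence: create the PF interface (and one per VF when SR-IOV
 * is on), then the TX, RX and MCC queues, in that order; MCC must come
 * last since it shares the TX EQ. The error path unwinds in reverse.
 */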
static int be_setup(struct be_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	u32 cap_flags, en_flags, vf = 0;
	int status;
	u8 mac[ETH_ALEN];

	cap_flags = en_flags = BE_IF_FLAGS_UNTAGGED |
				BE_IF_FLAGS_BROADCAST |
				BE_IF_FLAGS_MULTICAST;

	if (be_physfn(adapter)) {
		cap_flags |= BE_IF_FLAGS_MCAST_PROMISCUOUS |
				BE_IF_FLAGS_PROMISCUOUS |
				BE_IF_FLAGS_PASS_L3L4_ERRORS;
		en_flags |= BE_IF_FLAGS_PASS_L3L4_ERRORS;

		if (adapter->function_caps & BE_FUNCTION_CAPS_RSS) {
			cap_flags |= BE_IF_FLAGS_RSS;
			en_flags |= BE_IF_FLAGS_RSS;
		}
	}

	status = be_cmd_if_create(adapter, cap_flags, en_flags,
			netdev->dev_addr, false/* pmac_invalid */,
			&adapter->if_handle, &adapter->pmac_id, 0);
	if (status != 0)
		goto do_none;

	if (be_physfn(adapter)) {
		if (adapter->sriov_enabled) {
			while (vf < num_vfs) {
				cap_flags = en_flags = BE_IF_FLAGS_UNTAGGED |
							BE_IF_FLAGS_BROADCAST;
				status = be_cmd_if_create(adapter, cap_flags,
					en_flags, mac, true,
					&adapter->vf_cfg[vf].vf_if_handle,
					NULL, vf+1);
				if (status) {
					dev_err(&adapter->pdev->dev,
					"Interface Create failed for VF %d\n",
					vf);
					goto if_destroy;
				}
				adapter->vf_cfg[vf].vf_pmac_id =
							BE_INVALID_PMAC_ID;
				vf++;
			}
		}
	} else {
		status = be_cmd_mac_addr_query(adapter, mac,
			MAC_ADDRESS_TYPE_NETWORK, false, adapter->if_handle);
		if (!status) {
			memcpy(adapter->netdev->dev_addr, mac, ETH_ALEN);
			memcpy(adapter->netdev->perm_addr, mac, ETH_ALEN);
		}
	}

	status = be_tx_queues_create(adapter);
	if (status != 0)
		goto if_destroy;

	status = be_rx_queues_create(adapter);
	if (status != 0)
		goto tx_qs_destroy;

	status = be_mcc_queues_create(adapter);
	if (status != 0)
		goto rx_qs_destroy;

	adapter->link_speed = -1;

	return 0;

rx_qs_destroy:
	be_rx_queues_destroy(adapter);
tx_qs_destroy:
	be_tx_queues_destroy(adapter);
if_destroy:
	if (be_physfn(adapter) && adapter->sriov_enabled)
		for (vf = 0; vf < num_vfs; vf++)
			if (adapter->vf_cfg[vf].vf_if_handle)
				be_cmd_if_destroy(adapter,
					adapter->vf_cfg[vf].vf_if_handle,
					vf + 1);
	be_cmd_if_destroy(adapter, adapter->if_handle, 0);
do_none:
	return status;
}

static int be_clear(struct be_adapter *adapter)
{
	int vf;

	if (be_physfn(adapter) && adapter->sriov_enabled)
		be_vf_eth_addr_rem(adapter);

	be_mcc_queues_destroy(adapter);
	be_rx_queues_destroy(adapter);
	be_tx_queues_destroy(adapter);
	adapter->eq_next_idx = 0;

	if (be_physfn(adapter) && adapter->sriov_enabled)
		for (vf = 0; vf < num_vfs; vf++)
			if (adapter->vf_cfg[vf].vf_if_handle)
				be_cmd_if_destroy(adapter,
					adapter->vf_cfg[vf].vf_if_handle,
					vf + 1);

	be_cmd_if_destroy(adapter, adapter->if_handle, 0);

	/* tell fw we're done with firing cmds */
	be_cmd_fw_clean(adapter);
	return 0;
}

#define FW_FILE_HDR_SIGN	"ServerEngines Corp. "
static bool be_flash_redboot(struct be_adapter *adapter,
			const u8 *p, u32 img_start, int image_size,
			int hdr_size)
{
	u32 crc_offset;
	u8 flashed_crc[4];
	int status;

	crc_offset = hdr_size + img_start + image_size - 4;

	p += crc_offset;

	status = be_cmd_get_flash_crc(adapter, flashed_crc,
			(image_size - 4));
	if (status) {
		dev_err(&adapter->pdev->dev,
		"could not get crc from flash, not flashing redboot\n");
		return false;
	}

	/* update redboot only if crc does not match */
	return memcmp(flashed_crc, p, 4) != 0;
}

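/* Flash each firmware component listed in the per-generation table below.
 * A component is streamed to the card in 32KB chunks: all but the last
 * chunk are sent with FLASHROM_OPER_SAVE (buffered on the card), and the
 * final chunk with FLASHROM_OPER_FLASH, which commits the component.
 */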
static int be_flash_data(struct be_adapter *adapter,
			const struct firmware *fw,
			struct be_dma_mem *flash_cmd, int num_of_images)
{
	int status = 0, i, filehdr_size = 0;
	u32 total_bytes = 0, flash_op;
	int num_bytes;
	const u8 *p = fw->data;
	struct be_cmd_write_flashrom *req = flash_cmd->va;
	const struct flash_comp *pflashcomp;
	int num_comp;

	static const struct flash_comp gen3_flash_types[9] = {
		{ FLASH_iSCSI_PRIMARY_IMAGE_START_g3, IMG_TYPE_ISCSI_ACTIVE,
			FLASH_IMAGE_MAX_SIZE_g3},
		{ FLASH_REDBOOT_START_g3, IMG_TYPE_REDBOOT,
			FLASH_REDBOOT_IMAGE_MAX_SIZE_g3},
		{ FLASH_iSCSI_BIOS_START_g3, IMG_TYPE_BIOS,
			FLASH_BIOS_IMAGE_MAX_SIZE_g3},
		{ FLASH_PXE_BIOS_START_g3, IMG_TYPE_PXE_BIOS,
			FLASH_BIOS_IMAGE_MAX_SIZE_g3},
		{ FLASH_FCoE_BIOS_START_g3, IMG_TYPE_FCOE_BIOS,
			FLASH_BIOS_IMAGE_MAX_SIZE_g3},
		{ FLASH_iSCSI_BACKUP_IMAGE_START_g3, IMG_TYPE_ISCSI_BACKUP,
			FLASH_IMAGE_MAX_SIZE_g3},
		{ FLASH_FCoE_PRIMARY_IMAGE_START_g3, IMG_TYPE_FCOE_FW_ACTIVE,
			FLASH_IMAGE_MAX_SIZE_g3},
		{ FLASH_FCoE_BACKUP_IMAGE_START_g3, IMG_TYPE_FCOE_FW_BACKUP,
			FLASH_IMAGE_MAX_SIZE_g3},
		{ FLASH_NCSI_START_g3, IMG_TYPE_NCSI_FW,
			FLASH_NCSI_IMAGE_MAX_SIZE_g3}
	};
	static const struct flash_comp gen2_flash_types[8] = {
		{ FLASH_iSCSI_PRIMARY_IMAGE_START_g2, IMG_TYPE_ISCSI_ACTIVE,
			FLASH_IMAGE_MAX_SIZE_g2},
		{ FLASH_REDBOOT_START_g2, IMG_TYPE_REDBOOT,
			FLASH_REDBOOT_IMAGE_MAX_SIZE_g2},
		{ FLASH_iSCSI_BIOS_START_g2, IMG_TYPE_BIOS,
			FLASH_BIOS_IMAGE_MAX_SIZE_g2},
		{ FLASH_PXE_BIOS_START_g2, IMG_TYPE_PXE_BIOS,
			FLASH_BIOS_IMAGE_MAX_SIZE_g2},
		{ FLASH_FCoE_BIOS_START_g2, IMG_TYPE_FCOE_BIOS,
			FLASH_BIOS_IMAGE_MAX_SIZE_g2},
		{ FLASH_iSCSI_BACKUP_IMAGE_START_g2, IMG_TYPE_ISCSI_BACKUP,
			FLASH_IMAGE_MAX_SIZE_g2},
		{ FLASH_FCoE_PRIMARY_IMAGE_START_g2, IMG_TYPE_FCOE_FW_ACTIVE,
			FLASH_IMAGE_MAX_SIZE_g2},
		{ FLASH_FCoE_BACKUP_IMAGE_START_g2, IMG_TYPE_FCOE_FW_BACKUP,
			FLASH_IMAGE_MAX_SIZE_g2}
	};

	if (adapter->generation == BE_GEN3) {
		pflashcomp = gen3_flash_types;
		filehdr_size = sizeof(struct flash_file_hdr_g3);
		num_comp = ARRAY_SIZE(gen3_flash_types);
	} else {
		pflashcomp = gen2_flash_types;
		filehdr_size = sizeof(struct flash_file_hdr_g2);
		num_comp = ARRAY_SIZE(gen2_flash_types);
	}
	for (i = 0; i < num_comp; i++) {
		if ((pflashcomp[i].optype == IMG_TYPE_NCSI_FW) &&
			memcmp(adapter->fw_ver, "3.102.148.0", 11) < 0)
			continue;
		if ((pflashcomp[i].optype == IMG_TYPE_REDBOOT) &&
			(!be_flash_redboot(adapter, fw->data,
			pflashcomp[i].offset, pflashcomp[i].size, filehdr_size +
			(num_of_images * sizeof(struct image_hdr)))))
			continue;
		p = fw->data;
		p += filehdr_size + pflashcomp[i].offset
			+ (num_of_images * sizeof(struct image_hdr));
		if (p + pflashcomp[i].size > fw->data + fw->size)
			return -1;
		total_bytes = pflashcomp[i].size;
		while (total_bytes) {
			if (total_bytes > 32*1024)
				num_bytes = 32*1024;
			else
				num_bytes = total_bytes;
			total_bytes -= num_bytes;

			if (!total_bytes)
				flash_op = FLASHROM_OPER_FLASH;
			else
				flash_op = FLASHROM_OPER_SAVE;
			memcpy(req->params.data_buf, p, num_bytes);
			p += num_bytes;
			status = be_cmd_write_flashrom(adapter, flash_cmd,
				pflashcomp[i].optype, flash_op, num_bytes);
			if (status) {
				dev_err(&adapter->pdev->dev,
					"cmd to write to flash rom failed.\n");
				return -1;
			}
			yield();
		}
	}
	return 0;
}

static int get_ufigen_type(struct flash_file_hdr_g2 *fhdr)
{
	if (fhdr == NULL)
		return 0;
	if (fhdr->build[0] == '3')
		return BE_GEN3;
	else if (fhdr->build[0] == '2')
		return BE_GEN2;
	else
		return 0;
}

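/* ethtool firmware-flash entry point: fetch the UFI file named by
 * 'func' via request_firmware(), check that the image generation
 * matches the adapter and flash each component via be_flash_data().
 */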
int be_load_fw(struct be_adapter *adapter, u8 *func)
{
	char fw_file[ETHTOOL_FLASH_MAX_FILENAME];
	const struct firmware *fw;
	struct flash_file_hdr_g2 *fhdr;
	struct flash_file_hdr_g3 *fhdr3;
	struct image_hdr *img_hdr_ptr = NULL;
	struct be_dma_mem flash_cmd;
	int status, i = 0, num_imgs = 0;
	const u8 *p;

	if (!netif_running(adapter->netdev)) {
		dev_err(&adapter->pdev->dev,
			"Firmware load not allowed (interface is down)\n");
		return -EPERM;
	}

	strcpy(fw_file, func);

	status = request_firmware(&fw, fw_file, &adapter->pdev->dev);
	if (status)
		goto fw_exit;

	p = fw->data;
	fhdr = (struct flash_file_hdr_g2 *) p;
	dev_info(&adapter->pdev->dev, "Flashing firmware file %s\n", fw_file);

	flash_cmd.size = sizeof(struct be_cmd_write_flashrom) + 32*1024;
	flash_cmd.va = dma_alloc_coherent(&adapter->pdev->dev, flash_cmd.size,
					  &flash_cmd.dma, GFP_KERNEL);
	if (!flash_cmd.va) {
		status = -ENOMEM;
		dev_err(&adapter->pdev->dev,
			"Memory allocation failure while flashing\n");
		goto fw_exit;
	}

	if ((adapter->generation == BE_GEN3) &&
			(get_ufigen_type(fhdr) == BE_GEN3)) {
		fhdr3 = (struct flash_file_hdr_g3 *) fw->data;
		num_imgs = le32_to_cpu(fhdr3->num_imgs);
		for (i = 0; i < num_imgs; i++) {
			img_hdr_ptr = (struct image_hdr *) (fw->data +
					(sizeof(struct flash_file_hdr_g3) +
					 i * sizeof(struct image_hdr)));
			if (le32_to_cpu(img_hdr_ptr->imageid) == 1)
				status = be_flash_data(adapter, fw, &flash_cmd,
							num_imgs);
		}
	} else if ((adapter->generation == BE_GEN2) &&
			(get_ufigen_type(fhdr) == BE_GEN2)) {
		status = be_flash_data(adapter, fw, &flash_cmd, 0);
	} else {
		dev_err(&adapter->pdev->dev,
			"UFI and Interface are not compatible for flashing\n");
		status = -1;
	}

	dma_free_coherent(&adapter->pdev->dev, flash_cmd.size, flash_cmd.va,
			  flash_cmd.dma);
	if (status) {
		dev_err(&adapter->pdev->dev, "Firmware load error\n");
		goto fw_exit;
	}

	dev_info(&adapter->pdev->dev, "Firmware flashed successfully\n");

fw_exit:
	release_firmware(fw);
	return status;
}

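/* netdev entry points; the ndo_set_vf_mac/vlan/tx_rate and
 * ndo_get_vf_config hooks configure SR-IOV VFs from the PF.
 */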
static struct net_device_ops be_netdev_ops = {
	.ndo_open		= be_open,
	.ndo_stop		= be_close,
	.ndo_start_xmit		= be_xmit,
	.ndo_set_rx_mode	= be_set_multicast_list,
	.ndo_set_mac_address	= be_mac_addr_set,
	.ndo_change_mtu		= be_change_mtu,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_vlan_rx_register	= be_vlan_register,
	.ndo_vlan_rx_add_vid	= be_vlan_add_vid,
	.ndo_vlan_rx_kill_vid	= be_vlan_rem_vid,
	.ndo_set_vf_mac		= be_set_vf_mac,
	.ndo_set_vf_vlan	= be_set_vf_vlan,
	.ndo_set_vf_tx_rate	= be_set_vf_tx_rate,
	.ndo_get_vf_config	= be_get_vf_config
};

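/* Set up netdev features, flow-control defaults, netdev/ethtool ops
 * and one NAPI context per RX queue plus one for the TX/MCC eventq.
 */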
static void be_netdev_init(struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_rx_obj *rxo;
	int i;

	netdev->features |= NETIF_F_SG | NETIF_F_HW_VLAN_RX | NETIF_F_TSO |
		NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_FILTER |
		NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
		NETIF_F_GRO | NETIF_F_TSO6;

	if (be_multi_rxq(adapter))
		netdev->features |= NETIF_F_RXHASH;

	netdev->vlan_features |= NETIF_F_SG | NETIF_F_TSO |
		NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;

	if (lancer_chip(adapter))
		netdev->vlan_features |= NETIF_F_TSO6;

	netdev->flags |= IFF_MULTICAST;

	adapter->rx_csum = true;

	/* Default settings for Rx and Tx flow control */
	adapter->rx_fc = true;
	adapter->tx_fc = true;

	netif_set_gso_max_size(netdev, 65535);

	BE_SET_NETDEV_OPS(netdev, &be_netdev_ops);

	SET_ETHTOOL_OPS(netdev, &be_ethtool_ops);

	for_all_rx_queues(adapter, rxo, i)
		netif_napi_add(netdev, &rxo->rx_eq.napi, be_poll_rx,
			BE_NAPI_WEIGHT);

	netif_napi_add(netdev, &adapter->tx_eq.napi, be_poll_tx_mcc,
		BE_NAPI_WEIGHT);
}

static void be_unmap_pci_bars(struct be_adapter *adapter)
{
	if (adapter->csr)
		iounmap(adapter->csr);
	if (adapter->db)
		iounmap(adapter->db);
	if (adapter->pcicfg && be_physfn(adapter))
		iounmap(adapter->pcicfg);
}

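/* Map the PCI BARs this function needs. Lancer uses only the doorbell
 * BAR; on BE2/BE3 the PF additionally maps the CSR and pcicfg BARs,
 * while a VF reaches its pcicfg space at a fixed offset within the
 * doorbell BAR.
 */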
static int be_map_pci_bars(struct be_adapter *adapter)
{
	u8 __iomem *addr;
	int pcicfg_reg, db_reg;

	if (lancer_chip(adapter)) {
		addr = ioremap_nocache(pci_resource_start(adapter->pdev, 0),
			pci_resource_len(adapter->pdev, 0));
		if (addr == NULL)
			return -ENOMEM;
		adapter->db = addr;
		return 0;
	}

	if (be_physfn(adapter)) {
		addr = ioremap_nocache(pci_resource_start(adapter->pdev, 2),
				pci_resource_len(adapter->pdev, 2));
		if (addr == NULL)
			return -ENOMEM;
		adapter->csr = addr;
	}

	if (adapter->generation == BE_GEN2) {
		pcicfg_reg = 1;
		db_reg = 4;
	} else {
		pcicfg_reg = 0;
		if (be_physfn(adapter))
			db_reg = 4;
		else
			db_reg = 0;
	}
	addr = ioremap_nocache(pci_resource_start(adapter->pdev, db_reg),
				pci_resource_len(adapter->pdev, db_reg));
	if (addr == NULL)
		goto pci_map_err;
	adapter->db = addr;

	if (be_physfn(adapter)) {
		addr = ioremap_nocache(
				pci_resource_start(adapter->pdev, pcicfg_reg),
				pci_resource_len(adapter->pdev, pcicfg_reg));
		if (addr == NULL)
			goto pci_map_err;
		adapter->pcicfg = addr;
	} else
		adapter->pcicfg = adapter->db + SRIOV_VF_PCICFG_OFFSET;

	return 0;
pci_map_err:
	be_unmap_pci_bars(adapter);
	return -ENOMEM;
}

static void be_ctrl_cleanup(struct be_adapter *adapter)
{
	struct be_dma_mem *mem = &adapter->mbox_mem_alloced;

	be_unmap_pci_bars(adapter);

	if (mem->va)
		dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
				  mem->dma);

	mem = &adapter->mc_cmd_mem;
	if (mem->va)
		dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
				  mem->dma);
}

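/* Map BARs and allocate the DMA-coherent memory used for the mailbox
 * and multicast commands; also initializes the mbox and MCC locks.
 */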
static int be_ctrl_init(struct be_adapter *adapter)
{
	struct be_dma_mem *mbox_mem_alloc = &adapter->mbox_mem_alloced;
	struct be_dma_mem *mbox_mem_align = &adapter->mbox_mem;
	struct be_dma_mem *mc_cmd_mem = &adapter->mc_cmd_mem;
	int status;

	status = be_map_pci_bars(adapter);
	if (status)
		goto done;

	mbox_mem_alloc->size = sizeof(struct be_mcc_mailbox) + 16;
	mbox_mem_alloc->va = dma_alloc_coherent(&adapter->pdev->dev,
						mbox_mem_alloc->size,
						&mbox_mem_alloc->dma,
						GFP_KERNEL);
	if (!mbox_mem_alloc->va) {
		status = -ENOMEM;
		goto unmap_pci_bars;
	}

	mbox_mem_align->size = sizeof(struct be_mcc_mailbox);
	mbox_mem_align->va = PTR_ALIGN(mbox_mem_alloc->va, 16);
	mbox_mem_align->dma = PTR_ALIGN(mbox_mem_alloc->dma, 16);
	memset(mbox_mem_align->va, 0, sizeof(struct be_mcc_mailbox));

	mc_cmd_mem->size = sizeof(struct be_cmd_req_mcast_mac_config);
	mc_cmd_mem->va = dma_alloc_coherent(&adapter->pdev->dev,
					    mc_cmd_mem->size, &mc_cmd_mem->dma,
					    GFP_KERNEL);
	if (mc_cmd_mem->va == NULL) {
		status = -ENOMEM;
		goto free_mbox;
	}
	memset(mc_cmd_mem->va, 0, mc_cmd_mem->size);

	mutex_init(&adapter->mbox_lock);
	spin_lock_init(&adapter->mcc_lock);
	spin_lock_init(&adapter->mcc_cq_lock);

	init_completion(&adapter->flash_compl);
	pci_save_state(adapter->pdev);
	return 0;

free_mbox:
	dma_free_coherent(&adapter->pdev->dev, mbox_mem_alloc->size,
			  mbox_mem_alloc->va, mbox_mem_alloc->dma);

unmap_pci_bars:
	be_unmap_pci_bars(adapter);

done:
	return status;
}

static void be_stats_cleanup(struct be_adapter *adapter)
{
	struct be_dma_mem *cmd = &adapter->stats_cmd;

	if (cmd->va)
		dma_free_coherent(&adapter->pdev->dev, cmd->size,
				  cmd->va, cmd->dma);
}

static int be_stats_init(struct be_adapter *adapter)
{
	struct be_dma_mem *cmd = &adapter->stats_cmd;

	cmd->size = sizeof(struct be_cmd_req_get_stats);
	cmd->va = dma_alloc_coherent(&adapter->pdev->dev, cmd->size, &cmd->dma,
				     GFP_KERNEL);
	if (cmd->va == NULL)
		return -1;
	memset(cmd->va, 0, cmd->size);
	return 0;
}

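/* Undo be_probe() in reverse order */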
static void __devexit be_remove(struct pci_dev *pdev)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);

	if (!adapter)
		return;

	cancel_delayed_work_sync(&adapter->work);

	unregister_netdev(adapter->netdev);

	be_clear(adapter);

	be_stats_cleanup(adapter);

	be_ctrl_cleanup(adapter);

	be_sriov_disable(adapter);

	be_msix_disable(adapter);

	pci_set_drvdata(pdev, NULL);
	pci_release_regions(pdev);
	pci_disable_device(pdev);

	free_netdev(adapter->netdev);
}

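/* Query firmware for per-function configuration: FW version, port
 * number/mode, the permanent MAC (PF only), the VLAN table share and
 * controller attributes.
 */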
static int be_get_config(struct be_adapter *adapter)
{
	int status;
	u8 mac[ETH_ALEN];

	status = be_cmd_get_fw_ver(adapter, adapter->fw_ver);
	if (status)
		return status;

	status = be_cmd_query_fw_cfg(adapter, &adapter->port_num,
			&adapter->function_mode, &adapter->function_caps);
	if (status)
		return status;

	memset(mac, 0, ETH_ALEN);

	if (be_physfn(adapter)) {
		status = be_cmd_mac_addr_query(adapter, mac,
			MAC_ADDRESS_TYPE_NETWORK, true /* permanent */, 0);

		if (status)
			return status;

		if (!is_valid_ether_addr(mac))
			return -EADDRNOTAVAIL;

		memcpy(adapter->netdev->dev_addr, mac, ETH_ALEN);
		memcpy(adapter->netdev->perm_addr, mac, ETH_ALEN);
	}

	if (adapter->function_mode & 0x400)
		adapter->max_vlans = BE_NUM_VLANS_SUPPORTED/4;
	else
		adapter->max_vlans = BE_NUM_VLANS_SUPPORTED;

	status = be_cmd_get_cntl_attributes(adapter);
	if (status)
		return status;

	be_cmd_check_native_mode(adapter);
	return 0;
}

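/* Derive the ASIC generation from the PCI device id; for OC_DEVICE_ID3
 * (Lancer) additionally validate the SLI interface register.
 */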
static int be_dev_family_check(struct be_adapter *adapter)
{
	struct pci_dev *pdev = adapter->pdev;
	u32 sli_intf = 0, if_type;

	switch (pdev->device) {
	case BE_DEVICE_ID1:
	case OC_DEVICE_ID1:
		adapter->generation = BE_GEN2;
		break;
	case BE_DEVICE_ID2:
	case OC_DEVICE_ID2:
		adapter->generation = BE_GEN3;
		break;
	case OC_DEVICE_ID3:
		pci_read_config_dword(pdev, SLI_INTF_REG_OFFSET, &sli_intf);
		if_type = (sli_intf & SLI_INTF_IF_TYPE_MASK) >>
						SLI_INTF_IF_TYPE_SHIFT;

		if (((sli_intf & SLI_INTF_VALID_MASK) != SLI_INTF_VALID) ||
			if_type != 0x02) {
			dev_err(&pdev->dev, "SLI_INTF reg val is not valid\n");
			return -EINVAL;
		}
		if (num_vfs > 0) {
			dev_err(&pdev->dev, "VFs not supported\n");
			return -EINVAL;
		}
		adapter->sli_family = ((sli_intf & SLI_INTF_FAMILY_MASK) >>
					 SLI_INTF_FAMILY_SHIFT);
		adapter->generation = BE_GEN3;
		break;
	default:
		adapter->generation = 0;
	}
	return 0;
}

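/* Poll the SLIPORT status register until firmware reports ready;
 * gives up after SLIPORT_READY_TIMEOUT polls (~10s at 20ms apart).
 */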
static int lancer_wait_ready(struct be_adapter *adapter)
{
#define SLIPORT_READY_TIMEOUT 500
	u32 sliport_status;
	int status = 0, i;

	for (i = 0; i < SLIPORT_READY_TIMEOUT; i++) {
		sliport_status = ioread32(adapter->db + SLIPORT_STATUS_OFFSET);
		if (sliport_status & SLIPORT_STATUS_RDY_MASK)
			break;

		msleep(20);
	}

	if (i == SLIPORT_READY_TIMEOUT)
		status = -1;

	return status;
}

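/* If the port reports an error with "reset needed" set, initiate a
 * port reset through the SLIPORT control register and wait for
 * firmware to come back ready.
 */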
static int lancer_test_and_set_rdy_state(struct be_adapter *adapter)
{
	int status;
	u32 sliport_status, err, reset_needed;

	status = lancer_wait_ready(adapter);
	if (!status) {
		sliport_status = ioread32(adapter->db + SLIPORT_STATUS_OFFSET);
		err = sliport_status & SLIPORT_STATUS_ERR_MASK;
		reset_needed = sliport_status & SLIPORT_STATUS_RN_MASK;
		if (err && reset_needed) {
			iowrite32(SLI_PORT_CONTROL_IP_MASK,
				  adapter->db + SLIPORT_CONTROL_OFFSET);

			/* check if adapter has corrected the error */
			status = lancer_wait_ready(adapter);
			sliport_status = ioread32(adapter->db +
						  SLIPORT_STATUS_OFFSET);
			sliport_status &= (SLIPORT_STATUS_ERR_MASK |
						SLIPORT_STATUS_RN_MASK);
			if (status || sliport_status)
				status = -1;
		} else if (err || reset_needed) {
			status = -1;
		}
	}
	return status;
}

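/* PCI probe: enable the device and map its BARs, sync up with the
 * firmware (POST, fw_init, function reset), then create the queues
 * and register the netdev.
 */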
static int __devinit be_probe(struct pci_dev *pdev,
			const struct pci_device_id *pdev_id)
{
	int status = 0;
	struct be_adapter *adapter;
	struct net_device *netdev;

	status = pci_enable_device(pdev);
	if (status)
		goto do_none;

	status = pci_request_regions(pdev, DRV_NAME);
	if (status)
		goto disable_dev;
	pci_set_master(pdev);

	netdev = alloc_etherdev(sizeof(struct be_adapter));
	if (netdev == NULL) {
		status = -ENOMEM;
		goto rel_reg;
	}
	adapter = netdev_priv(netdev);
	adapter->pdev = pdev;
	pci_set_drvdata(pdev, adapter);

	status = be_dev_family_check(adapter);
	if (status)
		goto free_netdev;

	adapter->netdev = netdev;
	SET_NETDEV_DEV(netdev, &pdev->dev);

	status = dma_set_mask(&pdev->dev, DMA_BIT_MASK(64));
	if (!status) {
		netdev->features |= NETIF_F_HIGHDMA;
	} else {
		status = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
		if (status) {
			dev_err(&pdev->dev, "Could not set PCI DMA Mask\n");
			goto free_netdev;
		}
	}

	be_sriov_enable(adapter);

	status = be_ctrl_init(adapter);
	if (status)
		goto free_netdev;

	if (lancer_chip(adapter)) {
		status = lancer_test_and_set_rdy_state(adapter);
		if (status) {
			dev_err(&pdev->dev, "Adapter in non-recoverable error\n");
			goto free_netdev;
		}
	}

	/* sync up with fw's ready state */
	if (be_physfn(adapter)) {
		status = be_cmd_POST(adapter);
		if (status)
			goto ctrl_clean;
	}

	/* tell fw we're ready to fire cmds */
	status = be_cmd_fw_init(adapter);
	if (status)
		goto ctrl_clean;

	status = be_cmd_reset_function(adapter);
	if (status)
		goto ctrl_clean;

	status = be_stats_init(adapter);
	if (status)
		goto ctrl_clean;

	status = be_get_config(adapter);
	if (status)
		goto stats_clean;

	be_msix_enable(adapter);

	INIT_DELAYED_WORK(&adapter->work, be_worker);

	status = be_setup(adapter);
	if (status)
		goto msix_disable;

	be_netdev_init(netdev);
	status = register_netdev(netdev);
	if (status != 0)
		goto unsetup;
	netif_carrier_off(netdev);

	if (be_physfn(adapter) && adapter->sriov_enabled) {
		status = be_vf_eth_addr_config(adapter);
		if (status)
			goto unreg_netdev;
	}

	dev_info(&pdev->dev, "%s port %d\n", nic_name(pdev), adapter->port_num);
	schedule_delayed_work(&adapter->work, msecs_to_jiffies(100));
	return 0;

unreg_netdev:
	unregister_netdev(netdev);
unsetup:
	be_clear(adapter);
msix_disable:
	be_msix_disable(adapter);
stats_clean:
	be_stats_cleanup(adapter);
ctrl_clean:
	be_ctrl_cleanup(adapter);
free_netdev:
	be_sriov_disable(adapter);
	free_netdev(netdev);
	pci_set_drvdata(pdev, NULL);
rel_reg:
	pci_release_regions(pdev);
disable_dev:
	pci_disable_device(pdev);
do_none:
	dev_err(&pdev->dev, "%s initialization failed\n", nic_name(pdev));
	return status;
}

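/* PM suspend: optionally arm wake-on-LAN, close the interface and
 * tear down queues and MSI-X before powering the device down.
 */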
static int be_suspend(struct pci_dev *pdev, pm_message_t state)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);
	struct net_device *netdev = adapter->netdev;

	cancel_delayed_work_sync(&adapter->work);
	if (adapter->wol)
		be_setup_wol(adapter, true);

	netif_device_detach(netdev);
	if (netif_running(netdev)) {
		rtnl_lock();
		be_close(netdev);
		rtnl_unlock();
	}
	be_cmd_get_flow_control(adapter, &adapter->tx_fc, &adapter->rx_fc);
	be_clear(adapter);

	be_msix_disable(adapter);
	pci_save_state(pdev);
	pci_disable_device(pdev);
	pci_set_power_state(pdev, pci_choose_state(pdev, state));
	return 0;
}

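/* PM resume: power the device back up, re-sync with firmware and
 * rebuild the queues before reattaching the netdev.
 */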
static int be_resume(struct pci_dev *pdev)
{
	int status = 0;
	struct be_adapter *adapter = pci_get_drvdata(pdev);
	struct net_device *netdev = adapter->netdev;

	netif_device_detach(netdev);

	status = pci_enable_device(pdev);
	if (status)
		return status;

	pci_set_power_state(pdev, 0);
	pci_restore_state(pdev);

	be_msix_enable(adapter);
	/* tell fw we're ready to fire cmds */
	status = be_cmd_fw_init(adapter);
	if (status)
		return status;

	be_setup(adapter);
	if (netif_running(netdev)) {
		rtnl_lock();
		be_open(netdev);
		rtnl_unlock();
	}
	netif_device_attach(netdev);

	if (adapter->wol)
		be_setup_wol(adapter, false);

	schedule_delayed_work(&adapter->work, msecs_to_jiffies(100));
	return 0;
}

/*
 * An FLR will stop BE from DMAing any data.
 */
static void be_shutdown(struct pci_dev *pdev)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);

	if (!adapter)
		return;

	cancel_delayed_work_sync(&adapter->work);

	netif_device_detach(adapter->netdev);

	be_cmd_reset_function(adapter);

	if (adapter->wol)
		be_setup_wol(adapter, true);

	pci_disable_device(pdev);
}

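/* PCI EEH (error recovery) handlers: quiesce on error detection,
 * re-POST the card on slot reset and rebuild state on resume.
 */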
static pci_ers_result_t be_eeh_err_detected(struct pci_dev *pdev,
				pci_channel_state_t state)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);
	struct net_device *netdev = adapter->netdev;

	dev_err(&adapter->pdev->dev, "EEH error detected\n");

	adapter->eeh_err = true;

	netif_device_detach(netdev);

	if (netif_running(netdev)) {
		rtnl_lock();
		be_close(netdev);
		rtnl_unlock();
	}
	be_clear(adapter);

	if (state == pci_channel_io_perm_failure)
		return PCI_ERS_RESULT_DISCONNECT;

	pci_disable_device(pdev);

	return PCI_ERS_RESULT_NEED_RESET;
}

static pci_ers_result_t be_eeh_reset(struct pci_dev *pdev)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);
	int status;

	dev_info(&adapter->pdev->dev, "EEH reset\n");
	adapter->eeh_err = false;

	status = pci_enable_device(pdev);
	if (status)
		return PCI_ERS_RESULT_DISCONNECT;

	pci_set_master(pdev);
	pci_set_power_state(pdev, 0);
	pci_restore_state(pdev);

	/* Check if card is ok and fw is ready */
	status = be_cmd_POST(adapter);
	if (status)
		return PCI_ERS_RESULT_DISCONNECT;

	return PCI_ERS_RESULT_RECOVERED;
}

static void be_eeh_resume(struct pci_dev *pdev)
{
	int status = 0;
	struct be_adapter *adapter = pci_get_drvdata(pdev);
	struct net_device *netdev = adapter->netdev;

	dev_info(&adapter->pdev->dev, "EEH resume\n");

	pci_save_state(pdev);

	/* tell fw we're ready to fire cmds */
	status = be_cmd_fw_init(adapter);
	if (status)
		goto err;

	status = be_setup(adapter);
	if (status)
		goto err;

	if (netif_running(netdev)) {
		status = be_open(netdev);
		if (status)
			goto err;
	}
	netif_device_attach(netdev);
	return;
err:
	dev_err(&adapter->pdev->dev, "EEH resume failed\n");
}

static struct pci_error_handlers be_eeh_handlers = {
	.error_detected = be_eeh_err_detected,
	.slot_reset = be_eeh_reset,
	.resume = be_eeh_resume,
};

static struct pci_driver be_driver = {
	.name = DRV_NAME,
	.id_table = be_dev_ids,
	.probe = be_probe,
	.remove = be_remove,
	.suspend = be_suspend,
	.resume = be_resume,
	.shutdown = be_shutdown,
	.err_handler = &be_eeh_handlers
};

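/* Validate module parameters (rx_frag_size must be 2048, 4096 or
 * 8192; anything else falls back to 2048) before registering the
 * PCI driver.
 */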
static int __init be_init_module(void)
{
	if (rx_frag_size != 8192 && rx_frag_size != 4096 &&
	    rx_frag_size != 2048) {
		printk(KERN_WARNING DRV_NAME
			" : Module param rx_frag_size must be 2048/4096/8192."
			" Using 2048\n");
		rx_frag_size = 2048;
	}

	return pci_register_driver(&be_driver);
}
module_init(be_init_module);

static void __exit be_exit_module(void)
{
	pci_unregister_driver(&be_driver);
}
module_exit(be_exit_module);