/*
 * Copyright (C) 2005 - 2010 ServerEngines
 * All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version 2
 * as published by the Free Software Foundation. The full GNU General
 * Public License is included in this distribution in the file called COPYING.
 *
 * Contact Information:
 * linux-drivers@serverengines.com
 *
 * ServerEngines
 * 209 N. Fair Oaks Ave
 * Sunnyvale, CA 94085
 */

#include "be.h"
#include "be_cmds.h"
#include <asm/div64.h>

MODULE_VERSION(DRV_VER);
MODULE_DESCRIPTION(DRV_DESC " " DRV_VER);
MODULE_AUTHOR("ServerEngines Corporation");
MODULE_LICENSE("GPL");

static unsigned int rx_frag_size = 2048;
static unsigned int num_vfs;
module_param(rx_frag_size, uint, S_IRUGO);
module_param(num_vfs, uint, S_IRUGO);
MODULE_PARM_DESC(rx_frag_size, "Size of a fragment that holds rcvd data.");
MODULE_PARM_DESC(num_vfs, "Number of PCI VFs to initialize");

static bool multi_rxq = true;
module_param(multi_rxq, bool, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(multi_rxq, "Multi Rx Queue support. Enabled by default");

static DEFINE_PCI_DEVICE_TABLE(be_dev_ids) = {
	{ PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID1) },
	{ PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID2) },
	{ PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID1) },
	{ PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID2) },
	{ PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID3)},
	{ 0 }
};
MODULE_DEVICE_TABLE(pci, be_dev_ids);
/* UE Status Low CSR */
static char *ue_status_low_desc[] = {
	"CEV",
	"CTX",
	"DBUF",
	"ERX",
	"Host",
	"MPU",
	"NDMA",
	"PTC ",
	"RDMA ",
	"RXF ",
	"RXIPS ",
	"RXULP0 ",
	"RXULP1 ",
	"RXULP2 ",
	"TIM ",
	"TPOST ",
	"TPRE ",
	"TXIPS ",
	"TXULP0 ",
	"TXULP1 ",
	"UC ",
	"WDMA ",
	"TXULP2 ",
	"HOST1 ",
	"P0_OB_LINK ",
	"P1_OB_LINK ",
	"HOST_GPIO ",
	"MBOX ",
	"AXGMAC0",
	"AXGMAC1",
	"JTAG",
	"MPU_INTPEND"
};
/* UE Status High CSR */
static char *ue_status_hi_desc[] = {
	"LPCMEMHOST",
	"MGMT_MAC",
	"PCS0ONLINE",
	"MPU_IRAM",
	"PCS1ONLINE",
	"PCTL0",
	"PCTL1",
	"PMEM",
	"RR",
	"TXPB",
	"RXPP",
	"XAUI",
	"TXP",
	"ARM",
	"IPC",
	"HOST2",
	"HOST3",
	"HOST4",
	"HOST5",
	"HOST6",
	"HOST7",
	"HOST8",
	"HOST9",
	"NETC",
	"Unknown",
	"Unknown",
	"Unknown",
	"Unknown",
	"Unknown",
	"Unknown",
	"Unknown",
	"Unknown"
};

static inline bool be_multi_rxq(struct be_adapter *adapter)
{
	return (adapter->num_rx_qs > 1);
}

static void be_queue_free(struct be_adapter *adapter, struct be_queue_info *q)
{
	struct be_dma_mem *mem = &q->dma_mem;
	if (mem->va)
		pci_free_consistent(adapter->pdev, mem->size,
			mem->va, mem->dma);
}

static int be_queue_alloc(struct be_adapter *adapter, struct be_queue_info *q,
		u16 len, u16 entry_size)
{
	struct be_dma_mem *mem = &q->dma_mem;

	memset(q, 0, sizeof(*q));
	q->len = len;
	q->entry_size = entry_size;
	mem->size = len * entry_size;
	mem->va = pci_alloc_consistent(adapter->pdev, mem->size, &mem->dma);
	if (!mem->va)
		return -1;
	memset(mem->va, 0, mem->size);
	return 0;
}

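/* Enable or disable interrupt delivery to the host by toggling the
 * HOSTINTR bit in the membar control register. Skips the MMIO write when
 * an EEH error has been flagged, as the device may no longer be accessible.
 */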
static void be_intr_set(struct be_adapter *adapter, bool enable)
{
	u8 __iomem *addr = adapter->pcicfg + PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET;
	u32 reg = ioread32(addr);
	u32 enabled = reg & MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;

	if (adapter->eeh_err)
		return;

	if (!enabled && enable)
		reg |= MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
	else if (enabled && !enable)
		reg &= ~MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
	else
		return;

	iowrite32(reg, addr);
}

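/* The be_*_notify() helpers below ring the device doorbells. Each doorbell
 * write encodes a ring id plus a count (buffers posted or entries consumed);
 * the posting paths issue a wmb() first so that queue entries are visible
 * in memory before the device is told about them.
 */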
static void be_rxq_notify(struct be_adapter *adapter, u16 qid, u16 posted)
{
	u32 val = 0;
	val |= qid & DB_RQ_RING_ID_MASK;
	val |= posted << DB_RQ_NUM_POSTED_SHIFT;

	wmb();
	iowrite32(val, adapter->db + DB_RQ_OFFSET);
}

static void be_txq_notify(struct be_adapter *adapter, u16 qid, u16 posted)
{
	u32 val = 0;
	val |= qid & DB_TXULP_RING_ID_MASK;
	val |= (posted & DB_TXULP_NUM_POSTED_MASK) << DB_TXULP_NUM_POSTED_SHIFT;

	wmb();
	iowrite32(val, adapter->db + DB_TXULP1_OFFSET);
}

static void be_eq_notify(struct be_adapter *adapter, u16 qid,
		bool arm, bool clear_int, u16 num_popped)
{
	u32 val = 0;
	val |= qid & DB_EQ_RING_ID_MASK;
	val |= ((qid & DB_EQ_RING_ID_EXT_MASK) <<
			DB_EQ_RING_ID_EXT_MASK_SHIFT);

	if (adapter->eeh_err)
		return;

	if (arm)
		val |= 1 << DB_EQ_REARM_SHIFT;
	if (clear_int)
		val |= 1 << DB_EQ_CLR_SHIFT;
	val |= 1 << DB_EQ_EVNT_SHIFT;
	val |= num_popped << DB_EQ_NUM_POPPED_SHIFT;
	iowrite32(val, adapter->db + DB_EQ_OFFSET);
}

void be_cq_notify(struct be_adapter *adapter, u16 qid, bool arm, u16 num_popped)
{
	u32 val = 0;
	val |= qid & DB_CQ_RING_ID_MASK;
	val |= ((qid & DB_CQ_RING_ID_EXT_MASK) <<
			DB_CQ_RING_ID_EXT_MASK_SHIFT);

	if (adapter->eeh_err)
		return;

	if (arm)
		val |= 1 << DB_CQ_REARM_SHIFT;
	val |= num_popped << DB_CQ_NUM_POPPED_SHIFT;
	iowrite32(val, adapter->db + DB_CQ_OFFSET);
}

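/* ndo_set_mac_address handler. On a PF the old pmac is deleted and the new
 * one added via firmware commands before netdev->dev_addr is updated; a VF
 * skips the firmware commands since its MAC is programmed on its behalf by
 * the corresponding PF.
 */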
static int be_mac_addr_set(struct net_device *netdev, void *p)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct sockaddr *addr = p;
	int status = 0;

	if (!is_valid_ether_addr(addr->sa_data))
		return -EADDRNOTAVAIL;

	/* MAC addr configuration will be done in hardware for VFs
	 * by their corresponding PFs. Just copy to netdev addr here
	 */
	if (!be_physfn(adapter))
		goto netdev_addr;

	status = be_cmd_pmac_del(adapter, adapter->if_handle, adapter->pmac_id);
	if (status)
		return status;

	status = be_cmd_pmac_add(adapter, (u8 *)addr->sa_data,
			adapter->if_handle, &adapter->pmac_id);
netdev_addr:
	if (!status)
		memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);

	return status;
}

void netdev_stats_update(struct be_adapter *adapter)
{
	struct be_hw_stats *hw_stats = hw_stats_from_cmd(adapter->stats_cmd.va);
	struct be_rxf_stats *rxf_stats = &hw_stats->rxf;
	struct be_port_rxf_stats *port_stats =
			&rxf_stats->port[adapter->port_num];
	struct net_device_stats *dev_stats = &adapter->netdev->stats;
	struct be_erx_stats *erx_stats = &hw_stats->erx;
	struct be_rx_obj *rxo;
	int i;

	memset(dev_stats, 0, sizeof(*dev_stats));
	for_all_rx_queues(adapter, rxo, i) {
		dev_stats->rx_packets += rx_stats(rxo)->rx_pkts;
		dev_stats->rx_bytes += rx_stats(rxo)->rx_bytes;
		dev_stats->multicast += rx_stats(rxo)->rx_mcast_pkts;
		/* no space in linux buffers: best possible approximation */
		dev_stats->rx_dropped +=
			erx_stats->rx_drops_no_fragments[rxo->q.id];
	}

	dev_stats->tx_packets = tx_stats(adapter)->be_tx_pkts;
	dev_stats->tx_bytes = tx_stats(adapter)->be_tx_bytes;

	/* bad pkts received */
	dev_stats->rx_errors = port_stats->rx_crc_errors +
		port_stats->rx_alignment_symbol_errors +
		port_stats->rx_in_range_errors +
		port_stats->rx_out_range_errors +
		port_stats->rx_frame_too_long +
		port_stats->rx_dropped_too_small +
		port_stats->rx_dropped_too_short +
		port_stats->rx_dropped_header_too_small +
		port_stats->rx_dropped_tcp_length +
		port_stats->rx_dropped_runt +
		port_stats->rx_tcp_checksum_errs +
		port_stats->rx_ip_checksum_errs +
		port_stats->rx_udp_checksum_errs;

	/* detailed rx errors */
	dev_stats->rx_length_errors = port_stats->rx_in_range_errors +
		port_stats->rx_out_range_errors +
		port_stats->rx_frame_too_long;

	dev_stats->rx_crc_errors = port_stats->rx_crc_errors;

	/* frame alignment errors */
	dev_stats->rx_frame_errors = port_stats->rx_alignment_symbol_errors;

	/* receiver fifo overrun */
	/* drops_no_pbuf is not per i/f, it's per BE card */
	dev_stats->rx_fifo_errors = port_stats->rx_fifo_overflow +
		port_stats->rx_input_fifo_overflow +
		rxf_stats->rx_drops_no_pbuf;
}

void be_link_status_update(struct be_adapter *adapter, bool link_up)
{
	struct net_device *netdev = adapter->netdev;

	/* If link came up or went down */
	if (adapter->link_up != link_up) {
		adapter->link_speed = -1;
		if (link_up) {
			netif_carrier_on(netdev);
			printk(KERN_INFO "%s: Link up\n", netdev->name);
		} else {
			netif_carrier_off(netdev);
			printk(KERN_INFO "%s: Link down\n", netdev->name);
		}
		adapter->link_up = link_up;
	}
}

/* Update the EQ delay in BE based on the RX frags consumed / sec */
static void be_rx_eqd_update(struct be_adapter *adapter, struct be_rx_obj *rxo)
{
	struct be_eq_obj *rx_eq = &rxo->rx_eq;
	struct be_rx_stats *stats = &rxo->stats;
	ulong now = jiffies;
	u32 eqd;

	if (!rx_eq->enable_aic)
		return;

	/* Wrapped around */
	if (time_before(now, stats->rx_fps_jiffies)) {
		stats->rx_fps_jiffies = now;
		return;
	}

	/* Update once a second */
	if ((now - stats->rx_fps_jiffies) < HZ)
		return;

	stats->rx_fps = (stats->rx_frags - stats->prev_rx_frags) /
			((now - stats->rx_fps_jiffies) / HZ);

	stats->rx_fps_jiffies = now;
	stats->prev_rx_frags = stats->rx_frags;
	eqd = stats->rx_fps / 110000;
	eqd = eqd << 3;
	if (eqd > rx_eq->max_eqd)
		eqd = rx_eq->max_eqd;
	if (eqd < rx_eq->min_eqd)
		eqd = rx_eq->min_eqd;
	if (eqd < 10)
		eqd = 0;
	if (eqd != rx_eq->cur_eqd)
		be_cmd_modify_eqd(adapter, rx_eq->q.id, eqd);

	rx_eq->cur_eqd = eqd;
}

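/* Convert a byte count accumulated over a jiffies interval to megabits/sec.
 * do_div() is used as native 64-bit division is unavailable on 32-bit hosts.
 */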
static u32 be_calc_rate(u64 bytes, unsigned long ticks)
{
	u64 rate = bytes;

	do_div(rate, ticks / HZ);
	rate <<= 3; /* bytes/sec -> bits/sec */
	do_div(rate, 1000000ul); /* bits/sec -> megabits/sec */

	return rate;
}

static void be_tx_rate_update(struct be_adapter *adapter)
{
	struct be_tx_stats *stats = tx_stats(adapter);
	ulong now = jiffies;

	/* Wrapped around? */
	if (time_before(now, stats->be_tx_jiffies)) {
		stats->be_tx_jiffies = now;
		return;
	}

	/* Update tx rate once in two seconds */
	if ((now - stats->be_tx_jiffies) > 2 * HZ) {
		stats->be_tx_rate = be_calc_rate(stats->be_tx_bytes
						  - stats->be_tx_bytes_prev,
						 now - stats->be_tx_jiffies);
		stats->be_tx_jiffies = now;
		stats->be_tx_bytes_prev = stats->be_tx_bytes;
	}
}

static void be_tx_stats_update(struct be_adapter *adapter,
			u32 wrb_cnt, u32 copied, u32 gso_segs, bool stopped)
{
	struct be_tx_stats *stats = tx_stats(adapter);
	stats->be_tx_reqs++;
	stats->be_tx_wrbs += wrb_cnt;
	stats->be_tx_bytes += copied;
	stats->be_tx_pkts += (gso_segs ? gso_segs : 1);
	if (stopped)
		stats->be_tx_stops++;
}

/* Determine number of WRB entries needed to xmit data in an skb */
static u32 wrb_cnt_for_skb(struct be_adapter *adapter, struct sk_buff *skb,
		bool *dummy)
{
	int cnt = (skb->len > skb->data_len);

	cnt += skb_shinfo(skb)->nr_frags;

	/* to account for hdr wrb */
	cnt++;
	if (lancer_chip(adapter) || !(cnt & 1)) {
		*dummy = false;
	} else {
		/* add a dummy to make it an even num */
		cnt++;
		*dummy = true;
	}
	BUG_ON(cnt > BE_MAX_TX_FRAG_COUNT);
	return cnt;
}

static inline void wrb_fill(struct be_eth_wrb *wrb, u64 addr, int len)
{
	wrb->frag_pa_hi = upper_32_bits(addr);
	wrb->frag_pa_lo = addr & 0xFFFFFFFF;
	wrb->frag_len = len & ETH_WRB_FRAG_LEN_MASK;
}

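/* Fill the header WRB that precedes the data WRBs of a transmit: LSO and
 * checksum-offload flags, the vlan tag, the total frame length and the
 * number of WRBs that follow. A vlan priority that is not in the available
 * priority bitmap is rewritten to the adapter's recommended priority.
 */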
static void wrb_fill_hdr(struct be_adapter *adapter, struct be_eth_hdr_wrb *hdr,
		struct sk_buff *skb, u32 wrb_cnt, u32 len)
{
	u8 vlan_prio = 0;
	u16 vlan_tag = 0;

	memset(hdr, 0, sizeof(*hdr));

	AMAP_SET_BITS(struct amap_eth_hdr_wrb, crc, hdr, 1);

	if (skb_is_gso(skb)) {
		AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso, hdr, 1);
		AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso_mss,
			hdr, skb_shinfo(skb)->gso_size);
		if (skb_is_gso_v6(skb) && !lancer_chip(adapter))
			AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso6, hdr, 1);
		if (lancer_chip(adapter) && adapter->sli_family ==
						LANCER_A0_SLI_FAMILY) {
			AMAP_SET_BITS(struct amap_eth_hdr_wrb, ipcs, hdr, 1);
			if (is_tcp_pkt(skb))
				AMAP_SET_BITS(struct amap_eth_hdr_wrb,
						tcpcs, hdr, 1);
			else if (is_udp_pkt(skb))
				AMAP_SET_BITS(struct amap_eth_hdr_wrb,
						udpcs, hdr, 1);
		}
	} else if (skb->ip_summed == CHECKSUM_PARTIAL) {
		if (is_tcp_pkt(skb))
			AMAP_SET_BITS(struct amap_eth_hdr_wrb, tcpcs, hdr, 1);
		else if (is_udp_pkt(skb))
			AMAP_SET_BITS(struct amap_eth_hdr_wrb, udpcs, hdr, 1);
	}

	if (adapter->vlan_grp && vlan_tx_tag_present(skb)) {
		AMAP_SET_BITS(struct amap_eth_hdr_wrb, vlan, hdr, 1);
		vlan_tag = vlan_tx_tag_get(skb);
		vlan_prio = (vlan_tag & VLAN_PRIO_MASK) >> VLAN_PRIO_SHIFT;
		/* If vlan priority provided by OS is NOT in available bmap */
		if (!(adapter->vlan_prio_bmap & (1 << vlan_prio)))
			vlan_tag = (vlan_tag & ~VLAN_PRIO_MASK) |
					adapter->recommended_prio;
		AMAP_SET_BITS(struct amap_eth_hdr_wrb, vlan_tag, hdr, vlan_tag);
	}

	AMAP_SET_BITS(struct amap_eth_hdr_wrb, event, hdr, 1);
	AMAP_SET_BITS(struct amap_eth_hdr_wrb, complete, hdr, 1);
	AMAP_SET_BITS(struct amap_eth_hdr_wrb, num_wrb, hdr, wrb_cnt);
	AMAP_SET_BITS(struct amap_eth_hdr_wrb, len, hdr, len);
}

static void unmap_tx_frag(struct pci_dev *pdev, struct be_eth_wrb *wrb,
		bool unmap_single)
{
	dma_addr_t dma;

	be_dws_le_to_cpu(wrb, sizeof(*wrb));

	dma = (u64)wrb->frag_pa_hi << 32 | (u64)wrb->frag_pa_lo;
	if (wrb->frag_len) {
		if (unmap_single)
			pci_unmap_single(pdev, dma, wrb->frag_len,
				PCI_DMA_TODEVICE);
		else
			pci_unmap_page(pdev, dma, wrb->frag_len,
				PCI_DMA_TODEVICE);
	}
}

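/* DMA-map the skb head and each page frag and fill one WRB per mapping,
 * plus an optional dummy WRB to keep the WRB count even. Returns the number
 * of bytes mapped, or 0 on a mapping failure, in which case all mappings
 * made so far are unwound and the queue head is restored.
 */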
static int make_tx_wrbs(struct be_adapter *adapter,
		struct sk_buff *skb, u32 wrb_cnt, bool dummy_wrb)
{
	dma_addr_t busaddr;
	int i, copied = 0;
	struct pci_dev *pdev = adapter->pdev;
	struct sk_buff *first_skb = skb;
	struct be_queue_info *txq = &adapter->tx_obj.q;
	struct be_eth_wrb *wrb;
	struct be_eth_hdr_wrb *hdr;
	bool map_single = false;
	u16 map_head;

	hdr = queue_head_node(txq);
	queue_head_inc(txq);
	map_head = txq->head;

	if (skb->len > skb->data_len) {
		int len = skb_headlen(skb);
		busaddr = pci_map_single(pdev, skb->data, len,
					 PCI_DMA_TODEVICE);
		if (pci_dma_mapping_error(pdev, busaddr))
			goto dma_err;
		map_single = true;
		wrb = queue_head_node(txq);
		wrb_fill(wrb, busaddr, len);
		be_dws_cpu_to_le(wrb, sizeof(*wrb));
		queue_head_inc(txq);
		copied += len;
	}

	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		struct skb_frag_struct *frag =
			&skb_shinfo(skb)->frags[i];
		busaddr = pci_map_page(pdev, frag->page,
				       frag->page_offset,
				       frag->size, PCI_DMA_TODEVICE);
		if (pci_dma_mapping_error(pdev, busaddr))
			goto dma_err;
		wrb = queue_head_node(txq);
		wrb_fill(wrb, busaddr, frag->size);
		be_dws_cpu_to_le(wrb, sizeof(*wrb));
		queue_head_inc(txq);
		copied += frag->size;
	}

	if (dummy_wrb) {
		wrb = queue_head_node(txq);
		wrb_fill(wrb, 0, 0);
		be_dws_cpu_to_le(wrb, sizeof(*wrb));
		queue_head_inc(txq);
	}

	wrb_fill_hdr(adapter, hdr, first_skb, wrb_cnt, copied);
	be_dws_cpu_to_le(hdr, sizeof(*hdr));

	return copied;
dma_err:
	txq->head = map_head;
	while (copied) {
		wrb = queue_head_node(txq);
		unmap_tx_frag(pdev, wrb, map_single);
		map_single = false;
		copied -= wrb->frag_len;
		queue_head_inc(txq);
	}
	return 0;
}

static netdev_tx_t be_xmit(struct sk_buff *skb,
			struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_tx_obj *tx_obj = &adapter->tx_obj;
	struct be_queue_info *txq = &tx_obj->q;
	u32 wrb_cnt = 0, copied = 0;
	u32 start = txq->head;
	bool dummy_wrb, stopped = false;

	wrb_cnt = wrb_cnt_for_skb(adapter, skb, &dummy_wrb);

	copied = make_tx_wrbs(adapter, skb, wrb_cnt, dummy_wrb);
	if (copied) {
		/* record the sent skb in the sent_skb table */
		BUG_ON(tx_obj->sent_skb_list[start]);
		tx_obj->sent_skb_list[start] = skb;

		/* Ensure txq has space for the next skb; Else stop the queue
		 * *BEFORE* ringing the tx doorbell, so that we serialize the
		 * tx compls of the current transmit which will wake up the
		 * queue
		 */
		atomic_add(wrb_cnt, &txq->used);
		if ((BE_MAX_TX_FRAG_COUNT + atomic_read(&txq->used)) >=
								txq->len) {
			netif_stop_queue(netdev);
			stopped = true;
		}

		be_txq_notify(adapter, txq->id, wrb_cnt);

		be_tx_stats_update(adapter, wrb_cnt, copied,
				skb_shinfo(skb)->gso_segs, stopped);
	} else {
		txq->head = start;
		dev_kfree_skb_any(skb);
	}
	return NETDEV_TX_OK;
}

static int be_change_mtu(struct net_device *netdev, int new_mtu)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	if (new_mtu < BE_MIN_MTU ||
			new_mtu > (BE_MAX_JUMBO_FRAME_SIZE -
					(ETH_HLEN + ETH_FCS_LEN))) {
		dev_info(&adapter->pdev->dev,
			"MTU must be between %d and %d bytes\n",
			BE_MIN_MTU,
			(BE_MAX_JUMBO_FRAME_SIZE - (ETH_HLEN + ETH_FCS_LEN)));
		return -EINVAL;
	}
	dev_info(&adapter->pdev->dev, "MTU changed from %d to %d bytes\n",
			netdev->mtu, new_mtu);
	netdev->mtu = new_mtu;
	return 0;
}

/*
 * A max of 64 (BE_NUM_VLANS_SUPPORTED) vlans can be configured in BE.
 * If the user configures more, place BE in vlan promiscuous mode.
 */
static int be_vid_config(struct be_adapter *adapter, bool vf, u32 vf_num)
{
	u16 vtag[BE_NUM_VLANS_SUPPORTED];
	u16 ntags = 0, i;
	int status = 0;
	u32 if_handle;

	if (vf) {
		if_handle = adapter->vf_cfg[vf_num].vf_if_handle;
		vtag[0] = cpu_to_le16(adapter->vf_cfg[vf_num].vf_vlan_tag);
		status = be_cmd_vlan_config(adapter, if_handle, vtag, 1, 1, 0);
	}

	if (adapter->vlans_added <= adapter->max_vlans) {
		/* Construct VLAN Table to give to HW */
		for (i = 0; i < VLAN_N_VID; i++) {
			if (adapter->vlan_tag[i]) {
				vtag[ntags] = cpu_to_le16(i);
				ntags++;
			}
		}
		status = be_cmd_vlan_config(adapter, adapter->if_handle,
					vtag, ntags, 1, 0);
	} else {
		status = be_cmd_vlan_config(adapter, adapter->if_handle,
					NULL, 0, 1, 1);
	}

	return status;
}

static void be_vlan_register(struct net_device *netdev, struct vlan_group *grp)
{
	struct be_adapter *adapter = netdev_priv(netdev);

	adapter->vlan_grp = grp;
}

static void be_vlan_add_vid(struct net_device *netdev, u16 vid)
{
	struct be_adapter *adapter = netdev_priv(netdev);

	adapter->vlans_added++;
	if (!be_physfn(adapter))
		return;

	adapter->vlan_tag[vid] = 1;
	if (adapter->vlans_added <= (adapter->max_vlans + 1))
		be_vid_config(adapter, false, 0);
}

static void be_vlan_rem_vid(struct net_device *netdev, u16 vid)
{
	struct be_adapter *adapter = netdev_priv(netdev);

	adapter->vlans_added--;
	vlan_group_set_device(adapter->vlan_grp, vid, NULL);

	if (!be_physfn(adapter))
		return;

	adapter->vlan_tag[vid] = 0;
	if (adapter->vlans_added <= adapter->max_vlans)
		be_vid_config(adapter, false, 0);
}

static void be_set_multicast_list(struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);

	if (netdev->flags & IFF_PROMISC) {
		be_cmd_promiscuous_config(adapter, adapter->port_num, 1);
		adapter->promiscuous = true;
		goto done;
	}

	/* BE was previously in promiscuous mode; disable it */
	if (adapter->promiscuous) {
		adapter->promiscuous = false;
		be_cmd_promiscuous_config(adapter, adapter->port_num, 0);
	}

	/* Enable multicast promisc if num configured exceeds what we support */
	if (netdev->flags & IFF_ALLMULTI ||
	    netdev_mc_count(netdev) > BE_MAX_MC) {
		be_cmd_multicast_set(adapter, adapter->if_handle, NULL,
				&adapter->mc_cmd_mem);
		goto done;
	}

	be_cmd_multicast_set(adapter, adapter->if_handle, netdev,
		&adapter->mc_cmd_mem);
done:
	return;
}

static int be_set_vf_mac(struct net_device *netdev, int vf, u8 *mac)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	int status;

	if (!adapter->sriov_enabled)
		return -EPERM;

	if (!is_valid_ether_addr(mac) || (vf >= num_vfs))
		return -EINVAL;

	if (adapter->vf_cfg[vf].vf_pmac_id != BE_INVALID_PMAC_ID)
		status = be_cmd_pmac_del(adapter,
					adapter->vf_cfg[vf].vf_if_handle,
					adapter->vf_cfg[vf].vf_pmac_id);

	status = be_cmd_pmac_add(adapter, mac,
				adapter->vf_cfg[vf].vf_if_handle,
				&adapter->vf_cfg[vf].vf_pmac_id);

	if (status)
		dev_err(&adapter->pdev->dev, "MAC %pM set on VF %d Failed\n",
				mac, vf);
	else
		memcpy(adapter->vf_cfg[vf].vf_mac_addr, mac, ETH_ALEN);

	return status;
}

static int be_get_vf_config(struct net_device *netdev, int vf,
			struct ifla_vf_info *vi)
{
	struct be_adapter *adapter = netdev_priv(netdev);

	if (!adapter->sriov_enabled)
		return -EPERM;

	if (vf >= num_vfs)
		return -EINVAL;

	vi->vf = vf;
	vi->tx_rate = adapter->vf_cfg[vf].vf_tx_rate;
	vi->vlan = adapter->vf_cfg[vf].vf_vlan_tag;
	vi->qos = 0;
	memcpy(&vi->mac, adapter->vf_cfg[vf].vf_mac_addr, ETH_ALEN);

	return 0;
}

static int be_set_vf_vlan(struct net_device *netdev,
			int vf, u16 vlan, u8 qos)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	int status = 0;

	if (!adapter->sriov_enabled)
		return -EPERM;

	if ((vf >= num_vfs) || (vlan > 4095))
		return -EINVAL;

	if (vlan) {
		adapter->vf_cfg[vf].vf_vlan_tag = vlan;
		adapter->vlans_added++;
	} else {
		adapter->vf_cfg[vf].vf_vlan_tag = 0;
		adapter->vlans_added--;
	}

	status = be_vid_config(adapter, true, vf);

	if (status)
		dev_info(&adapter->pdev->dev,
				"VLAN %d config on VF %d failed\n", vlan, vf);
	return status;
}

static int be_set_vf_tx_rate(struct net_device *netdev,
			int vf, int rate)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	int status = 0;

	if (!adapter->sriov_enabled)
		return -EPERM;

	if ((vf >= num_vfs) || (rate < 0))
		return -EINVAL;

	if (rate > 10000)
		rate = 10000;

	adapter->vf_cfg[vf].vf_tx_rate = rate;
	status = be_cmd_set_qos(adapter, rate / 10, vf);

	if (status)
		dev_info(&adapter->pdev->dev,
				"tx rate %d on VF %d failed\n", rate, vf);
	return status;
}

static void be_rx_rate_update(struct be_rx_obj *rxo)
{
	struct be_rx_stats *stats = &rxo->stats;
	ulong now = jiffies;

	/* Wrapped around */
	if (time_before(now, stats->rx_jiffies)) {
		stats->rx_jiffies = now;
		return;
	}

	/* Update the rate once in two seconds */
	if ((now - stats->rx_jiffies) < 2 * HZ)
		return;

	stats->rx_rate = be_calc_rate(stats->rx_bytes - stats->rx_bytes_prev,
				now - stats->rx_jiffies);
	stats->rx_jiffies = now;
	stats->rx_bytes_prev = stats->rx_bytes;
}

static void be_rx_stats_update(struct be_rx_obj *rxo,
		u32 pktsize, u16 numfrags, u8 pkt_type)
{
	struct be_rx_stats *stats = &rxo->stats;

	stats->rx_compl++;
	stats->rx_frags += numfrags;
	stats->rx_bytes += pktsize;
	stats->rx_pkts++;
	if (pkt_type == BE_MULTICAST_PACKET)
		stats->rx_mcast_pkts++;
}

static inline bool csum_passed(struct be_eth_rx_compl *rxcp)
{
	u8 l4_cksm, ipv6, ipcksm;

	l4_cksm = AMAP_GET_BITS(struct amap_eth_rx_compl, l4_cksm, rxcp);
	ipcksm = AMAP_GET_BITS(struct amap_eth_rx_compl, ipcksm, rxcp);
	ipv6 = AMAP_GET_BITS(struct amap_eth_rx_compl, ip_version, rxcp);

	/* Ignore ipcksm for ipv6 pkts */
	return l4_cksm && (ipcksm || ipv6);
}

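/* Look up the page_info of a completed rx frag. Since several rx frags are
 * carved out of one "big page", the page is DMA-unmapped only when its last
 * frag (marked last_page_user) is consumed.
 */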
static struct be_rx_page_info *
get_rx_page_info(struct be_adapter *adapter,
		struct be_rx_obj *rxo,
		u16 frag_idx)
{
	struct be_rx_page_info *rx_page_info;
	struct be_queue_info *rxq = &rxo->q;

	rx_page_info = &rxo->page_info_tbl[frag_idx];
	BUG_ON(!rx_page_info->page);

	if (rx_page_info->last_page_user) {
		pci_unmap_page(adapter->pdev, dma_unmap_addr(rx_page_info, bus),
			adapter->big_page_size, PCI_DMA_FROMDEVICE);
		rx_page_info->last_page_user = false;
	}

	atomic_dec(&rxq->used);
	return rx_page_info;
}

/* Throw away the data in the Rx completion */
static void be_rx_compl_discard(struct be_adapter *adapter,
		struct be_rx_obj *rxo,
		struct be_eth_rx_compl *rxcp)
{
	struct be_queue_info *rxq = &rxo->q;
	struct be_rx_page_info *page_info;
	u16 rxq_idx, i, num_rcvd;

	rxq_idx = AMAP_GET_BITS(struct amap_eth_rx_compl, fragndx, rxcp);
	num_rcvd = AMAP_GET_BITS(struct amap_eth_rx_compl, numfrags, rxcp);

	/* Skip out-of-buffer compl(lancer) or flush compl(BE) */
	if (likely(rxq_idx != rxo->last_frag_index && num_rcvd != 0)) {

		rxo->last_frag_index = rxq_idx;

		for (i = 0; i < num_rcvd; i++) {
			page_info = get_rx_page_info(adapter, rxo, rxq_idx);
			put_page(page_info->page);
			memset(page_info, 0, sizeof(*page_info));
			index_inc(&rxq_idx, rxq->len);
		}
	}
}

/*
 * skb_fill_rx_data forms a complete skb for an ether frame
 * indicated by rxcp.
 */
static void skb_fill_rx_data(struct be_adapter *adapter, struct be_rx_obj *rxo,
			struct sk_buff *skb, struct be_eth_rx_compl *rxcp,
			u16 num_rcvd)
{
	struct be_queue_info *rxq = &rxo->q;
	struct be_rx_page_info *page_info;
	u16 rxq_idx, i, j;
	u32 pktsize, hdr_len, curr_frag_len, size;
	u8 *start;
	u8 pkt_type;

	rxq_idx = AMAP_GET_BITS(struct amap_eth_rx_compl, fragndx, rxcp);
	pktsize = AMAP_GET_BITS(struct amap_eth_rx_compl, pktsize, rxcp);
	pkt_type = AMAP_GET_BITS(struct amap_eth_rx_compl, cast_enc, rxcp);

	page_info = get_rx_page_info(adapter, rxo, rxq_idx);

	start = page_address(page_info->page) + page_info->page_offset;
	prefetch(start);

	/* Copy data in the first descriptor of this completion */
	curr_frag_len = min(pktsize, rx_frag_size);

	/* Copy the header portion into skb_data */
	hdr_len = min((u32)BE_HDR_LEN, curr_frag_len);
	memcpy(skb->data, start, hdr_len);
	skb->len = curr_frag_len;
	if (curr_frag_len <= BE_HDR_LEN) { /* tiny packet */
		/* Complete packet has now been moved to data */
		put_page(page_info->page);
		skb->data_len = 0;
		skb->tail += curr_frag_len;
	} else {
		skb_shinfo(skb)->nr_frags = 1;
		skb_shinfo(skb)->frags[0].page = page_info->page;
		skb_shinfo(skb)->frags[0].page_offset =
					page_info->page_offset + hdr_len;
		skb_shinfo(skb)->frags[0].size = curr_frag_len - hdr_len;
		skb->data_len = curr_frag_len - hdr_len;
		skb->tail += hdr_len;
	}
	page_info->page = NULL;

	if (pktsize <= rx_frag_size) {
		BUG_ON(num_rcvd != 1);
		goto done;
	}

	/* More frags present for this completion */
	size = pktsize;
	for (i = 1, j = 0; i < num_rcvd; i++) {
		size -= curr_frag_len;
		index_inc(&rxq_idx, rxq->len);
		page_info = get_rx_page_info(adapter, rxo, rxq_idx);

		curr_frag_len = min(size, rx_frag_size);

		/* Coalesce all frags from the same physical page in one slot */
		if (page_info->page_offset == 0) {
			/* Fresh page */
			j++;
			skb_shinfo(skb)->frags[j].page = page_info->page;
			skb_shinfo(skb)->frags[j].page_offset =
							page_info->page_offset;
			skb_shinfo(skb)->frags[j].size = 0;
			skb_shinfo(skb)->nr_frags++;
		} else {
			put_page(page_info->page);
		}

		skb_shinfo(skb)->frags[j].size += curr_frag_len;
		skb->len += curr_frag_len;
		skb->data_len += curr_frag_len;

		page_info->page = NULL;
	}
	BUG_ON(j > MAX_SKB_FRAGS);

done:
	be_rx_stats_update(rxo, pktsize, num_rcvd, pkt_type);
}

/* Process the RX completion indicated by rxcp when GRO is disabled */
static void be_rx_compl_process(struct be_adapter *adapter,
			struct be_rx_obj *rxo,
			struct be_eth_rx_compl *rxcp)
{
	struct sk_buff *skb;
	u32 vlanf, vid;
	u16 num_rcvd;
	u8 vtm;

	num_rcvd = AMAP_GET_BITS(struct amap_eth_rx_compl, numfrags, rxcp);

	skb = netdev_alloc_skb_ip_align(adapter->netdev, BE_HDR_LEN);
	if (unlikely(!skb)) {
		if (net_ratelimit())
			dev_warn(&adapter->pdev->dev, "skb alloc failed\n");
		be_rx_compl_discard(adapter, rxo, rxcp);
		return;
	}

	skb_fill_rx_data(adapter, rxo, skb, rxcp, num_rcvd);

	if (likely(adapter->rx_csum && csum_passed(rxcp)))
		skb->ip_summed = CHECKSUM_UNNECESSARY;
	else
		skb_checksum_none_assert(skb);

	skb->truesize = skb->len + sizeof(struct sk_buff);
	skb->protocol = eth_type_trans(skb, adapter->netdev);

	vlanf = AMAP_GET_BITS(struct amap_eth_rx_compl, vtp, rxcp);
	vtm = AMAP_GET_BITS(struct amap_eth_rx_compl, vtm, rxcp);

	/* vlanf could be wrongly set in some cards.
	 * ignore if vtm is not set */
	if ((adapter->function_mode & 0x400) && !vtm)
		vlanf = 0;

	if (unlikely(vlanf)) {
		if (!adapter->vlan_grp || adapter->vlans_added == 0) {
			kfree_skb(skb);
			return;
		}
		vid = AMAP_GET_BITS(struct amap_eth_rx_compl, vlan_tag, rxcp);
		if (!lancer_chip(adapter))
			vid = swab16(vid);
		vlan_hwaccel_receive_skb(skb, adapter->vlan_grp, vid);
	} else {
		netif_receive_skb(skb);
	}
}

/* Process the RX completion indicated by rxcp when GRO is enabled */
static void be_rx_compl_process_gro(struct be_adapter *adapter,
		struct be_rx_obj *rxo,
		struct be_eth_rx_compl *rxcp)
{
	struct be_rx_page_info *page_info;
	struct sk_buff *skb = NULL;
	struct be_queue_info *rxq = &rxo->q;
	struct be_eq_obj *eq_obj = &rxo->rx_eq;
	u32 num_rcvd, pkt_size, remaining, vlanf, curr_frag_len;
	u16 i, rxq_idx = 0, vid, j;
	u8 vtm;
	u8 pkt_type;

	num_rcvd = AMAP_GET_BITS(struct amap_eth_rx_compl, numfrags, rxcp);
	pkt_size = AMAP_GET_BITS(struct amap_eth_rx_compl, pktsize, rxcp);
	vlanf = AMAP_GET_BITS(struct amap_eth_rx_compl, vtp, rxcp);
	rxq_idx = AMAP_GET_BITS(struct amap_eth_rx_compl, fragndx, rxcp);
	vtm = AMAP_GET_BITS(struct amap_eth_rx_compl, vtm, rxcp);
	pkt_type = AMAP_GET_BITS(struct amap_eth_rx_compl, cast_enc, rxcp);

	/* vlanf could be wrongly set in some cards.
	 * ignore if vtm is not set */
	if ((adapter->function_mode & 0x400) && !vtm)
		vlanf = 0;

	skb = napi_get_frags(&eq_obj->napi);
	if (!skb) {
		be_rx_compl_discard(adapter, rxo, rxcp);
		return;
	}

	remaining = pkt_size;
	for (i = 0, j = -1; i < num_rcvd; i++) {
		page_info = get_rx_page_info(adapter, rxo, rxq_idx);

		curr_frag_len = min(remaining, rx_frag_size);

		/* Coalesce all frags from the same physical page in one slot */
		if (i == 0 || page_info->page_offset == 0) {
			/* First frag or Fresh page */
			j++;
			skb_shinfo(skb)->frags[j].page = page_info->page;
			skb_shinfo(skb)->frags[j].page_offset =
							page_info->page_offset;
			skb_shinfo(skb)->frags[j].size = 0;
		} else {
			put_page(page_info->page);
		}
		skb_shinfo(skb)->frags[j].size += curr_frag_len;

		remaining -= curr_frag_len;
		index_inc(&rxq_idx, rxq->len);
		memset(page_info, 0, sizeof(*page_info));
	}
	BUG_ON(j > MAX_SKB_FRAGS);

	skb_shinfo(skb)->nr_frags = j + 1;
	skb->len = pkt_size;
	skb->data_len = pkt_size;
	skb->truesize += pkt_size;
	skb->ip_summed = CHECKSUM_UNNECESSARY;

	if (likely(!vlanf)) {
		napi_gro_frags(&eq_obj->napi);
	} else {
		vid = AMAP_GET_BITS(struct amap_eth_rx_compl, vlan_tag, rxcp);
		if (!lancer_chip(adapter))
			vid = swab16(vid);

		if (!adapter->vlan_grp || adapter->vlans_added == 0)
			return;

		vlan_gro_frags(&eq_obj->napi, adapter->vlan_grp, vid);
	}

	be_rx_stats_update(rxo, pkt_size, num_rcvd, pkt_type);
}

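/* Return the next valid rx completion, or NULL. The valid bit is tested
 * before the rmb() so the rest of the entry is not read until hardware has
 * completely written it.
 */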
static struct be_eth_rx_compl *be_rx_compl_get(struct be_rx_obj *rxo)
{
	struct be_eth_rx_compl *rxcp = queue_tail_node(&rxo->cq);

	if (rxcp->dw[offsetof(struct amap_eth_rx_compl, valid) / 32] == 0)
		return NULL;

	rmb();
	be_dws_le_to_cpu(rxcp, sizeof(*rxcp));

	queue_tail_inc(&rxo->cq);
	return rxcp;
}

/* To reset the valid bit, we need to reset the whole word as
 * when walking the queue the valid entries are little-endian
 * and invalid entries are host endian
 */
static inline void be_rx_compl_reset(struct be_eth_rx_compl *rxcp)
{
	rxcp->dw[offsetof(struct amap_eth_rx_compl, valid) / 32] = 0;
}

static inline struct page *be_alloc_pages(u32 size)
{
	gfp_t alloc_flags = GFP_ATOMIC;
	u32 order = get_order(size);
	if (order > 0)
		alloc_flags |= __GFP_COMP;
	return alloc_pages(alloc_flags, order);
}

/*
 * Allocate a page, split it to fragments of size rx_frag_size and post as
 * receive buffers to BE
 */
static void be_post_rx_frags(struct be_rx_obj *rxo)
{
	struct be_adapter *adapter = rxo->adapter;
	struct be_rx_page_info *page_info_tbl = rxo->page_info_tbl;
	struct be_rx_page_info *page_info = NULL, *prev_page_info = NULL;
	struct be_queue_info *rxq = &rxo->q;
	struct page *pagep = NULL;
	struct be_eth_rx_d *rxd;
	u64 page_dmaaddr = 0, frag_dmaaddr;
	u32 posted, page_offset = 0;

	page_info = &rxo->page_info_tbl[rxq->head];
	for (posted = 0; posted < MAX_RX_POST && !page_info->page; posted++) {
		if (!pagep) {
			pagep = be_alloc_pages(adapter->big_page_size);
			if (unlikely(!pagep)) {
				rxo->stats.rx_post_fail++;
				break;
			}
			page_dmaaddr = pci_map_page(adapter->pdev, pagep, 0,
						adapter->big_page_size,
						PCI_DMA_FROMDEVICE);
			page_info->page_offset = 0;
		} else {
			get_page(pagep);
			page_info->page_offset = page_offset + rx_frag_size;
		}
		page_offset = page_info->page_offset;
		page_info->page = pagep;
		dma_unmap_addr_set(page_info, bus, page_dmaaddr);
		frag_dmaaddr = page_dmaaddr + page_info->page_offset;

		rxd = queue_head_node(rxq);
		rxd->fragpa_lo = cpu_to_le32(frag_dmaaddr & 0xFFFFFFFF);
		rxd->fragpa_hi = cpu_to_le32(upper_32_bits(frag_dmaaddr));

		/* Any space left in the current big page for another frag? */
		if ((page_offset + rx_frag_size + rx_frag_size) >
					adapter->big_page_size) {
			pagep = NULL;
			page_info->last_page_user = true;
		}

		prev_page_info = page_info;
		queue_head_inc(rxq);
		page_info = &page_info_tbl[rxq->head];
	}
	if (pagep)
		prev_page_info->last_page_user = true;

	if (posted) {
		atomic_add(posted, &rxq->used);
		be_rxq_notify(adapter, rxq->id, posted);
	} else if (atomic_read(&rxq->used) == 0) {
		/* Let be_worker replenish when memory is available */
		rxo->rx_post_starved = true;
	}
}

static struct be_eth_tx_compl *be_tx_compl_get(struct be_queue_info *tx_cq)
{
	struct be_eth_tx_compl *txcp = queue_tail_node(tx_cq);

	if (txcp->dw[offsetof(struct amap_eth_tx_compl, valid) / 32] == 0)
		return NULL;

	rmb();
	be_dws_le_to_cpu(txcp, sizeof(*txcp));

	txcp->dw[offsetof(struct amap_eth_tx_compl, valid) / 32] = 0;

	queue_tail_inc(tx_cq);
	return txcp;
}

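/* Reclaim the WRBs (header, data and any dummy) of one completed transmit:
 * walk from txq->tail to last_index unmapping DMA, then free the skb. Only
 * the first data WRB maps the skb's linear header area, so unmap_skb_hdr is
 * cleared after the first iteration.
 */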
static void be_tx_compl_process(struct be_adapter *adapter, u16 last_index)
{
	struct be_queue_info *txq = &adapter->tx_obj.q;
	struct be_eth_wrb *wrb;
	struct sk_buff **sent_skbs = adapter->tx_obj.sent_skb_list;
	struct sk_buff *sent_skb;
	u16 cur_index, num_wrbs = 1; /* account for hdr wrb */
	bool unmap_skb_hdr = true;

	sent_skb = sent_skbs[txq->tail];
	BUG_ON(!sent_skb);
	sent_skbs[txq->tail] = NULL;

	/* skip header wrb */
	queue_tail_inc(txq);

	do {
		cur_index = txq->tail;
		wrb = queue_tail_node(txq);
		unmap_tx_frag(adapter->pdev, wrb, (unmap_skb_hdr &&
					skb_headlen(sent_skb)));
		unmap_skb_hdr = false;

		num_wrbs++;
		queue_tail_inc(txq);
	} while (cur_index != last_index);

	atomic_sub(num_wrbs, &txq->used);

	kfree_skb(sent_skb);
}

static inline struct be_eq_entry *event_get(struct be_eq_obj *eq_obj)
{
	struct be_eq_entry *eqe = queue_tail_node(&eq_obj->q);

	if (!eqe->evt)
		return NULL;

	rmb();
	eqe->evt = le32_to_cpu(eqe->evt);
	queue_tail_inc(&eq_obj->q);
	return eqe;
}

1299static int event_handle(struct be_adapter *adapter,
1300 struct be_eq_obj *eq_obj)
1301{
1302 struct be_eq_entry *eqe;
1303 u16 num = 0;
1304
1305 while ((eqe = event_get(eq_obj)) != NULL) {
1306 eqe->evt = 0;
1307 num++;
1308 }
1309
1310 /* Deal with any spurious interrupts that come
1311 * without events
1312 */
1313 be_eq_notify(adapter, eq_obj->q.id, true, true, num);
1314 if (num)
1315 napi_schedule(&eq_obj->napi);
1316
1317 return num;
1318}
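
/* event_handle() above consumes all pending entries on the EQ and then
 * re-arms it with be_eq_notify(..., arm = true, clear_int = true, num):
 * num tells the chip how many entries were consumed, and re-arming even
 * when num == 0 copes with spurious interrupts that arrive without any
 * event pending. NAPI is scheduled only when real events were seen.
 */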

/* Just read and notify events without processing them.
 * Used at the time of destroying event queues */
static void be_eq_clean(struct be_adapter *adapter,
			struct be_eq_obj *eq_obj)
{
	struct be_eq_entry *eqe;
	u16 num = 0;

	while ((eqe = event_get(eq_obj)) != NULL) {
		eqe->evt = 0;
		num++;
	}

	if (num)
		be_eq_notify(adapter, eq_obj->q.id, false, true, num);
}

static void be_rx_q_clean(struct be_adapter *adapter, struct be_rx_obj *rxo)
{
	struct be_rx_page_info *page_info;
	struct be_queue_info *rxq = &rxo->q;
	struct be_queue_info *rx_cq = &rxo->cq;
	struct be_eth_rx_compl *rxcp;
	u16 tail;

	/* First cleanup pending rx completions */
	while ((rxcp = be_rx_compl_get(rxo)) != NULL) {
		be_rx_compl_discard(adapter, rxo, rxcp);
		be_rx_compl_reset(rxcp);
		be_cq_notify(adapter, rx_cq->id, false, 1);
	}

	/* Then free posted rx buffers that were not used */
	tail = (rxq->head + rxq->len - atomic_read(&rxq->used)) % rxq->len;
	for (; atomic_read(&rxq->used) > 0; index_inc(&tail, rxq->len)) {
		page_info = get_rx_page_info(adapter, rxo, tail);
		put_page(page_info->page);
		memset(page_info, 0, sizeof(*page_info));
	}
	BUG_ON(atomic_read(&rxq->used));
}
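
/* The tail computation in be_rx_q_clean() above recovers the oldest
 * still-posted descriptor: tail = (head + len - used) % len. For
 * example, with a ring of len 1024, head 10 and 16 buffers still
 * posted, tail = (10 + 1024 - 16) % 1024 = 1018, and the loop walks
 * slots 1018, 1019, ..., 9, dropping one page reference per frag.
 */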

static void be_tx_compl_clean(struct be_adapter *adapter)
{
	struct be_queue_info *tx_cq = &adapter->tx_obj.cq;
	struct be_queue_info *txq = &adapter->tx_obj.q;
	struct be_eth_tx_compl *txcp;
	u16 end_idx, cmpl = 0, timeo = 0;
	struct sk_buff **sent_skbs = adapter->tx_obj.sent_skb_list;
	struct sk_buff *sent_skb;
	bool dummy_wrb;

	/* Wait for a max of 200ms for all the tx-completions to arrive. */
	do {
		while ((txcp = be_tx_compl_get(tx_cq))) {
			end_idx = AMAP_GET_BITS(struct amap_eth_tx_compl,
					wrb_index, txcp);
			be_tx_compl_process(adapter, end_idx);
			cmpl++;
		}
		if (cmpl) {
			be_cq_notify(adapter, tx_cq->id, false, cmpl);
			cmpl = 0;
		}

		if (atomic_read(&txq->used) == 0 || ++timeo > 200)
			break;

		mdelay(1);
	} while (true);

	if (atomic_read(&txq->used))
		dev_err(&adapter->pdev->dev, "%d pending tx-completions\n",
			atomic_read(&txq->used));

	/* free posted tx for which compls will never arrive */
	while (atomic_read(&txq->used)) {
		sent_skb = sent_skbs[txq->tail];
		end_idx = txq->tail;
		index_adv(&end_idx,
			wrb_cnt_for_skb(adapter, sent_skb, &dummy_wrb) - 1,
			txq->len);
		be_tx_compl_process(adapter, end_idx);
	}
}
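
/* be_tx_compl_clean() above polls for completions in 1ms steps for at
 * most ~200ms (timeo counts mdelay(1) iterations). Anything still
 * outstanding after that is treated as lost: the final loop walks the
 * TX ring from the tail, recomputes each skb's wrb span with
 * wrb_cnt_for_skb() and frees it as if its completion had arrived.
 */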

static void be_mcc_queues_destroy(struct be_adapter *adapter)
{
	struct be_queue_info *q;

	q = &adapter->mcc_obj.q;
	if (q->created)
		be_cmd_q_destroy(adapter, q, QTYPE_MCCQ);
	be_queue_free(adapter, q);

	q = &adapter->mcc_obj.cq;
	if (q->created)
		be_cmd_q_destroy(adapter, q, QTYPE_CQ);
	be_queue_free(adapter, q);
}

/* Must be called only after TX qs are created as MCC shares TX EQ */
static int be_mcc_queues_create(struct be_adapter *adapter)
{
	struct be_queue_info *q, *cq;

	/* Alloc MCC compl queue */
	cq = &adapter->mcc_obj.cq;
	if (be_queue_alloc(adapter, cq, MCC_CQ_LEN,
			sizeof(struct be_mcc_compl)))
		goto err;

	/* Ask BE to create MCC compl queue; share TX's eq */
	if (be_cmd_cq_create(adapter, cq, &adapter->tx_eq.q, false, true, 0))
		goto mcc_cq_free;

	/* Alloc MCC queue */
	q = &adapter->mcc_obj.q;
	if (be_queue_alloc(adapter, q, MCC_Q_LEN, sizeof(struct be_mcc_wrb)))
		goto mcc_cq_destroy;

	/* Ask BE to create MCC queue */
	if (be_cmd_mccq_create(adapter, q, cq))
		goto mcc_q_free;

	return 0;

mcc_q_free:
	be_queue_free(adapter, q);
mcc_cq_destroy:
	be_cmd_q_destroy(adapter, cq, QTYPE_CQ);
mcc_cq_free:
	be_queue_free(adapter, cq);
err:
	return -1;
}

static void be_tx_queues_destroy(struct be_adapter *adapter)
{
	struct be_queue_info *q;

	q = &adapter->tx_obj.q;
	if (q->created)
		be_cmd_q_destroy(adapter, q, QTYPE_TXQ);
	be_queue_free(adapter, q);

	q = &adapter->tx_obj.cq;
	if (q->created)
		be_cmd_q_destroy(adapter, q, QTYPE_CQ);
	be_queue_free(adapter, q);

	/* Clear any residual events */
	be_eq_clean(adapter, &adapter->tx_eq);

	q = &adapter->tx_eq.q;
	if (q->created)
		be_cmd_q_destroy(adapter, q, QTYPE_EQ);
	be_queue_free(adapter, q);
}

static int be_tx_queues_create(struct be_adapter *adapter)
{
	struct be_queue_info *eq, *q, *cq;

	adapter->tx_eq.max_eqd = 0;
	adapter->tx_eq.min_eqd = 0;
	adapter->tx_eq.cur_eqd = 96;
	adapter->tx_eq.enable_aic = false;
	/* Alloc Tx Event queue */
	eq = &adapter->tx_eq.q;
	if (be_queue_alloc(adapter, eq, EVNT_Q_LEN, sizeof(struct be_eq_entry)))
		return -1;

	/* Ask BE to create Tx Event queue */
	if (be_cmd_eq_create(adapter, eq, adapter->tx_eq.cur_eqd))
		goto tx_eq_free;

	adapter->tx_eq.msix_vec_idx = adapter->msix_vec_next_idx++;

	/* Alloc TX eth compl queue */
	cq = &adapter->tx_obj.cq;
	if (be_queue_alloc(adapter, cq, TX_CQ_LEN,
			sizeof(struct be_eth_tx_compl)))
		goto tx_eq_destroy;

	/* Ask BE to create Tx eth compl queue */
	if (be_cmd_cq_create(adapter, cq, eq, false, false, 3))
		goto tx_cq_free;

	/* Alloc TX eth queue */
	q = &adapter->tx_obj.q;
	if (be_queue_alloc(adapter, q, TX_Q_LEN, sizeof(struct be_eth_wrb)))
		goto tx_cq_destroy;

	/* Ask BE to create Tx eth queue */
	if (be_cmd_txq_create(adapter, q, cq))
		goto tx_q_free;
	return 0;

tx_q_free:
	be_queue_free(adapter, q);
tx_cq_destroy:
	be_cmd_q_destroy(adapter, cq, QTYPE_CQ);
tx_cq_free:
	be_queue_free(adapter, cq);
tx_eq_destroy:
	be_cmd_q_destroy(adapter, eq, QTYPE_EQ);
tx_eq_free:
	be_queue_free(adapter, eq);
	return -1;
}
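
/* be_tx_queues_create() above pins the TX event queue to a fixed
 * interrupt-moderation delay (cur_eqd = 96) with adaptive coalescing
 * (enable_aic) turned off, while the RX EQs created later run with AIC
 * on; presumably TX completion latency matters less than RX latency,
 * so a fixed moderation value suffices there.
 */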

static void be_rx_queues_destroy(struct be_adapter *adapter)
{
	struct be_queue_info *q;
	struct be_rx_obj *rxo;
	int i;

	for_all_rx_queues(adapter, rxo, i) {
		q = &rxo->q;
		if (q->created) {
			be_cmd_q_destroy(adapter, q, QTYPE_RXQ);
			/* After the rxq is invalidated, wait for a grace time
			 * of 1ms for all dma to end and the flush compl to
			 * arrive
			 */
			mdelay(1);
			be_rx_q_clean(adapter, rxo);
		}
		be_queue_free(adapter, q);

		q = &rxo->cq;
		if (q->created)
			be_cmd_q_destroy(adapter, q, QTYPE_CQ);
		be_queue_free(adapter, q);

		/* Clear any residual events */
		q = &rxo->rx_eq.q;
		if (q->created) {
			be_eq_clean(adapter, &rxo->rx_eq);
			be_cmd_q_destroy(adapter, q, QTYPE_EQ);
		}
		be_queue_free(adapter, q);
	}
}

static int be_rx_queues_create(struct be_adapter *adapter)
{
	struct be_queue_info *eq, *q, *cq;
	struct be_rx_obj *rxo;
	int rc, i;

	adapter->big_page_size = (1 << get_order(rx_frag_size)) * PAGE_SIZE;
	for_all_rx_queues(adapter, rxo, i) {
		rxo->adapter = adapter;
		/* Init last_frag_index so that the frag index in the first
		 * completion will never match */
		rxo->last_frag_index = 0xffff;
		rxo->rx_eq.max_eqd = BE_MAX_EQD;
		rxo->rx_eq.enable_aic = true;

		/* EQ */
		eq = &rxo->rx_eq.q;
		rc = be_queue_alloc(adapter, eq, EVNT_Q_LEN,
					sizeof(struct be_eq_entry));
		if (rc)
			goto err;

		rc = be_cmd_eq_create(adapter, eq, rxo->rx_eq.cur_eqd);
		if (rc)
			goto err;

		rxo->rx_eq.msix_vec_idx = adapter->msix_vec_next_idx++;

		/* CQ */
		cq = &rxo->cq;
		rc = be_queue_alloc(adapter, cq, RX_CQ_LEN,
				sizeof(struct be_eth_rx_compl));
		if (rc)
			goto err;

		rc = be_cmd_cq_create(adapter, cq, eq, false, false, 3);
		if (rc)
			goto err;

		/* Rx Q */
		q = &rxo->q;
		rc = be_queue_alloc(adapter, q, RX_Q_LEN,
				sizeof(struct be_eth_rx_d));
		if (rc)
			goto err;

		rc = be_cmd_rxq_create(adapter, q, cq->id, rx_frag_size,
			BE_MAX_JUMBO_FRAME_SIZE, adapter->if_handle,
			(i > 0) ? 1 : 0 /* rss enable */, &rxo->rss_id);
		if (rc)
			goto err;
	}

	if (be_multi_rxq(adapter)) {
		u8 rsstable[MAX_RSS_QS];

		for_all_rss_queues(adapter, rxo, i)
			rsstable[i] = rxo->rss_id;

		rc = be_cmd_rss_config(adapter, rsstable,
			adapter->num_rx_qs - 1);
		if (rc)
			goto err;
	}

	return 0;
err:
	be_rx_queues_destroy(adapter);
	return -1;
}
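
/* In be_rx_queues_create() above, queue 0 is the default (non-RSS)
 * queue and queues 1..N are created with the rss-enable flag set, each
 * returning an rss_id from be_cmd_rxq_create(). When multi-rxq is in
 * effect, be_cmd_rss_config() then programs the RSS indirection from
 * those ids, covering the adapter->num_rx_qs - 1 RSS queues.
 */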

static bool event_peek(struct be_eq_obj *eq_obj)
{
	struct be_eq_entry *eqe = queue_tail_node(&eq_obj->q);
	if (!eqe->evt)
		return false;
	else
		return true;
}

static irqreturn_t be_intx(int irq, void *dev)
{
	struct be_adapter *adapter = dev;
	struct be_rx_obj *rxo;
	int isr, i, tx = 0, rx = 0;

	if (lancer_chip(adapter)) {
		if (event_peek(&adapter->tx_eq))
			tx = event_handle(adapter, &adapter->tx_eq);
		for_all_rx_queues(adapter, rxo, i) {
			if (event_peek(&rxo->rx_eq))
				rx |= event_handle(adapter, &rxo->rx_eq);
		}

		if (!(tx || rx))
			return IRQ_NONE;

	} else {
		isr = ioread32(adapter->csr + CEV_ISR0_OFFSET +
			(adapter->tx_eq.q.id / 8) * CEV_ISR_SIZE);
		if (!isr)
			return IRQ_NONE;

		if ((1 << adapter->tx_eq.msix_vec_idx & isr))
			event_handle(adapter, &adapter->tx_eq);

		for_all_rx_queues(adapter, rxo, i) {
			if ((1 << rxo->rx_eq.msix_vec_idx & isr))
				event_handle(adapter, &rxo->rx_eq);
		}
	}

	return IRQ_HANDLED;
}
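
/* A note on the legacy-interrupt path in be_intx() above: on BE2/BE3
 * the arithmetic implies that each CEV_ISR register covers a group of
 * eight EQ ids, so (tx_eq.q.id / 8) * CEV_ISR_SIZE selects the ISR
 * register for this function's EQs, and bit msix_vec_idx within it
 * flags a particular EQ. Lancer exposes no such CSR, hence the
 * event_peek() probing instead.
 */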

static irqreturn_t be_msix_rx(int irq, void *dev)
{
	struct be_rx_obj *rxo = dev;
	struct be_adapter *adapter = rxo->adapter;

	event_handle(adapter, &rxo->rx_eq);

	return IRQ_HANDLED;
}

static irqreturn_t be_msix_tx_mcc(int irq, void *dev)
{
	struct be_adapter *adapter = dev;

	event_handle(adapter, &adapter->tx_eq);

	return IRQ_HANDLED;
}

static inline bool do_gro(struct be_rx_obj *rxo,
			struct be_eth_rx_compl *rxcp, u8 err)
{
	int tcp_frame = AMAP_GET_BITS(struct amap_eth_rx_compl, tcpf, rxcp);

	if (err)
		rxo->stats.rxcp_err++;

	return (tcp_frame && !err) ? true : false;
}

static int be_poll_rx(struct napi_struct *napi, int budget)
{
	struct be_eq_obj *rx_eq = container_of(napi, struct be_eq_obj, napi);
	struct be_rx_obj *rxo = container_of(rx_eq, struct be_rx_obj, rx_eq);
	struct be_adapter *adapter = rxo->adapter;
	struct be_queue_info *rx_cq = &rxo->cq;
	struct be_eth_rx_compl *rxcp;
	u32 work_done;
	u16 frag_index, num_rcvd;
	u8 err;

	rxo->stats.rx_polls++;
	for (work_done = 0; work_done < budget; work_done++) {
		rxcp = be_rx_compl_get(rxo);
		if (!rxcp)
			break;

		err = AMAP_GET_BITS(struct amap_eth_rx_compl, err, rxcp);
		frag_index = AMAP_GET_BITS(struct amap_eth_rx_compl, fragndx,
								rxcp);
		num_rcvd = AMAP_GET_BITS(struct amap_eth_rx_compl, numfrags,
								rxcp);

		/* Skip out-of-buffer compl(lancer) or flush compl(BE) */
		if (likely(frag_index != rxo->last_frag_index &&
				num_rcvd != 0)) {
			rxo->last_frag_index = frag_index;

			if (do_gro(rxo, rxcp, err))
				be_rx_compl_process_gro(adapter, rxo, rxcp);
			else
				be_rx_compl_process(adapter, rxo, rxcp);
		}

		be_rx_compl_reset(rxcp);
	}

	/* Refill the queue */
	if (atomic_read(&rxo->q.used) < RX_FRAGS_REFILL_WM)
		be_post_rx_frags(rxo);

	/* All consumed */
	if (work_done < budget) {
		napi_complete(napi);
		be_cq_notify(adapter, rx_cq->id, true, work_done);
	} else {
		/* More to be consumed; continue with interrupts disabled */
		be_cq_notify(adapter, rx_cq->id, false, work_done);
	}
	return work_done;
}
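
/* be_poll_rx() above follows the standard NAPI contract: when fewer
 * than 'budget' completions are found the poll is finished, so
 * napi_complete() runs and the CQ is notified with rearm = true to
 * turn the interrupt back on; when the budget is exhausted the CQ is
 * notified without rearming and the NAPI scheduler polls again.
 */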

/* As TX and MCC share the same EQ, check for both TX and MCC completions.
 * For TX/MCC we don't honour budget; consume everything
 */
static int be_poll_tx_mcc(struct napi_struct *napi, int budget)
{
	struct be_eq_obj *tx_eq = container_of(napi, struct be_eq_obj, napi);
	struct be_adapter *adapter =
		container_of(tx_eq, struct be_adapter, tx_eq);
	struct be_queue_info *txq = &adapter->tx_obj.q;
	struct be_queue_info *tx_cq = &adapter->tx_obj.cq;
	struct be_eth_tx_compl *txcp;
	int tx_compl = 0, mcc_compl, status = 0;
	u16 end_idx;

	while ((txcp = be_tx_compl_get(tx_cq))) {
		end_idx = AMAP_GET_BITS(struct amap_eth_tx_compl,
				wrb_index, txcp);
		be_tx_compl_process(adapter, end_idx);
		tx_compl++;
	}

	mcc_compl = be_process_mcc(adapter, &status);

	napi_complete(napi);

	if (mcc_compl) {
		struct be_mcc_obj *mcc_obj = &adapter->mcc_obj;
		be_cq_notify(adapter, mcc_obj->cq.id, true, mcc_compl);
	}

	if (tx_compl) {
		be_cq_notify(adapter, adapter->tx_obj.cq.id, true, tx_compl);

		/* As Tx wrbs have been freed up, wake up netdev queue if
		 * it was stopped due to lack of tx wrbs.
		 */
		if (netif_queue_stopped(adapter->netdev) &&
			atomic_read(&txq->used) < txq->len / 2) {
			netif_wake_queue(adapter->netdev);
		}

		tx_stats(adapter)->be_tx_events++;
		tx_stats(adapter)->be_tx_compl += tx_compl;
	}

	return 1;
}

void be_detect_dump_ue(struct be_adapter *adapter)
{
	u32 ue_status_lo, ue_status_hi, ue_status_lo_mask, ue_status_hi_mask;
	u32 i;

	pci_read_config_dword(adapter->pdev,
				PCICFG_UE_STATUS_LOW, &ue_status_lo);
	pci_read_config_dword(adapter->pdev,
				PCICFG_UE_STATUS_HIGH, &ue_status_hi);
	pci_read_config_dword(adapter->pdev,
				PCICFG_UE_STATUS_LOW_MASK, &ue_status_lo_mask);
	pci_read_config_dword(adapter->pdev,
				PCICFG_UE_STATUS_HI_MASK, &ue_status_hi_mask);

	ue_status_lo = (ue_status_lo & (~ue_status_lo_mask));
	ue_status_hi = (ue_status_hi & (~ue_status_hi_mask));

	if (ue_status_lo || ue_status_hi) {
		adapter->ue_detected = true;
		dev_err(&adapter->pdev->dev, "UE Detected!!\n");
	}

	if (ue_status_lo) {
		for (i = 0; ue_status_lo; ue_status_lo >>= 1, i++) {
			if (ue_status_lo & 1)
				dev_err(&adapter->pdev->dev,
				"UE: %s bit set\n", ue_status_low_desc[i]);
		}
	}
	if (ue_status_hi) {
		for (i = 0; ue_status_hi; ue_status_hi >>= 1, i++) {
			if (ue_status_hi & 1)
				dev_err(&adapter->pdev->dev,
				"UE: %s bit set\n", ue_status_hi_desc[i]);
		}
	}
}
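
/* In be_detect_dump_ue() above, each UE status register has a matching
 * mask register; status & ~mask leaves only the error sources that are
 * actually enabled. The walk then shifts the residue right one bit at
 * a time, printing the entry from ue_status_low_desc[] or
 * ue_status_hi_desc[] for every set bit, bit 0 first.
 */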

static void be_worker(struct work_struct *work)
{
	struct be_adapter *adapter =
		container_of(work, struct be_adapter, work.work);
	struct be_rx_obj *rxo;
	int i;

	/* when interrupts are not yet enabled, just reap any pending
	 * mcc completions */
	if (!netif_running(adapter->netdev)) {
		int mcc_compl, status = 0;

		mcc_compl = be_process_mcc(adapter, &status);

		if (mcc_compl) {
			struct be_mcc_obj *mcc_obj = &adapter->mcc_obj;
			be_cq_notify(adapter, mcc_obj->cq.id, false, mcc_compl);
		}
		goto reschedule;
	}

	if (!adapter->stats_ioctl_sent)
		be_cmd_get_stats(adapter, &adapter->stats_cmd);

	be_tx_rate_update(adapter);

	for_all_rx_queues(adapter, rxo, i) {
		be_rx_rate_update(rxo);
		be_rx_eqd_update(adapter, rxo);

		if (rxo->rx_post_starved) {
			rxo->rx_post_starved = false;
			be_post_rx_frags(rxo);
		}
	}
	if (!adapter->ue_detected && !lancer_chip(adapter))
		be_detect_dump_ue(adapter);

reschedule:
	schedule_delayed_work(&adapter->work, msecs_to_jiffies(1000));
}

static void be_msix_disable(struct be_adapter *adapter)
{
	if (adapter->msix_enabled) {
		pci_disable_msix(adapter->pdev);
		adapter->msix_enabled = false;
	}
}

static int be_num_rxqs_get(struct be_adapter *adapter)
{
	if (multi_rxq && (adapter->function_caps & BE_FUNCTION_CAPS_RSS) &&
		!adapter->sriov_enabled && !(adapter->function_mode & 0x400)) {
		return 1 + MAX_RSS_QS; /* one default non-RSS queue */
	} else {
		dev_warn(&adapter->pdev->dev,
			"No support for multiple RX queues\n");
		return 1;
	}
}
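
/* A note on the 0x400 test in be_num_rxqs_get() above: it checks a
 * function_mode bit under which multiple RX queues are not supported
 * (later versions of this driver appear to name the bit FLEX10_MODE).
 * Multiple RX queues additionally require the RSS function capability,
 * SR-IOV disabled, and the multi_rxq module parameter left enabled.
 */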

static void be_msix_enable(struct be_adapter *adapter)
{
#define BE_MIN_MSIX_VECTORS	(1 + 1) /* Rx + Tx */
	int i, status;

	adapter->num_rx_qs = be_num_rxqs_get(adapter);

	for (i = 0; i < (adapter->num_rx_qs + 1); i++)
		adapter->msix_entries[i].entry = i;

	status = pci_enable_msix(adapter->pdev, adapter->msix_entries,
			adapter->num_rx_qs + 1);
	if (status == 0) {
		goto done;
	} else if (status >= BE_MIN_MSIX_VECTORS) {
		if (pci_enable_msix(adapter->pdev, adapter->msix_entries,
				status) == 0) {
			adapter->num_rx_qs = status - 1;
			dev_warn(&adapter->pdev->dev,
				"Could alloc only %d MSIx vectors. "
				"Using %d RX Qs\n", status, adapter->num_rx_qs);
			goto done;
		}
	}
	return;
done:
	adapter->msix_enabled = true;
}
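
/* be_msix_enable() above first asks for num_rx_qs + 1 vectors (one per
 * RX queue plus one shared TX/MCC vector). With this era's
 * pci_enable_msix(), a positive return is the number of vectors that
 * could have been granted, so on a partial grant the driver retries
 * with exactly that many and shrinks the RX queue count to match:
 * e.g. a request for 5 vectors answered with 3 ends up as 2 RX queues
 * plus the TX/MCC vector.
 */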

static void be_sriov_enable(struct be_adapter *adapter)
{
	be_check_sriov_fn_type(adapter);
#ifdef CONFIG_PCI_IOV
	if (be_physfn(adapter) && num_vfs) {
		int status;

		status = pci_enable_sriov(adapter->pdev, num_vfs);
		adapter->sriov_enabled = status ? false : true;
	}
#endif
}

static void be_sriov_disable(struct be_adapter *adapter)
{
#ifdef CONFIG_PCI_IOV
	if (adapter->sriov_enabled) {
		pci_disable_sriov(adapter->pdev);
		adapter->sriov_enabled = false;
	}
#endif
}

static inline int be_msix_vec_get(struct be_adapter *adapter,
					struct be_eq_obj *eq_obj)
{
	return adapter->msix_entries[eq_obj->msix_vec_idx].vector;
}

static int be_request_irq(struct be_adapter *adapter,
		struct be_eq_obj *eq_obj,
		void *handler, char *desc, void *context)
{
	struct net_device *netdev = adapter->netdev;
	int vec;

	sprintf(eq_obj->desc, "%s-%s", netdev->name, desc);
	vec = be_msix_vec_get(adapter, eq_obj);
	return request_irq(vec, handler, 0, eq_obj->desc, context);
}

static void be_free_irq(struct be_adapter *adapter, struct be_eq_obj *eq_obj,
			void *context)
{
	int vec = be_msix_vec_get(adapter, eq_obj);
	free_irq(vec, context);
}

static int be_msix_register(struct be_adapter *adapter)
{
	struct be_rx_obj *rxo;
	int status, i;
	char qname[10];

	status = be_request_irq(adapter, &adapter->tx_eq, be_msix_tx_mcc, "tx",
				adapter);
	if (status)
		goto err;

	for_all_rx_queues(adapter, rxo, i) {
		sprintf(qname, "rxq%d", i);
		status = be_request_irq(adapter, &rxo->rx_eq, be_msix_rx,
				qname, rxo);
		if (status)
			goto err_msix;
	}

	return 0;

err_msix:
	be_free_irq(adapter, &adapter->tx_eq, adapter);

	for (i--, rxo = &adapter->rx_obj[i]; i >= 0; i--, rxo--)
		be_free_irq(adapter, &rxo->rx_eq, rxo);

err:
	dev_warn(&adapter->pdev->dev,
		"MSIX Request IRQ failed - err %d\n", status);
	pci_disable_msix(adapter->pdev);
	adapter->msix_enabled = false;
	return status;
}

static int be_irq_register(struct be_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	int status;

	if (adapter->msix_enabled) {
		status = be_msix_register(adapter);
		if (status == 0)
			goto done;
		/* INTx is not supported for VF */
		if (!be_physfn(adapter))
			return status;
	}

	/* INTx */
	netdev->irq = adapter->pdev->irq;
	status = request_irq(netdev->irq, be_intx, IRQF_SHARED, netdev->name,
			adapter);
	if (status) {
		dev_err(&adapter->pdev->dev,
			"INTx request IRQ failed - err %d\n", status);
		return status;
	}
done:
	adapter->isr_registered = true;
	return 0;
}

static void be_irq_unregister(struct be_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	struct be_rx_obj *rxo;
	int i;

	if (!adapter->isr_registered)
		return;

	/* INTx */
	if (!adapter->msix_enabled) {
		free_irq(netdev->irq, adapter);
		goto done;
	}

	/* MSIx */
	be_free_irq(adapter, &adapter->tx_eq, adapter);

	for_all_rx_queues(adapter, rxo, i)
		be_free_irq(adapter, &rxo->rx_eq, rxo);

done:
	adapter->isr_registered = false;
}

static int be_close(struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_rx_obj *rxo;
	struct be_eq_obj *tx_eq = &adapter->tx_eq;
	int vec, i;

	be_async_mcc_disable(adapter);

	netif_stop_queue(netdev);
	netif_carrier_off(netdev);
	adapter->link_up = false;

	if (!lancer_chip(adapter))
		be_intr_set(adapter, false);

	if (adapter->msix_enabled) {
		vec = be_msix_vec_get(adapter, tx_eq);
		synchronize_irq(vec);

		for_all_rx_queues(adapter, rxo, i) {
			vec = be_msix_vec_get(adapter, &rxo->rx_eq);
			synchronize_irq(vec);
		}
	} else {
		synchronize_irq(netdev->irq);
	}
	be_irq_unregister(adapter);

	for_all_rx_queues(adapter, rxo, i)
		napi_disable(&rxo->rx_eq.napi);

	napi_disable(&tx_eq->napi);

	/* Wait for all pending tx completions to arrive so that
	 * all tx skbs are freed.
	 */
	be_tx_compl_clean(adapter);

	return 0;
}
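
/* Teardown order in be_close() above: stop async MCC processing and
 * the netdev queue first, disable the interface-level interrupt (no
 * such control on lancer), synchronize_irq() on every vector so no
 * handler is still running, unregister the IRQs, disable NAPI, and
 * only then drain TX with be_tx_compl_clean() so all in-flight skbs
 * are freed.
 */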

static int be_open(struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_eq_obj *tx_eq = &adapter->tx_eq;
	struct be_rx_obj *rxo;
	bool link_up;
	int status, i;
	u8 mac_speed;
	u16 link_speed;

	for_all_rx_queues(adapter, rxo, i) {
		be_post_rx_frags(rxo);
		napi_enable(&rxo->rx_eq.napi);
	}
	napi_enable(&tx_eq->napi);

	be_irq_register(adapter);

	if (!lancer_chip(adapter))
		be_intr_set(adapter, true);

	/* The evt queues are created in unarmed state; arm them */
	for_all_rx_queues(adapter, rxo, i) {
		be_eq_notify(adapter, rxo->rx_eq.q.id, true, false, 0);
		be_cq_notify(adapter, rxo->cq.id, true, 0);
	}
	be_eq_notify(adapter, tx_eq->q.id, true, false, 0);

	/* Now that interrupts are on we can process async mcc */
	be_async_mcc_enable(adapter);

	status = be_cmd_link_status_query(adapter, &link_up, &mac_speed,
			&link_speed);
	if (status)
		goto err;
	be_link_status_update(adapter, link_up);

	if (be_physfn(adapter)) {
		status = be_vid_config(adapter, false, 0);
		if (status)
			goto err;

		status = be_cmd_set_flow_control(adapter,
				adapter->tx_fc, adapter->rx_fc);
		if (status)
			goto err;
	}

	return 0;
err:
	be_close(adapter->netdev);
	return -EIO;
}

static int be_setup_wol(struct be_adapter *adapter, bool enable)
{
	struct be_dma_mem cmd;
	int status = 0;
	u8 mac[ETH_ALEN];

	memset(mac, 0, ETH_ALEN);

	cmd.size = sizeof(struct be_cmd_req_acpi_wol_magic_config);
	cmd.va = pci_alloc_consistent(adapter->pdev, cmd.size, &cmd.dma);
	if (cmd.va == NULL)
		return -1;
	memset(cmd.va, 0, cmd.size);

	if (enable) {
		status = pci_write_config_dword(adapter->pdev,
			PCICFG_PM_CONTROL_OFFSET, PCICFG_PM_CONTROL_MASK);
		if (status) {
			dev_err(&adapter->pdev->dev,
				"Could not enable Wake-on-lan\n");
			pci_free_consistent(adapter->pdev, cmd.size, cmd.va,
					cmd.dma);
			return status;
		}
		status = be_cmd_enable_magic_wol(adapter,
				adapter->netdev->dev_addr, &cmd);
		pci_enable_wake(adapter->pdev, PCI_D3hot, 1);
		pci_enable_wake(adapter->pdev, PCI_D3cold, 1);
	} else {
		status = be_cmd_enable_magic_wol(adapter, mac, &cmd);
		pci_enable_wake(adapter->pdev, PCI_D3hot, 0);
		pci_enable_wake(adapter->pdev, PCI_D3cold, 0);
	}

	pci_free_consistent(adapter->pdev, cmd.size, cmd.va, cmd.dma);
	return status;
}

/*
 * Generate a seed MAC address from the PF MAC Address using jhash.
 * MAC addresses for VFs are assigned incrementally starting from the seed.
 * These addresses are programmed in the ASIC by the PF and the VF driver
 * queries for the MAC address during its probe.
 */
static inline int be_vf_eth_addr_config(struct be_adapter *adapter)
{
	u32 vf = 0;
	int status = 0;
	u8 mac[ETH_ALEN];

	be_vf_eth_addr_generate(adapter, mac);

	for (vf = 0; vf < num_vfs; vf++) {
		status = be_cmd_pmac_add(adapter, mac,
					adapter->vf_cfg[vf].vf_if_handle,
					&adapter->vf_cfg[vf].vf_pmac_id);
		if (status)
			dev_err(&adapter->pdev->dev,
				"Mac address add failed for VF %d\n", vf);
		else
			memcpy(adapter->vf_cfg[vf].vf_mac_addr, mac, ETH_ALEN);

		mac[5] += 1;
	}
	return status;
}
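
/* The per-VF addresses assigned above differ only in the last octet:
 * mac[5] += 1 per VF, with no carry into mac[4]. The jhash-seeded base
 * address comes from be_vf_eth_addr_generate(), and each VF's address
 * is programmed through be_cmd_pmac_add() against that VF's interface
 * handle, so the VF driver can later query it during probe.
 */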

static inline void be_vf_eth_addr_rem(struct be_adapter *adapter)
{
	u32 vf;

	for (vf = 0; vf < num_vfs; vf++) {
		if (adapter->vf_cfg[vf].vf_pmac_id != BE_INVALID_PMAC_ID)
			be_cmd_pmac_del(adapter,
					adapter->vf_cfg[vf].vf_if_handle,
					adapter->vf_cfg[vf].vf_pmac_id);
	}
}

static int be_setup(struct be_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	u32 cap_flags, en_flags, vf = 0;
	int status;
	u8 mac[ETH_ALEN];

	cap_flags = en_flags = BE_IF_FLAGS_UNTAGGED | BE_IF_FLAGS_BROADCAST;

	if (be_physfn(adapter)) {
		cap_flags |= BE_IF_FLAGS_MCAST_PROMISCUOUS |
				BE_IF_FLAGS_PROMISCUOUS |
				BE_IF_FLAGS_PASS_L3L4_ERRORS;
		en_flags |= BE_IF_FLAGS_PASS_L3L4_ERRORS;

		if (be_multi_rxq(adapter)) {
			cap_flags |= BE_IF_FLAGS_RSS;
			en_flags |= BE_IF_FLAGS_RSS;
		}
	}

	status = be_cmd_if_create(adapter, cap_flags, en_flags,
			netdev->dev_addr, false/* pmac_invalid */,
			&adapter->if_handle, &adapter->pmac_id, 0);
	if (status != 0)
		goto do_none;

	if (be_physfn(adapter)) {
		while (vf < num_vfs) {
			cap_flags = en_flags = BE_IF_FLAGS_UNTAGGED
					| BE_IF_FLAGS_BROADCAST;
			status = be_cmd_if_create(adapter, cap_flags, en_flags,
					mac, true,
					&adapter->vf_cfg[vf].vf_if_handle,
					NULL, vf+1);
			if (status) {
				dev_err(&adapter->pdev->dev,
				"Interface Create failed for VF %d\n", vf);
				goto if_destroy;
			}
			adapter->vf_cfg[vf].vf_pmac_id = BE_INVALID_PMAC_ID;
			vf++;
		}
	} else if (!be_physfn(adapter)) {
		status = be_cmd_mac_addr_query(adapter, mac,
			MAC_ADDRESS_TYPE_NETWORK, false, adapter->if_handle);
		if (!status) {
			memcpy(adapter->netdev->dev_addr, mac, ETH_ALEN);
			memcpy(adapter->netdev->perm_addr, mac, ETH_ALEN);
		}
	}

	status = be_tx_queues_create(adapter);
	if (status != 0)
		goto if_destroy;

	status = be_rx_queues_create(adapter);
	if (status != 0)
		goto tx_qs_destroy;

	status = be_mcc_queues_create(adapter);
	if (status != 0)
		goto rx_qs_destroy;

	if (be_physfn(adapter)) {
		status = be_vf_eth_addr_config(adapter);
		if (status)
			goto mcc_q_destroy;
	}

	adapter->link_speed = -1;

	return 0;

mcc_q_destroy:
	if (be_physfn(adapter))
		be_vf_eth_addr_rem(adapter);
	be_mcc_queues_destroy(adapter);
rx_qs_destroy:
	be_rx_queues_destroy(adapter);
tx_qs_destroy:
	be_tx_queues_destroy(adapter);
if_destroy:
	for (vf = 0; vf < num_vfs; vf++)
		if (adapter->vf_cfg[vf].vf_if_handle)
			be_cmd_if_destroy(adapter,
					adapter->vf_cfg[vf].vf_if_handle);
	be_cmd_if_destroy(adapter, adapter->if_handle);
do_none:
	return status;
}

static int be_clear(struct be_adapter *adapter)
{
	if (be_physfn(adapter))
		be_vf_eth_addr_rem(adapter);

	be_mcc_queues_destroy(adapter);
	be_rx_queues_destroy(adapter);
	be_tx_queues_destroy(adapter);

	be_cmd_if_destroy(adapter, adapter->if_handle);

	/* tell fw we're done with firing cmds */
	be_cmd_fw_clean(adapter);
	return 0;
}

#define FW_FILE_HDR_SIGN	"ServerEngines Corp. "
static bool be_flash_redboot(struct be_adapter *adapter,
			const u8 *p, u32 img_start, int image_size,
			int hdr_size)
{
	u32 crc_offset;
	u8 flashed_crc[4];
	int status;

	crc_offset = hdr_size + img_start + image_size - 4;

	p += crc_offset;

	status = be_cmd_get_flash_crc(adapter, flashed_crc,
			(image_size - 4));
	if (status) {
		dev_err(&adapter->pdev->dev,
		"could not get crc from flash, not flashing redboot\n");
		return false;
	}

	/* update redboot only if crc does not match */
	if (!memcmp(flashed_crc, p, 4))
		return false;
	else
		return true;
}

static int be_flash_data(struct be_adapter *adapter,
			const struct firmware *fw,
			struct be_dma_mem *flash_cmd, int num_of_images)
{
	int status = 0, i, filehdr_size = 0;
	u32 total_bytes = 0, flash_op;
	int num_bytes;
	const u8 *p = fw->data;
	struct be_cmd_write_flashrom *req = flash_cmd->va;
	const struct flash_comp *pflashcomp;
	int num_comp;

	static const struct flash_comp gen3_flash_types[9] = {
		{ FLASH_iSCSI_PRIMARY_IMAGE_START_g3, IMG_TYPE_ISCSI_ACTIVE,
			FLASH_IMAGE_MAX_SIZE_g3},
		{ FLASH_REDBOOT_START_g3, IMG_TYPE_REDBOOT,
			FLASH_REDBOOT_IMAGE_MAX_SIZE_g3},
		{ FLASH_iSCSI_BIOS_START_g3, IMG_TYPE_BIOS,
			FLASH_BIOS_IMAGE_MAX_SIZE_g3},
		{ FLASH_PXE_BIOS_START_g3, IMG_TYPE_PXE_BIOS,
			FLASH_BIOS_IMAGE_MAX_SIZE_g3},
		{ FLASH_FCoE_BIOS_START_g3, IMG_TYPE_FCOE_BIOS,
			FLASH_BIOS_IMAGE_MAX_SIZE_g3},
		{ FLASH_iSCSI_BACKUP_IMAGE_START_g3, IMG_TYPE_ISCSI_BACKUP,
			FLASH_IMAGE_MAX_SIZE_g3},
		{ FLASH_FCoE_PRIMARY_IMAGE_START_g3, IMG_TYPE_FCOE_FW_ACTIVE,
			FLASH_IMAGE_MAX_SIZE_g3},
		{ FLASH_FCoE_BACKUP_IMAGE_START_g3, IMG_TYPE_FCOE_FW_BACKUP,
			FLASH_IMAGE_MAX_SIZE_g3},
		{ FLASH_NCSI_START_g3, IMG_TYPE_NCSI_FW,
			FLASH_NCSI_IMAGE_MAX_SIZE_g3}
	};
	static const struct flash_comp gen2_flash_types[8] = {
		{ FLASH_iSCSI_PRIMARY_IMAGE_START_g2, IMG_TYPE_ISCSI_ACTIVE,
			FLASH_IMAGE_MAX_SIZE_g2},
		{ FLASH_REDBOOT_START_g2, IMG_TYPE_REDBOOT,
			FLASH_REDBOOT_IMAGE_MAX_SIZE_g2},
		{ FLASH_iSCSI_BIOS_START_g2, IMG_TYPE_BIOS,
			FLASH_BIOS_IMAGE_MAX_SIZE_g2},
		{ FLASH_PXE_BIOS_START_g2, IMG_TYPE_PXE_BIOS,
			FLASH_BIOS_IMAGE_MAX_SIZE_g2},
		{ FLASH_FCoE_BIOS_START_g2, IMG_TYPE_FCOE_BIOS,
			FLASH_BIOS_IMAGE_MAX_SIZE_g2},
		{ FLASH_iSCSI_BACKUP_IMAGE_START_g2, IMG_TYPE_ISCSI_BACKUP,
			FLASH_IMAGE_MAX_SIZE_g2},
		{ FLASH_FCoE_PRIMARY_IMAGE_START_g2, IMG_TYPE_FCOE_FW_ACTIVE,
			FLASH_IMAGE_MAX_SIZE_g2},
		{ FLASH_FCoE_BACKUP_IMAGE_START_g2, IMG_TYPE_FCOE_FW_BACKUP,
			FLASH_IMAGE_MAX_SIZE_g2}
	};

	if (adapter->generation == BE_GEN3) {
		pflashcomp = gen3_flash_types;
		filehdr_size = sizeof(struct flash_file_hdr_g3);
		num_comp = ARRAY_SIZE(gen3_flash_types);
	} else {
		pflashcomp = gen2_flash_types;
		filehdr_size = sizeof(struct flash_file_hdr_g2);
		num_comp = ARRAY_SIZE(gen2_flash_types);
	}
	for (i = 0; i < num_comp; i++) {
		if ((pflashcomp[i].optype == IMG_TYPE_NCSI_FW) &&
			memcmp(adapter->fw_ver, "3.102.148.0", 11) < 0)
			continue;
		if ((pflashcomp[i].optype == IMG_TYPE_REDBOOT) &&
			(!be_flash_redboot(adapter, fw->data,
			 pflashcomp[i].offset, pflashcomp[i].size,
			 filehdr_size)))
			continue;
		p = fw->data;
		p += filehdr_size + pflashcomp[i].offset
			+ (num_of_images * sizeof(struct image_hdr));
		if (p + pflashcomp[i].size > fw->data + fw->size)
			return -1;
		total_bytes = pflashcomp[i].size;
		while (total_bytes) {
			if (total_bytes > 32*1024)
				num_bytes = 32*1024;
			else
				num_bytes = total_bytes;
			total_bytes -= num_bytes;

			if (!total_bytes)
				flash_op = FLASHROM_OPER_FLASH;
			else
				flash_op = FLASHROM_OPER_SAVE;
			memcpy(req->params.data_buf, p, num_bytes);
			p += num_bytes;
			status = be_cmd_write_flashrom(adapter, flash_cmd,
				pflashcomp[i].optype, flash_op, num_bytes);
			if (status) {
				dev_err(&adapter->pdev->dev,
					"cmd to write to flash rom failed.\n");
				return -1;
			}
			yield();
		}
	}
	return 0;
}
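
/* be_flash_data() above streams each firmware component to the card in
 * 32KB chunks: every chunk but the last is written with
 * FLASHROM_OPER_SAVE, and only the final chunk uses FLASHROM_OPER_FLASH
 * (the names suggest save-then-commit semantics for a component). The
 * NCSI component is skipped for firmware versions below 3.102.148.0,
 * and redboot is flashed only when its CRC differs from what is
 * already in flash, as decided by be_flash_redboot().
 */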
2490
Ajit Khaparde3f0d4562010-02-09 01:30:35 +00002491static int get_ufigen_type(struct flash_file_hdr_g2 *fhdr)
2492{
2493 if (fhdr == NULL)
2494 return 0;
2495 if (fhdr->build[0] == '3')
2496 return BE_GEN3;
2497 else if (fhdr->build[0] == '2')
2498 return BE_GEN2;
2499 else
2500 return 0;
2501}
2502
Ajit Khaparde84517482009-09-04 03:12:16 +00002503int be_load_fw(struct be_adapter *adapter, u8 *func)
2504{
2505 char fw_file[ETHTOOL_FLASH_MAX_FILENAME];
2506 const struct firmware *fw;
Ajit Khaparde3f0d4562010-02-09 01:30:35 +00002507 struct flash_file_hdr_g2 *fhdr;
2508 struct flash_file_hdr_g3 *fhdr3;
2509 struct image_hdr *img_hdr_ptr = NULL;
Ajit Khaparde84517482009-09-04 03:12:16 +00002510 struct be_dma_mem flash_cmd;
Ajit Khaparde8b93b712010-03-31 01:57:10 +00002511 int status, i = 0, num_imgs = 0;
Ajit Khaparde84517482009-09-04 03:12:16 +00002512 const u8 *p;
Ajit Khaparde84517482009-09-04 03:12:16 +00002513
Sarveshwar Bandid9efd2a2010-11-18 23:44:45 +00002514 if (!netif_running(adapter->netdev)) {
2515 dev_err(&adapter->pdev->dev,
2516 "Firmware load not allowed (interface is down)\n");
2517 return -EPERM;
2518 }
2519
Ajit Khaparde84517482009-09-04 03:12:16 +00002520 strcpy(fw_file, func);
2521
2522 status = request_firmware(&fw, fw_file, &adapter->pdev->dev);
2523 if (status)
2524 goto fw_exit;
2525
2526 p = fw->data;
Ajit Khaparde3f0d4562010-02-09 01:30:35 +00002527 fhdr = (struct flash_file_hdr_g2 *) p;
Ajit Khaparde84517482009-09-04 03:12:16 +00002528 dev_info(&adapter->pdev->dev, "Flashing firmware file %s\n", fw_file);
2529
Ajit Khaparde84517482009-09-04 03:12:16 +00002530 flash_cmd.size = sizeof(struct be_cmd_write_flashrom) + 32*1024;
2531 flash_cmd.va = pci_alloc_consistent(adapter->pdev, flash_cmd.size,
2532 &flash_cmd.dma);
2533 if (!flash_cmd.va) {
2534 status = -ENOMEM;
2535 dev_err(&adapter->pdev->dev,
2536 "Memory allocation failure while flashing\n");
2537 goto fw_exit;
2538 }
2539
Ajit Khaparde3f0d4562010-02-09 01:30:35 +00002540 if ((adapter->generation == BE_GEN3) &&
2541 (get_ufigen_type(fhdr) == BE_GEN3)) {
2542 fhdr3 = (struct flash_file_hdr_g3 *) fw->data;
Ajit Khaparde8b93b712010-03-31 01:57:10 +00002543 num_imgs = le32_to_cpu(fhdr3->num_imgs);
2544 for (i = 0; i < num_imgs; i++) {
Ajit Khaparde3f0d4562010-02-09 01:30:35 +00002545 img_hdr_ptr = (struct image_hdr *) (fw->data +
2546 (sizeof(struct flash_file_hdr_g3) +
Ajit Khaparde8b93b712010-03-31 01:57:10 +00002547 i * sizeof(struct image_hdr)));
2548 if (le32_to_cpu(img_hdr_ptr->imageid) == 1)
2549 status = be_flash_data(adapter, fw, &flash_cmd,
2550 num_imgs);
Ajit Khaparde3f0d4562010-02-09 01:30:35 +00002551 }
2552 } else if ((adapter->generation == BE_GEN2) &&
2553 (get_ufigen_type(fhdr) == BE_GEN2)) {
2554 status = be_flash_data(adapter, fw, &flash_cmd, 0);
2555 } else {
2556 dev_err(&adapter->pdev->dev,
2557 "UFI and Interface are not compatible for flashing\n");
2558 status = -1;
Ajit Khaparde84517482009-09-04 03:12:16 +00002559 }
2560
2561 pci_free_consistent(adapter->pdev, flash_cmd.size, flash_cmd.va,
2562 flash_cmd.dma);
2563 if (status) {
2564 dev_err(&adapter->pdev->dev, "Firmware load error\n");
2565 goto fw_exit;
2566 }
2567
André Goddard Rosaaf901ca2009-11-14 13:09:05 -02002568 dev_info(&adapter->pdev->dev, "Firmware flashed successfully\n");
Ajit Khaparde84517482009-09-04 03:12:16 +00002569
2570fw_exit:
2571 release_firmware(fw);
2572 return status;
2573}
2574
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002575static struct net_device_ops be_netdev_ops = {
2576 .ndo_open = be_open,
2577 .ndo_stop = be_close,
2578 .ndo_start_xmit = be_xmit,
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002579 .ndo_set_rx_mode = be_set_multicast_list,
2580 .ndo_set_mac_address = be_mac_addr_set,
2581 .ndo_change_mtu = be_change_mtu,
2582 .ndo_validate_addr = eth_validate_addr,
2583 .ndo_vlan_rx_register = be_vlan_register,
2584 .ndo_vlan_rx_add_vid = be_vlan_add_vid,
2585 .ndo_vlan_rx_kill_vid = be_vlan_rem_vid,
Ajit Khaparde64600ea2010-07-23 01:50:34 +00002586 .ndo_set_vf_mac = be_set_vf_mac,
Ajit Khaparde1da87b72010-07-23 01:51:22 +00002587 .ndo_set_vf_vlan = be_set_vf_vlan,
Ajit Khapardee1d18732010-07-23 01:52:13 +00002588 .ndo_set_vf_tx_rate = be_set_vf_tx_rate,
Ajit Khaparde64600ea2010-07-23 01:50:34 +00002589 .ndo_get_vf_config = be_get_vf_config
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002590};
2591
static void be_netdev_init(struct net_device *netdev)
{
        struct be_adapter *adapter = netdev_priv(netdev);
        struct be_rx_obj *rxo;
        int i;

        netdev->features |= NETIF_F_SG | NETIF_F_HW_VLAN_RX | NETIF_F_TSO |
                NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_FILTER |
                NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
                NETIF_F_GRO | NETIF_F_TSO6;

        netdev->vlan_features |= NETIF_F_SG | NETIF_F_TSO |
                NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;

        if (lancer_chip(adapter))
                netdev->vlan_features |= NETIF_F_TSO6;

        netdev->flags |= IFF_MULTICAST;

        adapter->rx_csum = true;

        /* Default settings for Rx and Tx flow control */
        adapter->rx_fc = true;
        adapter->tx_fc = true;

        netif_set_gso_max_size(netdev, 65535);

        BE_SET_NETDEV_OPS(netdev, &be_netdev_ops);

        SET_ETHTOOL_OPS(netdev, &be_ethtool_ops);

        for_all_rx_queues(adapter, rxo, i)
                netif_napi_add(netdev, &rxo->rx_eq.napi, be_poll_rx,
                        BE_NAPI_WEIGHT);

        netif_napi_add(netdev, &adapter->tx_eq.napi, be_poll_tx_mcc,
                BE_NAPI_WEIGHT);
}

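/* Undo the ioremaps done in be_map_pci_bars(). A VF's pcicfg pointer is an
 * offset into the doorbell mapping and must not be unmapped separately.
 */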
static void be_unmap_pci_bars(struct be_adapter *adapter)
{
        if (adapter->csr)
                iounmap(adapter->csr);
        if (adapter->db)
                iounmap(adapter->db);
        if (adapter->pcicfg && be_physfn(adapter))
                iounmap(adapter->pcicfg);
}

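/* Map the BARs this driver uses. BAR numbers differ by chip generation and
 * PF/VF mode: Lancer exposes only a doorbell area in BAR 0, while BE2/BE3
 * PFs additionally map the CSR (BAR 2) and a pcicfg region.
 */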
static int be_map_pci_bars(struct be_adapter *adapter)
{
        u8 __iomem *addr;
        int pcicfg_reg, db_reg;

        if (lancer_chip(adapter)) {
                addr = ioremap_nocache(pci_resource_start(adapter->pdev, 0),
                        pci_resource_len(adapter->pdev, 0));
                if (addr == NULL)
                        return -ENOMEM;
                adapter->db = addr;
                return 0;
        }

        if (be_physfn(adapter)) {
                addr = ioremap_nocache(pci_resource_start(adapter->pdev, 2),
                                pci_resource_len(adapter->pdev, 2));
                if (addr == NULL)
                        return -ENOMEM;
                adapter->csr = addr;
        }

        if (adapter->generation == BE_GEN2) {
                pcicfg_reg = 1;
                db_reg = 4;
        } else {
                pcicfg_reg = 0;
                if (be_physfn(adapter))
                        db_reg = 4;
                else
                        db_reg = 0;
        }
        addr = ioremap_nocache(pci_resource_start(adapter->pdev, db_reg),
                                pci_resource_len(adapter->pdev, db_reg));
        if (addr == NULL)
                goto pci_map_err;
        adapter->db = addr;

        if (be_physfn(adapter)) {
                addr = ioremap_nocache(
                                pci_resource_start(adapter->pdev, pcicfg_reg),
                                pci_resource_len(adapter->pdev, pcicfg_reg));
                if (addr == NULL)
                        goto pci_map_err;
                adapter->pcicfg = addr;
        } else
                adapter->pcicfg = adapter->db + SRIOV_VF_PCICFG_OFFSET;

        return 0;
pci_map_err:
        be_unmap_pci_bars(adapter);
        return -ENOMEM;
}

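/* Release the DMA buffers (mailbox and multicast command memory) and BAR
 * mappings acquired by be_ctrl_init().
 */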
static void be_ctrl_cleanup(struct be_adapter *adapter)
{
        struct be_dma_mem *mem = &adapter->mbox_mem_alloced;

        be_unmap_pci_bars(adapter);

        if (mem->va)
                pci_free_consistent(adapter->pdev, mem->size,
                        mem->va, mem->dma);

        mem = &adapter->mc_cmd_mem;
        if (mem->va)
                pci_free_consistent(adapter->pdev, mem->size,
                        mem->va, mem->dma);
}

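/* Map the BARs and allocate the DMA-coherent buffers used for mailbox and
 * multicast commands. The mailbox is over-allocated by 16 bytes so that the
 * address handed to hardware can be 16-byte aligned.
 */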
static int be_ctrl_init(struct be_adapter *adapter)
{
        struct be_dma_mem *mbox_mem_alloc = &adapter->mbox_mem_alloced;
        struct be_dma_mem *mbox_mem_align = &adapter->mbox_mem;
        struct be_dma_mem *mc_cmd_mem = &adapter->mc_cmd_mem;
        int status;

        status = be_map_pci_bars(adapter);
        if (status)
                goto done;

        mbox_mem_alloc->size = sizeof(struct be_mcc_mailbox) + 16;
        mbox_mem_alloc->va = pci_alloc_consistent(adapter->pdev,
                                mbox_mem_alloc->size, &mbox_mem_alloc->dma);
        if (!mbox_mem_alloc->va) {
                status = -ENOMEM;
                goto unmap_pci_bars;
        }

        mbox_mem_align->size = sizeof(struct be_mcc_mailbox);
        mbox_mem_align->va = PTR_ALIGN(mbox_mem_alloc->va, 16);
        mbox_mem_align->dma = PTR_ALIGN(mbox_mem_alloc->dma, 16);
        memset(mbox_mem_align->va, 0, sizeof(struct be_mcc_mailbox));

        mc_cmd_mem->size = sizeof(struct be_cmd_req_mcast_mac_config);
        mc_cmd_mem->va = pci_alloc_consistent(adapter->pdev, mc_cmd_mem->size,
                        &mc_cmd_mem->dma);
        if (mc_cmd_mem->va == NULL) {
                status = -ENOMEM;
                goto free_mbox;
        }
        memset(mc_cmd_mem->va, 0, mc_cmd_mem->size);

        mutex_init(&adapter->mbox_lock);
        spin_lock_init(&adapter->mcc_lock);
        spin_lock_init(&adapter->mcc_cq_lock);

        init_completion(&adapter->flash_compl);
        pci_save_state(adapter->pdev);
        return 0;

free_mbox:
        pci_free_consistent(adapter->pdev, mbox_mem_alloc->size,
                mbox_mem_alloc->va, mbox_mem_alloc->dma);

unmap_pci_bars:
        be_unmap_pci_bars(adapter);

done:
        return status;
}

static void be_stats_cleanup(struct be_adapter *adapter)
{
        struct be_dma_mem *cmd = &adapter->stats_cmd;

        if (cmd->va)
                pci_free_consistent(adapter->pdev, cmd->size,
                        cmd->va, cmd->dma);
}

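/* Allocate the DMA buffer reused for GET_STATS firmware commands. */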
static int be_stats_init(struct be_adapter *adapter)
{
        struct be_dma_mem *cmd = &adapter->stats_cmd;

        cmd->size = sizeof(struct be_cmd_req_get_stats);
        cmd->va = pci_alloc_consistent(adapter->pdev, cmd->size, &cmd->dma);
        if (cmd->va == NULL)
                return -ENOMEM;
        memset(cmd->va, 0, cmd->size);
        return 0;
}

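/* PCI remove: stop the worker, unregister the netdev and release all
 * resources acquired in be_probe().
 */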
static void __devexit be_remove(struct pci_dev *pdev)
{
        struct be_adapter *adapter = pci_get_drvdata(pdev);

        if (!adapter)
                return;

        cancel_delayed_work_sync(&adapter->work);

        unregister_netdev(adapter->netdev);

        be_clear(adapter);

        be_stats_cleanup(adapter);

        be_ctrl_cleanup(adapter);

        be_sriov_disable(adapter);

        be_msix_disable(adapter);

        pci_set_drvdata(pdev, NULL);
        pci_release_regions(pdev);
        pci_disable_device(pdev);

        free_netdev(adapter->netdev);
}

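/* Query firmware for the version, port configuration and (on the PF) the
 * permanent MAC address; also derive how many VLAN filters this function
 * may use.
 */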
static int be_get_config(struct be_adapter *adapter)
{
        int status;
        u8 mac[ETH_ALEN];

        status = be_cmd_get_fw_ver(adapter, adapter->fw_ver);
        if (status)
                return status;

        status = be_cmd_query_fw_cfg(adapter, &adapter->port_num,
                        &adapter->function_mode, &adapter->function_caps);
        if (status)
                return status;

        memset(mac, 0, ETH_ALEN);

        if (be_physfn(adapter)) {
                status = be_cmd_mac_addr_query(adapter, mac,
                        MAC_ADDRESS_TYPE_NETWORK, true /*permanent */, 0);

                if (status)
                        return status;

                if (!is_valid_ether_addr(mac))
                        return -EADDRNOTAVAIL;

                memcpy(adapter->netdev->dev_addr, mac, ETH_ALEN);
                memcpy(adapter->netdev->perm_addr, mac, ETH_ALEN);
        }

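        /* Bit 0x400 in function_mode is believed to indicate multi-channel
         * (FLEX10) operation, in which each function gets only a quarter
         * of the VLAN filter table.
         */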
        if (adapter->function_mode & 0x400)
                adapter->max_vlans = BE_NUM_VLANS_SUPPORTED/4;
        else
                adapter->max_vlans = BE_NUM_VLANS_SUPPORTED;

        return 0;
}

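/* Identify the ASIC generation from the PCI device id. Lancer
 * (OC_DEVICE_ID3) is additionally validated via the SLI_INTF register and
 * does not support VFs in this driver.
 */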
static int be_dev_family_check(struct be_adapter *adapter)
{
        struct pci_dev *pdev = adapter->pdev;
        u32 sli_intf = 0, if_type;

        switch (pdev->device) {
        case BE_DEVICE_ID1:
        case OC_DEVICE_ID1:
                adapter->generation = BE_GEN2;
                break;
        case BE_DEVICE_ID2:
        case OC_DEVICE_ID2:
                adapter->generation = BE_GEN3;
                break;
        case OC_DEVICE_ID3:
                pci_read_config_dword(pdev, SLI_INTF_REG_OFFSET, &sli_intf);
                if_type = (sli_intf & SLI_INTF_IF_TYPE_MASK) >>
                                                SLI_INTF_IF_TYPE_SHIFT;

                if (((sli_intf & SLI_INTF_VALID_MASK) != SLI_INTF_VALID) ||
                        if_type != 0x02) {
                        dev_err(&pdev->dev, "SLI_INTF reg val is not valid\n");
                        return -EINVAL;
                }
                if (num_vfs > 0) {
                        dev_err(&pdev->dev, "VFs not supported\n");
                        return -EINVAL;
                }
                adapter->sli_family = ((sli_intf & SLI_INTF_FAMILY_MASK) >>
                                         SLI_INTF_FAMILY_SHIFT);
                adapter->generation = BE_GEN3;
                break;
        default:
                adapter->generation = 0;
        }
        return 0;
}

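/* Probe sequence: enable the PCI function, set the DMA mask, map BARs and
 * allocate control structures, sync with firmware POST, then bring up the
 * queues and register the netdev. Errors unwind through the labels below.
 */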
static int __devinit be_probe(struct pci_dev *pdev,
                        const struct pci_device_id *pdev_id)
{
        int status = 0;
        struct be_adapter *adapter;
        struct net_device *netdev;

        status = pci_enable_device(pdev);
        if (status)
                goto do_none;

        status = pci_request_regions(pdev, DRV_NAME);
        if (status)
                goto disable_dev;
        pci_set_master(pdev);

        netdev = alloc_etherdev(sizeof(struct be_adapter));
        if (netdev == NULL) {
                status = -ENOMEM;
                goto rel_reg;
        }
        adapter = netdev_priv(netdev);
        adapter->pdev = pdev;
        pci_set_drvdata(pdev, adapter);

        status = be_dev_family_check(adapter);
        if (status)
                goto free_netdev;

        adapter->netdev = netdev;
        SET_NETDEV_DEV(netdev, &pdev->dev);

        status = pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
        if (!status) {
                netdev->features |= NETIF_F_HIGHDMA;
        } else {
                status = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
                if (status) {
                        dev_err(&pdev->dev, "Could not set PCI DMA Mask\n");
                        goto free_netdev;
                }
        }

        be_sriov_enable(adapter);

        status = be_ctrl_init(adapter);
        if (status)
                goto free_netdev;

        /* sync up with fw's ready state */
        if (be_physfn(adapter)) {
                status = be_cmd_POST(adapter);
                if (status)
                        goto ctrl_clean;
        }

        /* tell fw we're ready to fire cmds */
        status = be_cmd_fw_init(adapter);
        if (status)
                goto ctrl_clean;

        if (be_physfn(adapter)) {
                status = be_cmd_reset_function(adapter);
                if (status)
                        goto ctrl_clean;
        }

        status = be_stats_init(adapter);
        if (status)
                goto ctrl_clean;

        status = be_get_config(adapter);
        if (status)
                goto stats_clean;

        be_msix_enable(adapter);

        INIT_DELAYED_WORK(&adapter->work, be_worker);

        status = be_setup(adapter);
        if (status)
                goto msix_disable;

        be_netdev_init(netdev);
        status = register_netdev(netdev);
        if (status != 0)
                goto unsetup;
        netif_carrier_off(netdev);

        dev_info(&pdev->dev, "%s port %d\n", nic_name(pdev), adapter->port_num);
        schedule_delayed_work(&adapter->work, msecs_to_jiffies(100));
        return 0;

unsetup:
        be_clear(adapter);
msix_disable:
        be_msix_disable(adapter);
stats_clean:
        be_stats_cleanup(adapter);
ctrl_clean:
        be_ctrl_cleanup(adapter);
free_netdev:
        be_sriov_disable(adapter);
        free_netdev(netdev);
        pci_set_drvdata(pdev, NULL);
rel_reg:
        pci_release_regions(pdev);
disable_dev:
        pci_disable_device(pdev);
do_none:
        dev_err(&pdev->dev, "%s initialization failed\n", nic_name(pdev));
        return status;
}

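/* Legacy PM suspend: optionally arm wake-on-LAN, close the interface and
 * drop to the requested power state. be_resume() reverses this.
 */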
static int be_suspend(struct pci_dev *pdev, pm_message_t state)
{
        struct be_adapter *adapter = pci_get_drvdata(pdev);
        struct net_device *netdev = adapter->netdev;

        if (adapter->wol)
                be_setup_wol(adapter, true);

        netif_device_detach(netdev);
        if (netif_running(netdev)) {
                rtnl_lock();
                be_close(netdev);
                rtnl_unlock();
        }
        be_cmd_get_flow_control(adapter, &adapter->tx_fc, &adapter->rx_fc);
        be_clear(adapter);

        pci_save_state(pdev);
        pci_disable_device(pdev);
        pci_set_power_state(pdev, pci_choose_state(pdev, state));
        return 0;
}

static int be_resume(struct pci_dev *pdev)
{
        int status = 0;
        struct be_adapter *adapter = pci_get_drvdata(pdev);
        struct net_device *netdev = adapter->netdev;

        netif_device_detach(netdev);

        status = pci_enable_device(pdev);
        if (status)
                return status;

        pci_set_power_state(pdev, PCI_D0);
        pci_restore_state(pdev);

        /* tell fw we're ready to fire cmds */
        status = be_cmd_fw_init(adapter);
        if (status)
                return status;

        be_setup(adapter);
        if (netif_running(netdev)) {
                rtnl_lock();
                be_open(netdev);
                rtnl_unlock();
        }
        netif_device_attach(netdev);

        if (adapter->wol)
                be_setup_wol(adapter, false);
        return 0;
}

/*
 * An FLR will stop BE from DMAing any data.
 */
static void be_shutdown(struct pci_dev *pdev)
{
        struct be_adapter *adapter = pci_get_drvdata(pdev);
        struct net_device *netdev = adapter->netdev;

        netif_device_detach(netdev);

        be_cmd_reset_function(adapter);

        if (adapter->wol)
                be_setup_wol(adapter, true);

        pci_disable_device(pdev);
}

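/* PCI EEH error handlers: detach and quiesce on error, re-POST the card on
 * slot reset, and rebuild the queues when the slot is usable again.
 */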
static pci_ers_result_t be_eeh_err_detected(struct pci_dev *pdev,
                                pci_channel_state_t state)
{
        struct be_adapter *adapter = pci_get_drvdata(pdev);
        struct net_device *netdev = adapter->netdev;

        dev_err(&adapter->pdev->dev, "EEH error detected\n");

        adapter->eeh_err = true;

        netif_device_detach(netdev);

        if (netif_running(netdev)) {
                rtnl_lock();
                be_close(netdev);
                rtnl_unlock();
        }
        be_clear(adapter);

        if (state == pci_channel_io_perm_failure)
                return PCI_ERS_RESULT_DISCONNECT;

        pci_disable_device(pdev);

        return PCI_ERS_RESULT_NEED_RESET;
}

static pci_ers_result_t be_eeh_reset(struct pci_dev *pdev)
{
        struct be_adapter *adapter = pci_get_drvdata(pdev);
        int status;

        dev_info(&adapter->pdev->dev, "EEH reset\n");
        adapter->eeh_err = false;

        status = pci_enable_device(pdev);
        if (status)
                return PCI_ERS_RESULT_DISCONNECT;

        pci_set_master(pdev);
        pci_set_power_state(pdev, PCI_D0);
        pci_restore_state(pdev);

        /* Check if card is ok and fw is ready */
        status = be_cmd_POST(adapter);
        if (status)
                return PCI_ERS_RESULT_DISCONNECT;

        return PCI_ERS_RESULT_RECOVERED;
}

static void be_eeh_resume(struct pci_dev *pdev)
{
        int status = 0;
        struct be_adapter *adapter = pci_get_drvdata(pdev);
        struct net_device *netdev = adapter->netdev;

        dev_info(&adapter->pdev->dev, "EEH resume\n");

        pci_save_state(pdev);

        /* tell fw we're ready to fire cmds */
        status = be_cmd_fw_init(adapter);
        if (status)
                goto err;

        status = be_setup(adapter);
        if (status)
                goto err;

        if (netif_running(netdev)) {
                status = be_open(netdev);
                if (status)
                        goto err;
        }
        netif_device_attach(netdev);
        return;
err:
        dev_err(&adapter->pdev->dev, "EEH resume failed\n");
}

static struct pci_error_handlers be_eeh_handlers = {
        .error_detected = be_eeh_err_detected,
        .slot_reset = be_eeh_reset,
        .resume = be_eeh_resume,
};

static struct pci_driver be_driver = {
        .name = DRV_NAME,
        .id_table = be_dev_ids,
        .probe = be_probe,
        .remove = be_remove,
        .suspend = be_suspend,
        .resume = be_resume,
        .shutdown = be_shutdown,
        .err_handler = &be_eeh_handlers
};

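/* Validate module parameters before registering with the PCI core:
 * rx_frag_size must be 2048/4096/8192 and num_vfs is capped at 32.
 */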
static int __init be_init_module(void)
{
        if (rx_frag_size != 8192 && rx_frag_size != 4096 &&
            rx_frag_size != 2048) {
                printk(KERN_WARNING DRV_NAME
                        " : Module param rx_frag_size must be 2048/4096/8192."
                        " Using 2048\n");
                rx_frag_size = 2048;
        }

        if (num_vfs > 32) {
                printk(KERN_WARNING DRV_NAME
                        " : Module param num_vfs must not be greater than 32."
                        " Using 32\n");
                num_vfs = 32;
        }

        return pci_register_driver(&be_driver);
}
module_init(be_init_module);

static void __exit be_exit_module(void)
{
        pci_unregister_driver(&be_driver);
}
module_exit(be_exit_module);