blob: fc119d1f542bb3cdc20aee3f482dbdff4ae6ab58 [file] [log] [blame]
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001/*
Ajit Khaparde294aedc2010-02-19 13:54:58 +00002 * Copyright (C) 2005 - 2010 ServerEngines
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003 * All rights reserved.
4 *
5 * This program is free software; you can redistribute it and/or
6 * modify it under the terms of the GNU General Public License version 2
7 * as published by the Free Software Foundation. The full GNU General
8 * Public License is included in this distribution in the file called COPYING.
9 *
10 * Contact Information:
11 * linux-drivers@serverengines.com
12 *
13 * ServerEngines
14 * 209 N. Fair Oaks Ave
15 * Sunnyvale, CA 94085
16 */
17
18#include "be.h"
Sathya Perla8788fdc2009-07-27 22:52:03 +000019#include "be_cmds.h"
Stephen Hemminger65f71b82009-03-27 00:25:24 -070020#include <asm/div64.h>
Sathya Perla6b7c5b92009-03-11 23:32:03 -070021
22MODULE_VERSION(DRV_VER);
23MODULE_DEVICE_TABLE(pci, be_dev_ids);
24MODULE_DESCRIPTION(DRV_DESC " " DRV_VER);
25MODULE_AUTHOR("ServerEngines Corporation");
26MODULE_LICENSE("GPL");
27
28static unsigned int rx_frag_size = 2048;
Sarveshwar Bandiba343c72010-03-31 02:56:12 +000029static unsigned int num_vfs;
Sathya Perla6b7c5b92009-03-11 23:32:03 -070030module_param(rx_frag_size, uint, S_IRUGO);
Sarveshwar Bandiba343c72010-03-31 02:56:12 +000031module_param(num_vfs, uint, S_IRUGO);
Sathya Perla6b7c5b92009-03-11 23:32:03 -070032MODULE_PARM_DESC(rx_frag_size, "Size of a fragment that holds rcvd data.");
Sarveshwar Bandiba343c72010-03-31 02:56:12 +000033MODULE_PARM_DESC(num_vfs, "Number of PCI VFs to initialize");
Sathya Perla6b7c5b92009-03-11 23:32:03 -070034
Sathya Perla3abcded2010-10-03 22:12:27 -070035static bool multi_rxq = true;
36module_param(multi_rxq, bool, S_IRUGO | S_IWUSR);
37MODULE_PARM_DESC(multi_rxq, "Multi Rx Queue support. Enabled by default");
38
/* PCI IDs claimed by this driver: BladeEngine2/3 and the
 * OneConnect variants, plus an Emulex-vendor OneConnect device.
 * Exported below via MODULE_DEVICE_TABLE for modalias/hotplug matching.
 */
static DEFINE_PCI_DEVICE_TABLE(be_dev_ids) = {
	{ PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID1) },
	{ PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID2) },
	{ PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID1) },
	{ PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID2) },
	{ PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID3)},
	{ 0 }
};
47MODULE_DEVICE_TABLE(pci, be_dev_ids);
/* UE Status Low CSR: maps each bit position of the low Unrecoverable
 * Error status register to the name of the failing hardware block,
 * used when decoding/logging a UE.
 * NOTE(review): several entries carry a trailing space ("PTC " etc.);
 * presumably deliberate for log alignment -- confirm before changing.
 */
static char *ue_status_low_desc[] = {
	"CEV",
	"CTX",
	"DBUF",
	"ERX",
	"Host",
	"MPU",
	"NDMA",
	"PTC ",
	"RDMA ",
	"RXF ",
	"RXIPS ",
	"RXULP0 ",
	"RXULP1 ",
	"RXULP2 ",
	"TIM ",
	"TPOST ",
	"TPRE ",
	"TXIPS ",
	"TXULP0 ",
	"TXULP1 ",
	"UC ",
	"WDMA ",
	"TXULP2 ",
	"HOST1 ",
	"P0_OB_LINK ",
	"P1_OB_LINK ",
	"HOST_GPIO ",
	"MBOX ",
	"AXGMAC0",
	"AXGMAC1",
	"JTAG",
	"MPU_INTPEND"
};
/* UE Status High CSR: maps each bit position of the high Unrecoverable
 * Error status register to the name of the failing hardware block.
 * BUG FIX: the original was missing the comma after "NETC", so the
 * compiler concatenated it with the following literal into
 * "NETCUnknown" -- shifting every later entry down and leaving the
 * array one element short (31 instead of 32).
 */
static char *ue_status_hi_desc[] = {
	"LPCMEMHOST",
	"MGMT_MAC",
	"PCS0ONLINE",
	"MPU_IRAM",
	"PCS1ONLINE",
	"PCTL0",
	"PCTL1",
	"PMEM",
	"RR",
	"TXPB",
	"RXPP",
	"XAUI",
	"TXP",
	"ARM",
	"IPC",
	"HOST2",
	"HOST3",
	"HOST4",
	"HOST5",
	"HOST6",
	"HOST7",
	"HOST8",
	"HOST9",
	"NETC",
	"Unknown",
	"Unknown",
	"Unknown",
	"Unknown",
	"Unknown",
	"Unknown",
	"Unknown",
	"Unknown"
};
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700118
Sathya Perla3abcded2010-10-03 22:12:27 -0700119static inline bool be_multi_rxq(struct be_adapter *adapter)
120{
121 return (adapter->num_rx_qs > 1);
122}
123
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700124static void be_queue_free(struct be_adapter *adapter, struct be_queue_info *q)
125{
126 struct be_dma_mem *mem = &q->dma_mem;
127 if (mem->va)
Ivan Vecera2b7bceb2011-02-02 08:05:12 +0000128 dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
129 mem->dma);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700130}
131
132static int be_queue_alloc(struct be_adapter *adapter, struct be_queue_info *q,
133 u16 len, u16 entry_size)
134{
135 struct be_dma_mem *mem = &q->dma_mem;
136
137 memset(q, 0, sizeof(*q));
138 q->len = len;
139 q->entry_size = entry_size;
140 mem->size = len * entry_size;
Ivan Vecera2b7bceb2011-02-02 08:05:12 +0000141 mem->va = dma_alloc_coherent(&adapter->pdev->dev, mem->size, &mem->dma,
142 GFP_KERNEL);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700143 if (!mem->va)
144 return -1;
145 memset(mem->va, 0, mem->size);
146 return 0;
147}
148
Sathya Perla8788fdc2009-07-27 22:52:03 +0000149static void be_intr_set(struct be_adapter *adapter, bool enable)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700150{
Sathya Perla8788fdc2009-07-27 22:52:03 +0000151 u8 __iomem *addr = adapter->pcicfg + PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700152 u32 reg = ioread32(addr);
153 u32 enabled = reg & MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
Sathya Perla5f0b8492009-07-27 22:52:56 +0000154
Sathya Perlacf588472010-02-14 21:22:01 +0000155 if (adapter->eeh_err)
156 return;
157
Sathya Perla5f0b8492009-07-27 22:52:56 +0000158 if (!enabled && enable)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700159 reg |= MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
Sathya Perla5f0b8492009-07-27 22:52:56 +0000160 else if (enabled && !enable)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700161 reg &= ~MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
Sathya Perla5f0b8492009-07-27 22:52:56 +0000162 else
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700163 return;
Sathya Perla5f0b8492009-07-27 22:52:56 +0000164
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700165 iowrite32(reg, addr);
166}
167
Sathya Perla8788fdc2009-07-27 22:52:03 +0000168static void be_rxq_notify(struct be_adapter *adapter, u16 qid, u16 posted)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700169{
170 u32 val = 0;
171 val |= qid & DB_RQ_RING_ID_MASK;
172 val |= posted << DB_RQ_NUM_POSTED_SHIFT;
Sathya Perlaf3eb62d2010-06-29 00:11:17 +0000173
174 wmb();
Sathya Perla8788fdc2009-07-27 22:52:03 +0000175 iowrite32(val, adapter->db + DB_RQ_OFFSET);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700176}
177
Sathya Perla8788fdc2009-07-27 22:52:03 +0000178static void be_txq_notify(struct be_adapter *adapter, u16 qid, u16 posted)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700179{
180 u32 val = 0;
181 val |= qid & DB_TXULP_RING_ID_MASK;
182 val |= (posted & DB_TXULP_NUM_POSTED_MASK) << DB_TXULP_NUM_POSTED_SHIFT;
Sathya Perlaf3eb62d2010-06-29 00:11:17 +0000183
184 wmb();
Sathya Perla8788fdc2009-07-27 22:52:03 +0000185 iowrite32(val, adapter->db + DB_TXULP1_OFFSET);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700186}
187
Sathya Perla8788fdc2009-07-27 22:52:03 +0000188static void be_eq_notify(struct be_adapter *adapter, u16 qid,
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700189 bool arm, bool clear_int, u16 num_popped)
190{
191 u32 val = 0;
192 val |= qid & DB_EQ_RING_ID_MASK;
Sathya Perlafe6d2a32010-11-21 23:25:50 +0000193 val |= ((qid & DB_EQ_RING_ID_EXT_MASK) <<
194 DB_EQ_RING_ID_EXT_MASK_SHIFT);
Sathya Perlacf588472010-02-14 21:22:01 +0000195
196 if (adapter->eeh_err)
197 return;
198
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700199 if (arm)
200 val |= 1 << DB_EQ_REARM_SHIFT;
201 if (clear_int)
202 val |= 1 << DB_EQ_CLR_SHIFT;
203 val |= 1 << DB_EQ_EVNT_SHIFT;
204 val |= num_popped << DB_EQ_NUM_POPPED_SHIFT;
Sathya Perla8788fdc2009-07-27 22:52:03 +0000205 iowrite32(val, adapter->db + DB_EQ_OFFSET);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700206}
207
Sathya Perla8788fdc2009-07-27 22:52:03 +0000208void be_cq_notify(struct be_adapter *adapter, u16 qid, bool arm, u16 num_popped)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700209{
210 u32 val = 0;
211 val |= qid & DB_CQ_RING_ID_MASK;
Sathya Perlafe6d2a32010-11-21 23:25:50 +0000212 val |= ((qid & DB_CQ_RING_ID_EXT_MASK) <<
213 DB_CQ_RING_ID_EXT_MASK_SHIFT);
Sathya Perlacf588472010-02-14 21:22:01 +0000214
215 if (adapter->eeh_err)
216 return;
217
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700218 if (arm)
219 val |= 1 << DB_CQ_REARM_SHIFT;
220 val |= num_popped << DB_CQ_NUM_POPPED_SHIFT;
Sathya Perla8788fdc2009-07-27 22:52:03 +0000221 iowrite32(val, adapter->db + DB_CQ_OFFSET);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700222}
223
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700224static int be_mac_addr_set(struct net_device *netdev, void *p)
225{
226 struct be_adapter *adapter = netdev_priv(netdev);
227 struct sockaddr *addr = p;
228 int status = 0;
229
Ajit Khapardeca9e4982009-11-29 17:56:26 +0000230 if (!is_valid_ether_addr(addr->sa_data))
231 return -EADDRNOTAVAIL;
232
Sarveshwar Bandiba343c72010-03-31 02:56:12 +0000233 /* MAC addr configuration will be done in hardware for VFs
234 * by their corresponding PFs. Just copy to netdev addr here
235 */
236 if (!be_physfn(adapter))
237 goto netdev_addr;
238
Sathya Perlaa65027e2009-08-17 00:58:04 +0000239 status = be_cmd_pmac_del(adapter, adapter->if_handle, adapter->pmac_id);
240 if (status)
241 return status;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700242
Sathya Perlaa65027e2009-08-17 00:58:04 +0000243 status = be_cmd_pmac_add(adapter, (u8 *)addr->sa_data,
244 adapter->if_handle, &adapter->pmac_id);
Sarveshwar Bandiba343c72010-03-31 02:56:12 +0000245netdev_addr:
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700246 if (!status)
247 memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
248
249 return status;
250}
251
Sathya Perlab31c50a2009-09-17 10:30:13 -0700252void netdev_stats_update(struct be_adapter *adapter)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700253{
Sathya Perla3abcded2010-10-03 22:12:27 -0700254 struct be_hw_stats *hw_stats = hw_stats_from_cmd(adapter->stats_cmd.va);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700255 struct be_rxf_stats *rxf_stats = &hw_stats->rxf;
256 struct be_port_rxf_stats *port_stats =
257 &rxf_stats->port[adapter->port_num];
Ajit Khaparde78122a52009-10-07 03:11:20 -0700258 struct net_device_stats *dev_stats = &adapter->netdev->stats;
Sathya Perla68110862009-06-10 02:21:16 +0000259 struct be_erx_stats *erx_stats = &hw_stats->erx;
Sathya Perla3abcded2010-10-03 22:12:27 -0700260 struct be_rx_obj *rxo;
261 int i;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700262
Sathya Perla3abcded2010-10-03 22:12:27 -0700263 memset(dev_stats, 0, sizeof(*dev_stats));
264 for_all_rx_queues(adapter, rxo, i) {
265 dev_stats->rx_packets += rx_stats(rxo)->rx_pkts;
266 dev_stats->rx_bytes += rx_stats(rxo)->rx_bytes;
267 dev_stats->multicast += rx_stats(rxo)->rx_mcast_pkts;
268 /* no space in linux buffers: best possible approximation */
269 dev_stats->rx_dropped +=
270 erx_stats->rx_drops_no_fragments[rxo->q.id];
271 }
272
273 dev_stats->tx_packets = tx_stats(adapter)->be_tx_pkts;
274 dev_stats->tx_bytes = tx_stats(adapter)->be_tx_bytes;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700275
276 /* bad pkts received */
277 dev_stats->rx_errors = port_stats->rx_crc_errors +
278 port_stats->rx_alignment_symbol_errors +
279 port_stats->rx_in_range_errors +
Sathya Perla68110862009-06-10 02:21:16 +0000280 port_stats->rx_out_range_errors +
281 port_stats->rx_frame_too_long +
282 port_stats->rx_dropped_too_small +
283 port_stats->rx_dropped_too_short +
284 port_stats->rx_dropped_header_too_small +
285 port_stats->rx_dropped_tcp_length +
286 port_stats->rx_dropped_runt +
287 port_stats->rx_tcp_checksum_errs +
288 port_stats->rx_ip_checksum_errs +
289 port_stats->rx_udp_checksum_errs;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700290
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700291 /* detailed rx errors */
292 dev_stats->rx_length_errors = port_stats->rx_in_range_errors +
Sathya Perla68110862009-06-10 02:21:16 +0000293 port_stats->rx_out_range_errors +
294 port_stats->rx_frame_too_long;
295
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700296 dev_stats->rx_crc_errors = port_stats->rx_crc_errors;
297
298 /* frame alignment errors */
299 dev_stats->rx_frame_errors = port_stats->rx_alignment_symbol_errors;
Sathya Perla68110862009-06-10 02:21:16 +0000300
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700301 /* receiver fifo overrun */
302 /* drops_no_pbuf is no per i/f, it's per BE card */
303 dev_stats->rx_fifo_errors = port_stats->rx_fifo_overflow +
304 port_stats->rx_input_fifo_overflow +
305 rxf_stats->rx_drops_no_pbuf;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700306}
307
Sathya Perla8788fdc2009-07-27 22:52:03 +0000308void be_link_status_update(struct be_adapter *adapter, bool link_up)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700309{
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700310 struct net_device *netdev = adapter->netdev;
311
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700312 /* If link came up or went down */
Sathya Perlaa8f447bd2009-06-18 00:10:27 +0000313 if (adapter->link_up != link_up) {
Ajit Khaparde0dffc832009-11-29 17:57:46 +0000314 adapter->link_speed = -1;
Sathya Perlaa8f447bd2009-06-18 00:10:27 +0000315 if (link_up) {
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700316 netif_carrier_on(netdev);
317 printk(KERN_INFO "%s: Link up\n", netdev->name);
Sathya Perlaa8f447bd2009-06-18 00:10:27 +0000318 } else {
Sathya Perlaa8f447bd2009-06-18 00:10:27 +0000319 netif_carrier_off(netdev);
320 printk(KERN_INFO "%s: Link down\n", netdev->name);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700321 }
Sathya Perlaa8f447bd2009-06-18 00:10:27 +0000322 adapter->link_up = link_up;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700323 }
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700324}
325
326/* Update the EQ delay n BE based on the RX frags consumed / sec */
Sathya Perla3abcded2010-10-03 22:12:27 -0700327static void be_rx_eqd_update(struct be_adapter *adapter, struct be_rx_obj *rxo)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700328{
Sathya Perla3abcded2010-10-03 22:12:27 -0700329 struct be_eq_obj *rx_eq = &rxo->rx_eq;
330 struct be_rx_stats *stats = &rxo->stats;
Sathya Perla4097f662009-03-24 16:40:13 -0700331 ulong now = jiffies;
332 u32 eqd;
333
334 if (!rx_eq->enable_aic)
335 return;
336
337 /* Wrapped around */
338 if (time_before(now, stats->rx_fps_jiffies)) {
339 stats->rx_fps_jiffies = now;
340 return;
341 }
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700342
343 /* Update once a second */
Sathya Perla4097f662009-03-24 16:40:13 -0700344 if ((now - stats->rx_fps_jiffies) < HZ)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700345 return;
346
Sathya Perla3abcded2010-10-03 22:12:27 -0700347 stats->rx_fps = (stats->rx_frags - stats->prev_rx_frags) /
Sathya Perla4097f662009-03-24 16:40:13 -0700348 ((now - stats->rx_fps_jiffies) / HZ);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700349
Sathya Perla4097f662009-03-24 16:40:13 -0700350 stats->rx_fps_jiffies = now;
Sathya Perla3abcded2010-10-03 22:12:27 -0700351 stats->prev_rx_frags = stats->rx_frags;
352 eqd = stats->rx_fps / 110000;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700353 eqd = eqd << 3;
354 if (eqd > rx_eq->max_eqd)
355 eqd = rx_eq->max_eqd;
356 if (eqd < rx_eq->min_eqd)
357 eqd = rx_eq->min_eqd;
358 if (eqd < 10)
359 eqd = 0;
360 if (eqd != rx_eq->cur_eqd)
Sathya Perla8788fdc2009-07-27 22:52:03 +0000361 be_cmd_modify_eqd(adapter, rx_eq->q.id, eqd);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700362
363 rx_eq->cur_eqd = eqd;
364}
365
Stephen Hemminger65f71b82009-03-27 00:25:24 -0700366static u32 be_calc_rate(u64 bytes, unsigned long ticks)
367{
368 u64 rate = bytes;
369
370 do_div(rate, ticks / HZ);
371 rate <<= 3; /* bytes/sec -> bits/sec */
372 do_div(rate, 1000000ul); /* MB/Sec */
373
374 return rate;
375}
376
Sathya Perla4097f662009-03-24 16:40:13 -0700377static void be_tx_rate_update(struct be_adapter *adapter)
378{
Sathya Perla3abcded2010-10-03 22:12:27 -0700379 struct be_tx_stats *stats = tx_stats(adapter);
Sathya Perla4097f662009-03-24 16:40:13 -0700380 ulong now = jiffies;
381
382 /* Wrapped around? */
383 if (time_before(now, stats->be_tx_jiffies)) {
384 stats->be_tx_jiffies = now;
385 return;
386 }
387
388 /* Update tx rate once in two seconds */
389 if ((now - stats->be_tx_jiffies) > 2 * HZ) {
Stephen Hemminger65f71b82009-03-27 00:25:24 -0700390 stats->be_tx_rate = be_calc_rate(stats->be_tx_bytes
391 - stats->be_tx_bytes_prev,
392 now - stats->be_tx_jiffies);
Sathya Perla4097f662009-03-24 16:40:13 -0700393 stats->be_tx_jiffies = now;
394 stats->be_tx_bytes_prev = stats->be_tx_bytes;
395 }
396}
397
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700398static void be_tx_stats_update(struct be_adapter *adapter,
Ajit Khaparde91992e42010-02-19 13:57:12 +0000399 u32 wrb_cnt, u32 copied, u32 gso_segs, bool stopped)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700400{
Sathya Perla3abcded2010-10-03 22:12:27 -0700401 struct be_tx_stats *stats = tx_stats(adapter);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700402 stats->be_tx_reqs++;
403 stats->be_tx_wrbs += wrb_cnt;
404 stats->be_tx_bytes += copied;
Ajit Khaparde91992e42010-02-19 13:57:12 +0000405 stats->be_tx_pkts += (gso_segs ? gso_segs : 1);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700406 if (stopped)
407 stats->be_tx_stops++;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700408}
409
410/* Determine number of WRB entries needed to xmit data in an skb */
Sathya Perlafe6d2a32010-11-21 23:25:50 +0000411static u32 wrb_cnt_for_skb(struct be_adapter *adapter, struct sk_buff *skb,
412 bool *dummy)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700413{
David S. Millerebc8d2a2009-06-09 01:01:31 -0700414 int cnt = (skb->len > skb->data_len);
415
416 cnt += skb_shinfo(skb)->nr_frags;
417
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700418 /* to account for hdr wrb */
419 cnt++;
Sathya Perlafe6d2a32010-11-21 23:25:50 +0000420 if (lancer_chip(adapter) || !(cnt & 1)) {
421 *dummy = false;
422 } else {
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700423 /* add a dummy to make it an even num */
424 cnt++;
425 *dummy = true;
Sathya Perlafe6d2a32010-11-21 23:25:50 +0000426 }
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700427 BUG_ON(cnt > BE_MAX_TX_FRAG_COUNT);
428 return cnt;
429}
430
431static inline void wrb_fill(struct be_eth_wrb *wrb, u64 addr, int len)
432{
433 wrb->frag_pa_hi = upper_32_bits(addr);
434 wrb->frag_pa_lo = addr & 0xFFFFFFFF;
435 wrb->frag_len = len & ETH_WRB_FRAG_LEN_MASK;
436}
437
/* Build the per-packet tx header WRB: checksum-offload / LSO flags,
 * vlan insertion, and the total wrb count and byte length of this
 * transmit request.
 */
static void wrb_fill_hdr(struct be_adapter *adapter, struct be_eth_hdr_wrb *hdr,
		struct sk_buff *skb, u32 wrb_cnt, u32 len)
{
	u8 vlan_prio = 0;
	u16 vlan_tag = 0;

	memset(hdr, 0, sizeof(*hdr));

	AMAP_SET_BITS(struct amap_eth_hdr_wrb, crc, hdr, 1);

	if (skb_is_gso(skb)) {
		AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso, hdr, 1);
		AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso_mss,
			hdr, skb_shinfo(skb)->gso_size);
		if (skb_is_gso_v6(skb) && !lancer_chip(adapter))
			AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso6, hdr, 1);
		/* Lancer A0 additionally wants explicit csum bits with LSO */
		if (lancer_chip(adapter) && adapter->sli_family ==
			LANCER_A0_SLI_FAMILY) {
			AMAP_SET_BITS(struct amap_eth_hdr_wrb, ipcs, hdr, 1);
			if (is_tcp_pkt(skb))
				AMAP_SET_BITS(struct amap_eth_hdr_wrb,
					tcpcs, hdr, 1);
			else if (is_udp_pkt(skb))
				AMAP_SET_BITS(struct amap_eth_hdr_wrb,
					udpcs, hdr, 1);
		}
	} else if (skb->ip_summed == CHECKSUM_PARTIAL) {
		if (is_tcp_pkt(skb))
			AMAP_SET_BITS(struct amap_eth_hdr_wrb, tcpcs, hdr, 1);
		else if (is_udp_pkt(skb))
			AMAP_SET_BITS(struct amap_eth_hdr_wrb, udpcs, hdr, 1);
	}

	if (adapter->vlan_grp && vlan_tx_tag_present(skb)) {
		AMAP_SET_BITS(struct amap_eth_hdr_wrb, vlan, hdr, 1);
		vlan_tag = vlan_tx_tag_get(skb);
		vlan_prio = (vlan_tag & VLAN_PRIO_MASK) >> VLAN_PRIO_SHIFT;
		/* If vlan priority provided by OS is NOT in available bmap */
		if (!(adapter->vlan_prio_bmap & (1 << vlan_prio)))
			vlan_tag = (vlan_tag & ~VLAN_PRIO_MASK) |
					adapter->recommended_prio;
		AMAP_SET_BITS(struct amap_eth_hdr_wrb, vlan_tag, hdr, vlan_tag);
	}

	AMAP_SET_BITS(struct amap_eth_hdr_wrb, event, hdr, 1);
	AMAP_SET_BITS(struct amap_eth_hdr_wrb, complete, hdr, 1);
	AMAP_SET_BITS(struct amap_eth_hdr_wrb, num_wrb, hdr, wrb_cnt);
	AMAP_SET_BITS(struct amap_eth_hdr_wrb, len, hdr, len);
}
487
Ivan Vecera2b7bceb2011-02-02 08:05:12 +0000488static void unmap_tx_frag(struct device *dev, struct be_eth_wrb *wrb,
Sathya Perla7101e112010-03-22 20:41:12 +0000489 bool unmap_single)
490{
491 dma_addr_t dma;
492
493 be_dws_le_to_cpu(wrb, sizeof(*wrb));
494
495 dma = (u64)wrb->frag_pa_hi << 32 | (u64)wrb->frag_pa_lo;
FUJITA Tomonorib681ee72010-04-04 21:40:18 +0000496 if (wrb->frag_len) {
Sathya Perla7101e112010-03-22 20:41:12 +0000497 if (unmap_single)
Ivan Vecera2b7bceb2011-02-02 08:05:12 +0000498 dma_unmap_single(dev, dma, wrb->frag_len,
499 DMA_TO_DEVICE);
Sathya Perla7101e112010-03-22 20:41:12 +0000500 else
Ivan Vecera2b7bceb2011-02-02 08:05:12 +0000501 dma_unmap_page(dev, dma, wrb->frag_len, DMA_TO_DEVICE);
Sathya Perla7101e112010-03-22 20:41:12 +0000502 }
503}
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700504
/* Map the skb (linear part + page frags) into tx WRBs and fill in the
 * header WRB.  Returns the number of data bytes mapped, or 0 if any
 * dma mapping failed -- in which case all earlier mappings are undone
 * and txq->head is rolled back.
 */
static int make_tx_wrbs(struct be_adapter *adapter,
		struct sk_buff *skb, u32 wrb_cnt, bool dummy_wrb)
{
	dma_addr_t busaddr;
	int i, copied = 0;
	struct device *dev = &adapter->pdev->dev;
	struct sk_buff *first_skb = skb;
	struct be_queue_info *txq = &adapter->tx_obj.q;
	struct be_eth_wrb *wrb;
	struct be_eth_hdr_wrb *hdr;
	bool map_single = false;
	u16 map_head;

	/* reserve the head entry for the header WRB; it is written last,
	 * once the total copied length is known
	 */
	hdr = queue_head_node(txq);
	queue_head_inc(txq);
	map_head = txq->head;	/* rollback point on dma error */

	/* linear portion of the skb, if any */
	if (skb->len > skb->data_len) {
		int len = skb_headlen(skb);
		busaddr = dma_map_single(dev, skb->data, len, DMA_TO_DEVICE);
		if (dma_mapping_error(dev, busaddr))
			goto dma_err;
		map_single = true;
		wrb = queue_head_node(txq);
		wrb_fill(wrb, busaddr, len);
		be_dws_cpu_to_le(wrb, sizeof(*wrb));
		queue_head_inc(txq);
		copied += len;
	}

	/* one WRB per page fragment */
	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		struct skb_frag_struct *frag =
			&skb_shinfo(skb)->frags[i];
		busaddr = dma_map_page(dev, frag->page, frag->page_offset,
				       frag->size, DMA_TO_DEVICE);
		if (dma_mapping_error(dev, busaddr))
			goto dma_err;
		wrb = queue_head_node(txq);
		wrb_fill(wrb, busaddr, frag->size);
		be_dws_cpu_to_le(wrb, sizeof(*wrb));
		queue_head_inc(txq);
		copied += frag->size;
	}

	/* pad to an even WRB count if the caller asked for it */
	if (dummy_wrb) {
		wrb = queue_head_node(txq);
		wrb_fill(wrb, 0, 0);
		be_dws_cpu_to_le(wrb, sizeof(*wrb));
		queue_head_inc(txq);
	}

	wrb_fill_hdr(adapter, hdr, first_skb, wrb_cnt, copied);
	be_dws_cpu_to_le(hdr, sizeof(*hdr));

	return copied;
dma_err:
	/* unwind: unmap everything mapped so far, restore the queue head */
	txq->head = map_head;
	while (copied) {
		wrb = queue_head_node(txq);
		unmap_tx_frag(dev, wrb, map_single);
		map_single = false;	/* only the first wrb was map_single */
		copied -= wrb->frag_len;
		queue_head_inc(txq);
	}
	return 0;
}
571
/* ndo_start_xmit handler: build WRBs for the skb, post them and ring
 * the tx doorbell.  Always consumes the skb (dropped on dma-map
 * failure) and always returns NETDEV_TX_OK.
 */
static netdev_tx_t be_xmit(struct sk_buff *skb,
			struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_tx_obj *tx_obj = &adapter->tx_obj;
	struct be_queue_info *txq = &tx_obj->q;
	u32 wrb_cnt = 0, copied = 0;
	u32 start = txq->head;
	bool dummy_wrb, stopped = false;

	wrb_cnt = wrb_cnt_for_skb(adapter, skb, &dummy_wrb);

	copied = make_tx_wrbs(adapter, skb, wrb_cnt, dummy_wrb);
	if (copied) {
		/* record the sent skb in the sent_skb table */
		BUG_ON(tx_obj->sent_skb_list[start]);
		tx_obj->sent_skb_list[start] = skb;

		/* Ensure txq has space for the next skb; Else stop the queue
		 * *BEFORE* ringing the tx doorbell, so that we serialze the
		 * tx compls of the current transmit which'll wake up the queue
		 */
		atomic_add(wrb_cnt, &txq->used);
		if ((BE_MAX_TX_FRAG_COUNT + atomic_read(&txq->used)) >=
								txq->len) {
			netif_stop_queue(netdev);
			stopped = true;
		}

		be_txq_notify(adapter, txq->id, wrb_cnt);

		be_tx_stats_update(adapter, wrb_cnt, copied,
				skb_shinfo(skb)->gso_segs, stopped);
	} else {
		/* dma mapping failed: roll back the queue, drop the packet */
		txq->head = start;
		dev_kfree_skb_any(skb);
	}
	return NETDEV_TX_OK;
}
611
612static int be_change_mtu(struct net_device *netdev, int new_mtu)
613{
614 struct be_adapter *adapter = netdev_priv(netdev);
615 if (new_mtu < BE_MIN_MTU ||
Ajit Khaparde34a89b82010-02-09 01:32:43 +0000616 new_mtu > (BE_MAX_JUMBO_FRAME_SIZE -
617 (ETH_HLEN + ETH_FCS_LEN))) {
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700618 dev_info(&adapter->pdev->dev,
619 "MTU must be between %d and %d bytes\n",
Ajit Khaparde34a89b82010-02-09 01:32:43 +0000620 BE_MIN_MTU,
621 (BE_MAX_JUMBO_FRAME_SIZE - (ETH_HLEN + ETH_FCS_LEN)));
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700622 return -EINVAL;
623 }
624 dev_info(&adapter->pdev->dev, "MTU changed from %d to %d bytes\n",
625 netdev->mtu, new_mtu);
626 netdev->mtu = new_mtu;
627 return 0;
628}
629
630/*
Ajit Khaparde82903e42010-02-09 01:34:57 +0000631 * A max of 64 (BE_NUM_VLANS_SUPPORTED) vlans can be configured in BE.
632 * If the user configures more, place BE in vlan promiscuous mode.
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700633 */
Ajit Khaparde1da87b72010-07-23 01:51:22 +0000634static int be_vid_config(struct be_adapter *adapter, bool vf, u32 vf_num)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700635{
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700636 u16 vtag[BE_NUM_VLANS_SUPPORTED];
637 u16 ntags = 0, i;
Ajit Khaparde82903e42010-02-09 01:34:57 +0000638 int status = 0;
Ajit Khaparde1da87b72010-07-23 01:51:22 +0000639 u32 if_handle;
640
641 if (vf) {
642 if_handle = adapter->vf_cfg[vf_num].vf_if_handle;
643 vtag[0] = cpu_to_le16(adapter->vf_cfg[vf_num].vf_vlan_tag);
644 status = be_cmd_vlan_config(adapter, if_handle, vtag, 1, 1, 0);
645 }
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700646
Ajit Khaparde82903e42010-02-09 01:34:57 +0000647 if (adapter->vlans_added <= adapter->max_vlans) {
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700648 /* Construct VLAN Table to give to HW */
Jesse Grossb7381272010-10-20 13:56:02 +0000649 for (i = 0; i < VLAN_N_VID; i++) {
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700650 if (adapter->vlan_tag[i]) {
651 vtag[ntags] = cpu_to_le16(i);
652 ntags++;
653 }
654 }
Sathya Perlab31c50a2009-09-17 10:30:13 -0700655 status = be_cmd_vlan_config(adapter, adapter->if_handle,
656 vtag, ntags, 1, 0);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700657 } else {
Sathya Perlab31c50a2009-09-17 10:30:13 -0700658 status = be_cmd_vlan_config(adapter, adapter->if_handle,
659 NULL, 0, 1, 1);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700660 }
Ajit Khaparde1da87b72010-07-23 01:51:22 +0000661
Sathya Perlab31c50a2009-09-17 10:30:13 -0700662 return status;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700663}
664
665static void be_vlan_register(struct net_device *netdev, struct vlan_group *grp)
666{
667 struct be_adapter *adapter = netdev_priv(netdev);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700668
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700669 adapter->vlan_grp = grp;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700670}
671
672static void be_vlan_add_vid(struct net_device *netdev, u16 vid)
673{
674 struct be_adapter *adapter = netdev_priv(netdev);
675
Ajit Khaparde1da87b72010-07-23 01:51:22 +0000676 adapter->vlans_added++;
Sarveshwar Bandiba343c72010-03-31 02:56:12 +0000677 if (!be_physfn(adapter))
678 return;
679
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700680 adapter->vlan_tag[vid] = 1;
Ajit Khaparde82903e42010-02-09 01:34:57 +0000681 if (adapter->vlans_added <= (adapter->max_vlans + 1))
Ajit Khaparde1da87b72010-07-23 01:51:22 +0000682 be_vid_config(adapter, false, 0);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700683}
684
685static void be_vlan_rem_vid(struct net_device *netdev, u16 vid)
686{
687 struct be_adapter *adapter = netdev_priv(netdev);
688
Ajit Khaparde1da87b72010-07-23 01:51:22 +0000689 adapter->vlans_added--;
690 vlan_group_set_device(adapter->vlan_grp, vid, NULL);
691
Sarveshwar Bandiba343c72010-03-31 02:56:12 +0000692 if (!be_physfn(adapter))
693 return;
694
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700695 adapter->vlan_tag[vid] = 0;
Ajit Khaparde82903e42010-02-09 01:34:57 +0000696 if (adapter->vlans_added <= adapter->max_vlans)
Ajit Khaparde1da87b72010-07-23 01:51:22 +0000697 be_vid_config(adapter, false, 0);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700698}
699
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700700static void be_set_multicast_list(struct net_device *netdev)
701{
702 struct be_adapter *adapter = netdev_priv(netdev);
703
704 if (netdev->flags & IFF_PROMISC) {
Sathya Perla8788fdc2009-07-27 22:52:03 +0000705 be_cmd_promiscuous_config(adapter, adapter->port_num, 1);
Sathya Perla24307ee2009-06-18 00:09:25 +0000706 adapter->promiscuous = true;
707 goto done;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700708 }
Sathya Perla24307ee2009-06-18 00:09:25 +0000709
710 /* BE was previously in promiscous mode; disable it */
711 if (adapter->promiscuous) {
712 adapter->promiscuous = false;
Sathya Perla8788fdc2009-07-27 22:52:03 +0000713 be_cmd_promiscuous_config(adapter, adapter->port_num, 0);
Sathya Perla24307ee2009-06-18 00:09:25 +0000714 }
715
Sathya Perlae7b909a2009-11-22 22:01:10 +0000716 /* Enable multicast promisc if num configured exceeds what we support */
Jiri Pirko4cd24ea2010-02-08 04:30:35 +0000717 if (netdev->flags & IFF_ALLMULTI ||
718 netdev_mc_count(netdev) > BE_MAX_MC) {
Jiri Pirko0ddf4772010-02-20 00:13:58 +0000719 be_cmd_multicast_set(adapter, adapter->if_handle, NULL,
Sathya Perlae7b909a2009-11-22 22:01:10 +0000720 &adapter->mc_cmd_mem);
Sathya Perla24307ee2009-06-18 00:09:25 +0000721 goto done;
722 }
723
Jiri Pirko0ddf4772010-02-20 00:13:58 +0000724 be_cmd_multicast_set(adapter, adapter->if_handle, netdev,
Sathya Perlaf31e50a2010-03-02 03:56:39 -0800725 &adapter->mc_cmd_mem);
Sathya Perla24307ee2009-06-18 00:09:25 +0000726done:
727 return;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700728}
729
Sarveshwar Bandiba343c72010-03-31 02:56:12 +0000730static int be_set_vf_mac(struct net_device *netdev, int vf, u8 *mac)
731{
732 struct be_adapter *adapter = netdev_priv(netdev);
733 int status;
734
735 if (!adapter->sriov_enabled)
736 return -EPERM;
737
738 if (!is_valid_ether_addr(mac) || (vf >= num_vfs))
739 return -EINVAL;
740
Ajit Khaparde64600ea2010-07-23 01:50:34 +0000741 if (adapter->vf_cfg[vf].vf_pmac_id != BE_INVALID_PMAC_ID)
742 status = be_cmd_pmac_del(adapter,
743 adapter->vf_cfg[vf].vf_if_handle,
744 adapter->vf_cfg[vf].vf_pmac_id);
Sarveshwar Bandiba343c72010-03-31 02:56:12 +0000745
Ajit Khaparde64600ea2010-07-23 01:50:34 +0000746 status = be_cmd_pmac_add(adapter, mac,
747 adapter->vf_cfg[vf].vf_if_handle,
748 &adapter->vf_cfg[vf].vf_pmac_id);
749
750 if (status)
Sarveshwar Bandiba343c72010-03-31 02:56:12 +0000751 dev_err(&adapter->pdev->dev, "MAC %pM set on VF %d Failed\n",
752 mac, vf);
Ajit Khaparde64600ea2010-07-23 01:50:34 +0000753 else
754 memcpy(adapter->vf_cfg[vf].vf_mac_addr, mac, ETH_ALEN);
755
Sarveshwar Bandiba343c72010-03-31 02:56:12 +0000756 return status;
757}
758
Ajit Khaparde64600ea2010-07-23 01:50:34 +0000759static int be_get_vf_config(struct net_device *netdev, int vf,
760 struct ifla_vf_info *vi)
761{
762 struct be_adapter *adapter = netdev_priv(netdev);
763
764 if (!adapter->sriov_enabled)
765 return -EPERM;
766
767 if (vf >= num_vfs)
768 return -EINVAL;
769
770 vi->vf = vf;
Ajit Khapardee1d18732010-07-23 01:52:13 +0000771 vi->tx_rate = adapter->vf_cfg[vf].vf_tx_rate;
Ajit Khaparde1da87b72010-07-23 01:51:22 +0000772 vi->vlan = adapter->vf_cfg[vf].vf_vlan_tag;
Ajit Khaparde64600ea2010-07-23 01:50:34 +0000773 vi->qos = 0;
774 memcpy(&vi->mac, adapter->vf_cfg[vf].vf_mac_addr, ETH_ALEN);
775
776 return 0;
777}
778
Ajit Khaparde1da87b72010-07-23 01:51:22 +0000779static int be_set_vf_vlan(struct net_device *netdev,
780 int vf, u16 vlan, u8 qos)
781{
782 struct be_adapter *adapter = netdev_priv(netdev);
783 int status = 0;
784
785 if (!adapter->sriov_enabled)
786 return -EPERM;
787
788 if ((vf >= num_vfs) || (vlan > 4095))
789 return -EINVAL;
790
791 if (vlan) {
792 adapter->vf_cfg[vf].vf_vlan_tag = vlan;
793 adapter->vlans_added++;
794 } else {
795 adapter->vf_cfg[vf].vf_vlan_tag = 0;
796 adapter->vlans_added--;
797 }
798
799 status = be_vid_config(adapter, true, vf);
800
801 if (status)
802 dev_info(&adapter->pdev->dev,
803 "VLAN %d config on VF %d failed\n", vlan, vf);
804 return status;
805}
806
Ajit Khapardee1d18732010-07-23 01:52:13 +0000807static int be_set_vf_tx_rate(struct net_device *netdev,
808 int vf, int rate)
809{
810 struct be_adapter *adapter = netdev_priv(netdev);
811 int status = 0;
812
813 if (!adapter->sriov_enabled)
814 return -EPERM;
815
816 if ((vf >= num_vfs) || (rate < 0))
817 return -EINVAL;
818
819 if (rate > 10000)
820 rate = 10000;
821
822 adapter->vf_cfg[vf].vf_tx_rate = rate;
Ajit Khaparde856c4012011-02-11 13:32:32 +0000823 status = be_cmd_set_qos(adapter, rate / 10, vf + 1);
Ajit Khapardee1d18732010-07-23 01:52:13 +0000824
825 if (status)
826 dev_info(&adapter->pdev->dev,
827 "tx rate %d on VF %d failed\n", rate, vf);
828 return status;
829}
830
Sathya Perla3abcded2010-10-03 22:12:27 -0700831static void be_rx_rate_update(struct be_rx_obj *rxo)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700832{
Sathya Perla3abcded2010-10-03 22:12:27 -0700833 struct be_rx_stats *stats = &rxo->stats;
Sathya Perla4097f662009-03-24 16:40:13 -0700834 ulong now = jiffies;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700835
Sathya Perla4097f662009-03-24 16:40:13 -0700836 /* Wrapped around */
Sathya Perla3abcded2010-10-03 22:12:27 -0700837 if (time_before(now, stats->rx_jiffies)) {
838 stats->rx_jiffies = now;
Sathya Perla4097f662009-03-24 16:40:13 -0700839 return;
840 }
841
842 /* Update the rate once in two seconds */
Sathya Perla3abcded2010-10-03 22:12:27 -0700843 if ((now - stats->rx_jiffies) < 2 * HZ)
Sathya Perla4097f662009-03-24 16:40:13 -0700844 return;
845
Sathya Perla3abcded2010-10-03 22:12:27 -0700846 stats->rx_rate = be_calc_rate(stats->rx_bytes - stats->rx_bytes_prev,
847 now - stats->rx_jiffies);
848 stats->rx_jiffies = now;
849 stats->rx_bytes_prev = stats->rx_bytes;
Sathya Perla4097f662009-03-24 16:40:13 -0700850}
851
Sathya Perla3abcded2010-10-03 22:12:27 -0700852static void be_rx_stats_update(struct be_rx_obj *rxo,
Ajit Khaparde1ef78ab2010-09-03 06:17:10 +0000853 u32 pktsize, u16 numfrags, u8 pkt_type)
Sathya Perla4097f662009-03-24 16:40:13 -0700854{
Sathya Perla3abcded2010-10-03 22:12:27 -0700855 struct be_rx_stats *stats = &rxo->stats;
Sathya Perla4097f662009-03-24 16:40:13 -0700856
Sathya Perla3abcded2010-10-03 22:12:27 -0700857 stats->rx_compl++;
858 stats->rx_frags += numfrags;
859 stats->rx_bytes += pktsize;
860 stats->rx_pkts++;
Ajit Khaparde1ef78ab2010-09-03 06:17:10 +0000861 if (pkt_type == BE_MULTICAST_PACKET)
Sathya Perla3abcded2010-10-03 22:12:27 -0700862 stats->rx_mcast_pkts++;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700863}
864
Somnath Koturc6ce2f42010-10-25 01:11:58 +0000865static inline bool csum_passed(struct be_eth_rx_compl *rxcp)
Ajit Khaparde728a9972009-04-13 15:41:22 -0700866{
Somnath Koturc6ce2f42010-10-25 01:11:58 +0000867 u8 l4_cksm, ipv6, ipcksm;
Ajit Khaparde728a9972009-04-13 15:41:22 -0700868
869 l4_cksm = AMAP_GET_BITS(struct amap_eth_rx_compl, l4_cksm, rxcp);
870 ipcksm = AMAP_GET_BITS(struct amap_eth_rx_compl, ipcksm, rxcp);
Somnath Koturc6ce2f42010-10-25 01:11:58 +0000871 ipv6 = AMAP_GET_BITS(struct amap_eth_rx_compl, ip_version, rxcp);
Ajit Khaparde728a9972009-04-13 15:41:22 -0700872
Somnath Koturc6ce2f42010-10-25 01:11:58 +0000873 /* Ignore ipcksm for ipv6 pkts */
874 return l4_cksm && (ipcksm || ipv6);
Ajit Khaparde728a9972009-04-13 15:41:22 -0700875}
876
/* Fetch the page-info entry for the RX fragment at @frag_idx and account
 * for its consumption: when this fragment is the last user of its backing
 * big page, DMA-unmap the page (it was mapped in be_post_rx_frags()).
 * Decrements the posted-buffer count of the RX queue. */
static struct be_rx_page_info *
get_rx_page_info(struct be_adapter *adapter,
		struct be_rx_obj *rxo,
		u16 frag_idx)
{
	struct be_rx_page_info *rx_page_info;
	struct be_queue_info *rxq = &rxo->q;

	rx_page_info = &rxo->page_info_tbl[frag_idx];
	/* A completed fragment must always have a page attached */
	BUG_ON(!rx_page_info->page);

	if (rx_page_info->last_page_user) {
		/* Last fragment of this big page: undo the DMA mapping */
		dma_unmap_page(&adapter->pdev->dev,
			dma_unmap_addr(rx_page_info, bus),
			adapter->big_page_size, DMA_FROM_DEVICE);
		rx_page_info->last_page_user = false;
	}

	atomic_dec(&rxq->used);
	return rx_page_info;
}
898
/* Throwaway the data in the Rx completion: release every page backing the
 * fragments of this completion without building an skb. */
static void be_rx_compl_discard(struct be_adapter *adapter,
		struct be_rx_obj *rxo,
		struct be_eth_rx_compl *rxcp)
{
	struct be_queue_info *rxq = &rxo->q;
	struct be_rx_page_info *page_info;
	u16 rxq_idx, i, num_rcvd;

	rxq_idx = AMAP_GET_BITS(struct amap_eth_rx_compl, fragndx, rxcp);
	num_rcvd = AMAP_GET_BITS(struct amap_eth_rx_compl, numfrags, rxcp);

	/* Skip out-of-buffer compl(lancer) or flush compl(BE) */
	if (likely(rxq_idx != rxo->last_frag_index && num_rcvd != 0)) {

		/* Remember the index so a repeated compl for the same
		 * fragment (flush/out-of-buffer case) is not freed twice */
		rxo->last_frag_index = rxq_idx;

		for (i = 0; i < num_rcvd; i++) {
			page_info = get_rx_page_info(adapter, rxo, rxq_idx);
			put_page(page_info->page);
			memset(page_info, 0, sizeof(*page_info));
			index_inc(&rxq_idx, rxq->len);
		}
	}
}
924
/*
 * skb_fill_rx_data forms a complete skb for an ether frame
 * indicated by rxcp: the header portion is copied into the skb's linear
 * area and the remainder is attached as page fragments, coalescing
 * fragments that share a physical page into a single slot.
 * Updates the per-ring RX stats before returning.
 */
static void skb_fill_rx_data(struct be_adapter *adapter, struct be_rx_obj *rxo,
		struct sk_buff *skb, struct be_eth_rx_compl *rxcp,
		u16 num_rcvd)
{
	struct be_queue_info *rxq = &rxo->q;
	struct be_rx_page_info *page_info;
	u16 rxq_idx, i, j;
	u32 pktsize, hdr_len, curr_frag_len, size;
	u8 *start;
	u8 pkt_type;

	rxq_idx = AMAP_GET_BITS(struct amap_eth_rx_compl, fragndx, rxcp);
	pktsize = AMAP_GET_BITS(struct amap_eth_rx_compl, pktsize, rxcp);
	pkt_type = AMAP_GET_BITS(struct amap_eth_rx_compl, cast_enc, rxcp);

	page_info = get_rx_page_info(adapter, rxo, rxq_idx);

	start = page_address(page_info->page) + page_info->page_offset;
	prefetch(start);

	/* Copy data in the first descriptor of this completion */
	curr_frag_len = min(pktsize, rx_frag_size);

	/* Copy the header portion into skb_data */
	hdr_len = min((u32)BE_HDR_LEN, curr_frag_len);
	memcpy(skb->data, start, hdr_len);
	skb->len = curr_frag_len;
	if (curr_frag_len <= BE_HDR_LEN) { /* tiny packet */
		/* Complete packet has now been moved to data */
		put_page(page_info->page);
		skb->data_len = 0;
		skb->tail += curr_frag_len;
	} else {
		/* Header copied to linear area; rest of the first frag
		 * stays in the page and is referenced as frag[0] */
		skb_shinfo(skb)->nr_frags = 1;
		skb_shinfo(skb)->frags[0].page = page_info->page;
		skb_shinfo(skb)->frags[0].page_offset =
					page_info->page_offset + hdr_len;
		skb_shinfo(skb)->frags[0].size = curr_frag_len - hdr_len;
		skb->data_len = curr_frag_len - hdr_len;
		skb->tail += hdr_len;
	}
	page_info->page = NULL;

	if (pktsize <= rx_frag_size) {
		BUG_ON(num_rcvd != 1);
		goto done;
	}

	/* More frags present for this completion */
	size = pktsize;
	for (i = 1, j = 0; i < num_rcvd; i++) {
		size -= curr_frag_len;
		index_inc(&rxq_idx, rxq->len);
		page_info = get_rx_page_info(adapter, rxo, rxq_idx);

		curr_frag_len = min(size, rx_frag_size);

		/* Coalesce all frags from the same physical page in one slot */
		if (page_info->page_offset == 0) {
			/* Fresh page */
			j++;
			skb_shinfo(skb)->frags[j].page = page_info->page;
			skb_shinfo(skb)->frags[j].page_offset =
							page_info->page_offset;
			skb_shinfo(skb)->frags[j].size = 0;
			skb_shinfo(skb)->nr_frags++;
		} else {
			/* Same page as the previous frag: drop the extra
			 * reference taken when the frag was posted */
			put_page(page_info->page);
		}

		skb_shinfo(skb)->frags[j].size += curr_frag_len;
		skb->len += curr_frag_len;
		skb->data_len += curr_frag_len;

		page_info->page = NULL;
	}
	BUG_ON(j > MAX_SKB_FRAGS);

done:
	be_rx_stats_update(rxo, pktsize, num_rcvd, pkt_type);
}
1010
/* Process the RX completion indicated by rxcp when GRO is disabled:
 * allocate an skb, fill it from the posted RX pages, set the checksum
 * state and hand the packet to the stack (with vlan acceleration when a
 * valid tag is present). On skb allocation failure the completion's
 * buffers are discarded. */
static void be_rx_compl_process(struct be_adapter *adapter,
			struct be_rx_obj *rxo,
			struct be_eth_rx_compl *rxcp)
{
	struct sk_buff *skb;
	u32 vlanf, vid;
	u16 num_rcvd;
	u8 vtm;

	num_rcvd = AMAP_GET_BITS(struct amap_eth_rx_compl, numfrags, rxcp);

	skb = netdev_alloc_skb_ip_align(adapter->netdev, BE_HDR_LEN);
	if (unlikely(!skb)) {
		if (net_ratelimit())
			dev_warn(&adapter->pdev->dev, "skb alloc failed\n");
		be_rx_compl_discard(adapter, rxo, rxcp);
		return;
	}

	skb_fill_rx_data(adapter, rxo, skb, rxcp, num_rcvd);

	/* Let the stack verify checksums unless rx_csum offload is on and
	 * the HW says the checksums passed */
	if (likely(adapter->rx_csum && csum_passed(rxcp)))
		skb->ip_summed = CHECKSUM_UNNECESSARY;
	else
		skb_checksum_none_assert(skb);

	skb->truesize = skb->len + sizeof(struct sk_buff);
	skb->protocol = eth_type_trans(skb, adapter->netdev);

	vlanf = AMAP_GET_BITS(struct amap_eth_rx_compl, vtp, rxcp);
	vtm = AMAP_GET_BITS(struct amap_eth_rx_compl, vtm, rxcp);

	/* vlanf could be wrongly set in some cards.
	 * ignore if vtm is not set */
	if ((adapter->function_mode & 0x400) && !vtm)
		vlanf = 0;

	if (unlikely(vlanf)) {
		/* Tagged frame but no vlan configured: drop it */
		if (!adapter->vlan_grp || adapter->vlans_added == 0) {
			kfree_skb(skb);
			return;
		}
		vid = AMAP_GET_BITS(struct amap_eth_rx_compl, vlan_tag, rxcp);
		if (!lancer_chip(adapter))
			vid = swab16(vid);
		vlan_hwaccel_receive_skb(skb, adapter->vlan_grp, vid);
	} else {
		netif_receive_skb(skb);
	}
}
1062
/* Process the RX completion indicated by rxcp when GRO is enabled:
 * attach the posted RX pages directly as fragments of a NAPI skb
 * (no copy) and pass it up via napi_gro_frags()/vlan_gro_frags(). */
static void be_rx_compl_process_gro(struct be_adapter *adapter,
		struct be_rx_obj *rxo,
		struct be_eth_rx_compl *rxcp)
{
	struct be_rx_page_info *page_info;
	struct sk_buff *skb = NULL;
	struct be_queue_info *rxq = &rxo->q;
	struct be_eq_obj *eq_obj =  &rxo->rx_eq;
	u32 num_rcvd, pkt_size, remaining, vlanf, curr_frag_len;
	u16 i, rxq_idx = 0, vid, j;
	u8 vtm;
	u8 pkt_type;

	num_rcvd = AMAP_GET_BITS(struct amap_eth_rx_compl, numfrags, rxcp);
	pkt_size = AMAP_GET_BITS(struct amap_eth_rx_compl, pktsize, rxcp);
	vlanf = AMAP_GET_BITS(struct amap_eth_rx_compl, vtp, rxcp);
	rxq_idx = AMAP_GET_BITS(struct amap_eth_rx_compl, fragndx, rxcp);
	vtm = AMAP_GET_BITS(struct amap_eth_rx_compl, vtm, rxcp);
	pkt_type = AMAP_GET_BITS(struct amap_eth_rx_compl, cast_enc, rxcp);

	/* vlanf could be wrongly set in some cards.
	 * ignore if vtm is not set */
	if ((adapter->function_mode & 0x400) && !vtm)
		vlanf = 0;

	skb = napi_get_frags(&eq_obj->napi);
	if (!skb) {
		be_rx_compl_discard(adapter, rxo, rxcp);
		return;
	}

	remaining = pkt_size;
	for (i = 0, j = -1; i < num_rcvd; i++) {
		page_info = get_rx_page_info(adapter, rxo, rxq_idx);

		curr_frag_len = min(remaining, rx_frag_size);

		/* Coalesce all frags from the same physical page in one slot */
		if (i == 0 || page_info->page_offset == 0) {
			/* First frag or Fresh page */
			j++;
			skb_shinfo(skb)->frags[j].page = page_info->page;
			skb_shinfo(skb)->frags[j].page_offset =
							page_info->page_offset;
			skb_shinfo(skb)->frags[j].size = 0;
		} else {
			put_page(page_info->page);
		}
		skb_shinfo(skb)->frags[j].size += curr_frag_len;

		remaining -= curr_frag_len;
		index_inc(&rxq_idx, rxq->len);
		memset(page_info, 0, sizeof(*page_info));
	}
	BUG_ON(j > MAX_SKB_FRAGS);

	skb_shinfo(skb)->nr_frags = j + 1;
	skb->len = pkt_size;
	skb->data_len = pkt_size;
	skb->truesize += pkt_size;
	skb->ip_summed = CHECKSUM_UNNECESSARY;

	if (likely(!vlanf)) {
		napi_gro_frags(&eq_obj->napi);
	} else {
		vid = AMAP_GET_BITS(struct amap_eth_rx_compl, vlan_tag, rxcp);
		if (!lancer_chip(adapter))
			vid = swab16(vid);

		/* NOTE(review): this early return skips the
		 * be_rx_stats_update() below, so a tagged frame dropped
		 * here is not counted — confirm this is intended */
		if (!adapter->vlan_grp || adapter->vlans_added == 0)
			return;

		vlan_gro_frags(&eq_obj->napi, adapter->vlan_grp, vid);
	}

	be_rx_stats_update(rxo, pkt_size, num_rcvd, pkt_type);
}
1141
/* Return the next valid RX completion on rxo's CQ (converted to CPU
 * endianness) or NULL when none is pending. Advances the CQ tail. */
static struct be_eth_rx_compl *be_rx_compl_get(struct be_rx_obj *rxo)
{
	struct be_eth_rx_compl *rxcp = queue_tail_node(&rxo->cq);

	if (rxcp->dw[offsetof(struct amap_eth_rx_compl, valid) / 32] == 0)
		return NULL;

	/* Read the rest of the entry only after seeing the valid bit set */
	rmb();
	be_dws_le_to_cpu(rxcp, sizeof(*rxcp));

	queue_tail_inc(&rxo->cq);
	return rxcp;
}
1155
/* To reset the valid bit, we need to reset the whole word as
 * when walking the queue the valid entries are little-endian
 * and invalid entries are host endian.
 * Called after a completion has been fully consumed so that the entry is
 * seen as invalid the next time the CQ wraps around to it. */
static inline void be_rx_compl_reset(struct be_eth_rx_compl *rxcp)
{
	rxcp->dw[offsetof(struct amap_eth_rx_compl, valid) / 32] = 0;
}
1164
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001165static inline struct page *be_alloc_pages(u32 size)
1166{
1167 gfp_t alloc_flags = GFP_ATOMIC;
1168 u32 order = get_order(size);
1169 if (order > 0)
1170 alloc_flags |= __GFP_COMP;
1171 return alloc_pages(alloc_flags, order);
1172}
1173
/*
 * Allocate a page, split it to fragments of size rx_frag_size and post as
 * receive buffers to BE. One big page is shared by several fragments; the
 * fragment that crosses the page boundary marks last_page_user so the page
 * is DMA-unmapped only once, in get_rx_page_info().
 */
static void be_post_rx_frags(struct be_rx_obj *rxo)
{
	struct be_adapter *adapter = rxo->adapter;
	struct be_rx_page_info *page_info_tbl = rxo->page_info_tbl;
	struct be_rx_page_info *page_info = NULL, *prev_page_info = NULL;
	struct be_queue_info *rxq = &rxo->q;
	struct page *pagep = NULL;
	struct be_eth_rx_d *rxd;
	u64 page_dmaaddr = 0, frag_dmaaddr;
	u32 posted, page_offset = 0;

	page_info = &rxo->page_info_tbl[rxq->head];
	for (posted = 0; posted < MAX_RX_POST && !page_info->page; posted++) {
		if (!pagep) {
			/* Start a fresh big page for the next fragments */
			pagep = be_alloc_pages(adapter->big_page_size);
			if (unlikely(!pagep)) {
				rxo->stats.rx_post_fail++;
				break;
			}
			page_dmaaddr = dma_map_page(&adapter->pdev->dev, pagep,
						0, adapter->big_page_size,
						DMA_FROM_DEVICE);
			page_info->page_offset = 0;
		} else {
			/* Another fragment of the current big page */
			get_page(pagep);
			page_info->page_offset = page_offset + rx_frag_size;
		}
		page_offset = page_info->page_offset;
		page_info->page = pagep;
		dma_unmap_addr_set(page_info, bus, page_dmaaddr);
		frag_dmaaddr = page_dmaaddr + page_info->page_offset;

		rxd = queue_head_node(rxq);
		rxd->fragpa_lo = cpu_to_le32(frag_dmaaddr & 0xFFFFFFFF);
		rxd->fragpa_hi = cpu_to_le32(upper_32_bits(frag_dmaaddr));

		/* Any space left in the current big page for another frag? */
		if ((page_offset + rx_frag_size + rx_frag_size) >
					adapter->big_page_size) {
			pagep = NULL;
			page_info->last_page_user = true;
		}

		prev_page_info = page_info;
		queue_head_inc(rxq);
		page_info = &page_info_tbl[rxq->head];
	}
	if (pagep)
		prev_page_info->last_page_user = true;

	if (posted) {
		atomic_add(posted, &rxq->used);
		be_rxq_notify(adapter, rxq->id, posted);
	} else if (atomic_read(&rxq->used) == 0) {
		/* Let be_worker replenish when memory is available */
		rxo->rx_post_starved = true;
	}
}
1236
/* Return the next valid TX completion on @tx_cq (converted to CPU
 * endianness) or NULL when none is pending. Clears the valid word and
 * advances the CQ tail. */
static struct be_eth_tx_compl *be_tx_compl_get(struct be_queue_info *tx_cq)
{
	struct be_eth_tx_compl *txcp = queue_tail_node(tx_cq);

	if (txcp->dw[offsetof(struct amap_eth_tx_compl, valid) / 32] == 0)
		return NULL;

	/* Read the rest of the entry only after seeing the valid bit set */
	rmb();
	be_dws_le_to_cpu(txcp, sizeof(*txcp));

	txcp->dw[offsetof(struct amap_eth_tx_compl, valid) / 32] = 0;

	queue_tail_inc(tx_cq);
	return txcp;
}
1252
/* Reclaim all WRBs of the TX request ending at @last_index: DMA-unmap each
 * fragment (the header mapping only once), free the skb, and release the
 * consumed entries back to the TX queue. */
static void be_tx_compl_process(struct be_adapter *adapter, u16 last_index)
{
	struct be_queue_info *txq = &adapter->tx_obj.q;
	struct be_eth_wrb *wrb;
	struct sk_buff **sent_skbs = adapter->tx_obj.sent_skb_list;
	struct sk_buff *sent_skb;
	u16 cur_index, num_wrbs = 1; /* account for hdr wrb */
	bool unmap_skb_hdr = true;

	/* The skb is recorded at the position of its header wrb */
	sent_skb = sent_skbs[txq->tail];
	BUG_ON(!sent_skb);
	sent_skbs[txq->tail] = NULL;

	/* skip header wrb */
	queue_tail_inc(txq);

	do {
		cur_index = txq->tail;
		wrb = queue_tail_node(txq);
		unmap_tx_frag(&adapter->pdev->dev, wrb,
			(unmap_skb_hdr && skb_headlen(sent_skb)));
		unmap_skb_hdr = false;

		num_wrbs++;
		queue_tail_inc(txq);
	} while (cur_index != last_index);

	atomic_sub(num_wrbs, &txq->used);

	kfree_skb(sent_skb);
}
1284
/* Return the next pending entry on @eq_obj's event queue (with evt
 * converted to CPU endianness) or NULL when none. Advances the EQ tail;
 * the caller is expected to zero eqe->evt after consuming it. */
static inline struct be_eq_entry *event_get(struct be_eq_obj *eq_obj)
{
	struct be_eq_entry *eqe = queue_tail_node(&eq_obj->q);

	if (!eqe->evt)
		return NULL;

	/* Read the entry contents only after seeing evt become non-zero */
	rmb();
	eqe->evt = le32_to_cpu(eqe->evt);
	queue_tail_inc(&eq_obj->q);
	return eqe;
}
1297
1298static int event_handle(struct be_adapter *adapter,
1299 struct be_eq_obj *eq_obj)
1300{
1301 struct be_eq_entry *eqe;
1302 u16 num = 0;
1303
1304 while ((eqe = event_get(eq_obj)) != NULL) {
1305 eqe->evt = 0;
1306 num++;
1307 }
1308
1309 /* Deal with any spurious interrupts that come
1310 * without events
1311 */
1312 be_eq_notify(adapter, eq_obj->q.id, true, true, num);
1313 if (num)
1314 napi_schedule(&eq_obj->napi);
1315
1316 return num;
1317}
1318
1319/* Just read and notify events without processing them.
1320 * Used at the time of destroying event queues */
1321static void be_eq_clean(struct be_adapter *adapter,
1322 struct be_eq_obj *eq_obj)
1323{
1324 struct be_eq_entry *eqe;
1325 u16 num = 0;
1326
1327 while ((eqe = event_get(eq_obj)) != NULL) {
1328 eqe->evt = 0;
1329 num++;
1330 }
1331
1332 if (num)
1333 be_eq_notify(adapter, eq_obj->q.id, false, true, num);
1334}
1335
/* Flush an RX queue on teardown: first consume and discard every pending
 * completion on its CQ, then free the posted buffers that were never
 * consumed by the HW. */
static void be_rx_q_clean(struct be_adapter *adapter, struct be_rx_obj *rxo)
{
	struct be_rx_page_info *page_info;
	struct be_queue_info *rxq = &rxo->q;
	struct be_queue_info *rx_cq = &rxo->cq;
	struct be_eth_rx_compl *rxcp;
	u16 tail;

	/* First cleanup pending rx completions */
	while ((rxcp = be_rx_compl_get(rxo)) != NULL) {
		be_rx_compl_discard(adapter, rxo, rxcp);
		be_rx_compl_reset(rxcp);
		be_cq_notify(adapter, rx_cq->id, false, 1);
	}

	/* Then free posted rx buffer that were not used */
	tail = (rxq->head + rxq->len - atomic_read(&rxq->used)) % rxq->len;
	for (; atomic_read(&rxq->used) > 0; index_inc(&tail, rxq->len)) {
		page_info = get_rx_page_info(adapter, rxo, tail);
		put_page(page_info->page);
		memset(page_info, 0, sizeof(*page_info));
	}
	BUG_ON(atomic_read(&rxq->used));
}
1360
/* Drain the TX queue on teardown: poll the TX CQ for up to ~200ms to
 * reclaim in-flight requests, then forcibly free any posted requests whose
 * completions will never arrive. */
static void be_tx_compl_clean(struct be_adapter *adapter)
{
	struct be_queue_info *tx_cq = &adapter->tx_obj.cq;
	struct be_queue_info *txq = &adapter->tx_obj.q;
	struct be_eth_tx_compl *txcp;
	u16 end_idx, cmpl = 0, timeo = 0;
	struct sk_buff **sent_skbs = adapter->tx_obj.sent_skb_list;
	struct sk_buff *sent_skb;
	bool dummy_wrb;

	/* Wait for a max of 200ms for all the tx-completions to arrive. */
	do {
		while ((txcp = be_tx_compl_get(tx_cq))) {
			end_idx = AMAP_GET_BITS(struct amap_eth_tx_compl,
					wrb_index, txcp);
			be_tx_compl_process(adapter, end_idx);
			cmpl++;
		}
		if (cmpl) {
			be_cq_notify(adapter, tx_cq->id, false, cmpl);
			cmpl = 0;
		}

		if (atomic_read(&txq->used) == 0 || ++timeo > 200)
			break;

		mdelay(1);
	} while (true);

	if (atomic_read(&txq->used))
		dev_err(&adapter->pdev->dev, "%d pending tx-completions\n",
			atomic_read(&txq->used));

	/* free posted tx for which compls will never arrive */
	while (atomic_read(&txq->used)) {
		sent_skb = sent_skbs[txq->tail];
		end_idx = txq->tail;
		/* Compute the last wrb index of this request from its
		 * wrb count, then reclaim the whole request */
		index_adv(&end_idx,
			wrb_cnt_for_skb(adapter, sent_skb, &dummy_wrb) - 1,
			txq->len);
		be_tx_compl_process(adapter, end_idx);
	}
}
1404
/* Tear down the MCC (management command channel) queue pair.
 * The work queue (MCCQ) must be destroyed before its completion queue,
 * since the MCCQ references the CQ in hardware.
 */
static void be_mcc_queues_destroy(struct be_adapter *adapter)
{
	struct be_queue_info *q;

	q = &adapter->mcc_obj.q;
	if (q->created)
		be_cmd_q_destroy(adapter, q, QTYPE_MCCQ);
	be_queue_free(adapter, q);

	q = &adapter->mcc_obj.cq;
	if (q->created)
		be_cmd_q_destroy(adapter, q, QTYPE_CQ);
	be_queue_free(adapter, q);
}
1419
/* Must be called only after TX qs are created as MCC shares TX EQ.
 * Allocates and creates the MCC completion queue and work queue;
 * on any failure the already-created resources are unwound via gotos.
 * Returns 0 on success, -1 on failure.
 */
static int be_mcc_queues_create(struct be_adapter *adapter)
{
	struct be_queue_info *q, *cq;

	/* Alloc MCC compl queue */
	cq = &adapter->mcc_obj.cq;
	if (be_queue_alloc(adapter, cq, MCC_CQ_LEN,
			sizeof(struct be_mcc_compl)))
		goto err;

	/* Ask BE to create MCC compl queue; share TX's eq */
	if (be_cmd_cq_create(adapter, cq, &adapter->tx_eq.q, false, true, 0))
		goto mcc_cq_free;

	/* Alloc MCC queue */
	q = &adapter->mcc_obj.q;
	if (be_queue_alloc(adapter, q, MCC_Q_LEN, sizeof(struct be_mcc_wrb)))
		goto mcc_cq_destroy;

	/* Ask BE to create MCC queue */
	if (be_cmd_mccq_create(adapter, q, cq))
		goto mcc_q_free;

	return 0;

mcc_q_free:
	be_queue_free(adapter, q);
mcc_cq_destroy:
	be_cmd_q_destroy(adapter, cq, QTYPE_CQ);
mcc_cq_free:
	be_queue_free(adapter, cq);
err:
	return -1;
}
1455
/* Tear down the TX path: work queue first, then its completion queue,
 * then (after draining residual events) the event queue.
 */
static void be_tx_queues_destroy(struct be_adapter *adapter)
{
	struct be_queue_info *q;

	q = &adapter->tx_obj.q;
	if (q->created)
		be_cmd_q_destroy(adapter, q, QTYPE_TXQ);
	be_queue_free(adapter, q);

	q = &adapter->tx_obj.cq;
	if (q->created)
		be_cmd_q_destroy(adapter, q, QTYPE_CQ);
	be_queue_free(adapter, q);

	/* Clear any residual events before destroying the EQ */
	be_eq_clean(adapter, &adapter->tx_eq);

	q = &adapter->tx_eq.q;
	if (q->created)
		be_cmd_q_destroy(adapter, q, QTYPE_EQ);
	be_queue_free(adapter, q);
}
1478
/* Create the TX path in dependency order: EQ -> CQ -> TXQ.
 * AIC (adaptive interrupt coalescing) is disabled for TX: eqd is pinned
 * at 96. Failures unwind already-created resources via the goto ladder.
 * Returns 0 on success, -1 on failure.
 */
static int be_tx_queues_create(struct be_adapter *adapter)
{
	struct be_queue_info *eq, *q, *cq;

	adapter->tx_eq.max_eqd = 0;
	adapter->tx_eq.min_eqd = 0;
	adapter->tx_eq.cur_eqd = 96;
	adapter->tx_eq.enable_aic = false;
	/* Alloc Tx Event queue */
	eq = &adapter->tx_eq.q;
	if (be_queue_alloc(adapter, eq, EVNT_Q_LEN, sizeof(struct be_eq_entry)))
		return -1;

	/* Ask BE to create Tx Event queue */
	if (be_cmd_eq_create(adapter, eq, adapter->tx_eq.cur_eqd))
		goto tx_eq_free;

	/* Reserve the next MSI-X vector slot for the TX EQ */
	adapter->tx_eq.msix_vec_idx = adapter->msix_vec_next_idx++;


	/* Alloc TX eth compl queue */
	cq = &adapter->tx_obj.cq;
	if (be_queue_alloc(adapter, cq, TX_CQ_LEN,
			sizeof(struct be_eth_tx_compl)))
		goto tx_eq_destroy;

	/* Ask BE to create Tx eth compl queue */
	if (be_cmd_cq_create(adapter, cq, eq, false, false, 3))
		goto tx_cq_free;

	/* Alloc TX eth queue */
	q = &adapter->tx_obj.q;
	if (be_queue_alloc(adapter, q, TX_Q_LEN, sizeof(struct be_eth_wrb)))
		goto tx_cq_destroy;

	/* Ask BE to create Tx eth queue */
	if (be_cmd_txq_create(adapter, q, cq))
		goto tx_q_free;
	return 0;

tx_q_free:
	be_queue_free(adapter, q);
tx_cq_destroy:
	be_cmd_q_destroy(adapter, cq, QTYPE_CQ);
tx_cq_free:
	be_queue_free(adapter, cq);
tx_eq_destroy:
	be_cmd_q_destroy(adapter, eq, QTYPE_EQ);
tx_eq_free:
	be_queue_free(adapter, eq);
	return -1;
}
1531
/* Tear down every RX queue set (RXQ, CQ, EQ per be_rx_obj).
 * Per queue set the RXQ is destroyed first; a 1ms grace delay then lets
 * in-flight DMA and the flush completion finish before the posted
 * buffers are reclaimed.
 */
static void be_rx_queues_destroy(struct be_adapter *adapter)
{
	struct be_queue_info *q;
	struct be_rx_obj *rxo;
	int i;

	for_all_rx_queues(adapter, rxo, i) {
		q = &rxo->q;
		if (q->created) {
			be_cmd_q_destroy(adapter, q, QTYPE_RXQ);
			/* After the rxq is invalidated, wait for a grace time
			 * of 1ms for all dma to end and the flush compl to
			 * arrive
			 */
			mdelay(1);
			be_rx_q_clean(adapter, rxo);
		}
		be_queue_free(adapter, q);

		q = &rxo->cq;
		if (q->created)
			be_cmd_q_destroy(adapter, q, QTYPE_CQ);
		be_queue_free(adapter, q);

		/* Clear any residual events */
		q = &rxo->rx_eq.q;
		if (q->created) {
			be_eq_clean(adapter, &rxo->rx_eq);
			be_cmd_q_destroy(adapter, q, QTYPE_EQ);
		}
		be_queue_free(adapter, q);
	}
}
1565
/* Create all RX queue sets (EQ -> CQ -> RXQ per be_rx_obj) and, when
 * multiple RX queues are in use, program the RSS indirection table from
 * the rss_ids returned by queue creation. Queue 0 is the default
 * (non-RSS) queue; queues 1..n-1 are RSS queues.
 * Returns 0 on success, -1 on failure (everything created so far is
 * torn down via be_rx_queues_destroy()).
 */
static int be_rx_queues_create(struct be_adapter *adapter)
{
	struct be_queue_info *eq, *q, *cq;
	struct be_rx_obj *rxo;
	int rc, i;

	adapter->big_page_size = (1 << get_order(rx_frag_size)) * PAGE_SIZE;
	for_all_rx_queues(adapter, rxo, i) {
		rxo->adapter = adapter;
		/* Init last_frag_index so that the frag index in the first
		 * completion will never match */
		rxo->last_frag_index = 0xffff;
		rxo->rx_eq.max_eqd = BE_MAX_EQD;
		rxo->rx_eq.enable_aic = true;

		/* EQ */
		eq = &rxo->rx_eq.q;
		rc = be_queue_alloc(adapter, eq, EVNT_Q_LEN,
					sizeof(struct be_eq_entry));
		if (rc)
			goto err;

		rc = be_cmd_eq_create(adapter, eq, rxo->rx_eq.cur_eqd);
		if (rc)
			goto err;

		/* Reserve the next MSI-X vector slot for this RX EQ */
		rxo->rx_eq.msix_vec_idx = adapter->msix_vec_next_idx++;

		/* CQ */
		cq = &rxo->cq;
		rc = be_queue_alloc(adapter, cq, RX_CQ_LEN,
				sizeof(struct be_eth_rx_compl));
		if (rc)
			goto err;

		rc = be_cmd_cq_create(adapter, cq, eq, false, false, 3);
		if (rc)
			goto err;
		/* Rx Q */
		q = &rxo->q;
		rc = be_queue_alloc(adapter, q, RX_Q_LEN,
				sizeof(struct be_eth_rx_d));
		if (rc)
			goto err;

		rc = be_cmd_rxq_create(adapter, q, cq->id, rx_frag_size,
			BE_MAX_JUMBO_FRAME_SIZE, adapter->if_handle,
			(i > 0) ? 1 : 0/* rss enable */, &rxo->rss_id);
		if (rc)
			goto err;
	}

	if (be_multi_rxq(adapter)) {
		u8 rsstable[MAX_RSS_QS];

		for_all_rss_queues(adapter, rxo, i)
			rsstable[i] = rxo->rss_id;

		rc = be_cmd_rss_config(adapter, rsstable,
			adapter->num_rx_qs - 1);
		if (rc)
			goto err;
	}

	return 0;
err:
	be_rx_queues_destroy(adapter);
	return -1;
}
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001635
Sathya Perlafe6d2a32010-11-21 23:25:50 +00001636static bool event_peek(struct be_eq_obj *eq_obj)
Sathya Perlab628bde2009-08-17 00:58:26 +00001637{
Sathya Perlafe6d2a32010-11-21 23:25:50 +00001638 struct be_eq_entry *eqe = queue_tail_node(&eq_obj->q);
1639 if (!eqe->evt)
1640 return false;
1641 else
1642 return true;
Sathya Perlab628bde2009-08-17 00:58:26 +00001643}
1644
/* Legacy (INTx) interrupt handler, shared line.
 * Lancer chips have no CEV_ISR register, so pending events are detected
 * by peeking at each EQ's tail entry. On BE chips the per-function
 * CEV_ISR word is read (which also clears it) and each EQ's bit,
 * indexed by its MSI-X vector slot, selects which EQs to service.
 * Returns IRQ_NONE when the interrupt was not ours.
 */
static irqreturn_t be_intx(int irq, void *dev)
{
	struct be_adapter *adapter = dev;
	struct be_rx_obj *rxo;
	int isr, i, tx = 0 , rx = 0;

	if (lancer_chip(adapter)) {
		if (event_peek(&adapter->tx_eq))
			tx = event_handle(adapter, &adapter->tx_eq);
		for_all_rx_queues(adapter, rxo, i) {
			if (event_peek(&rxo->rx_eq))
				rx |= event_handle(adapter, &rxo->rx_eq);
		}

		if (!(tx || rx))
			return IRQ_NONE;

	} else {
		/* Each PCI function owns a CEV_ISR_SIZE-wide ISR word,
		 * located by the TX EQ id (8 EQs per function) */
		isr = ioread32(adapter->csr + CEV_ISR0_OFFSET +
			(adapter->tx_eq.q.id / 8) * CEV_ISR_SIZE);
		if (!isr)
			return IRQ_NONE;

		if ((1 << adapter->tx_eq.msix_vec_idx & isr))
			event_handle(adapter, &adapter->tx_eq);

		for_all_rx_queues(adapter, rxo, i) {
			if ((1 << rxo->rx_eq.msix_vec_idx & isr))
				event_handle(adapter, &rxo->rx_eq);
		}
	}

	return IRQ_HANDLED;
}
1679
1680static irqreturn_t be_msix_rx(int irq, void *dev)
1681{
Sathya Perla3abcded2010-10-03 22:12:27 -07001682 struct be_rx_obj *rxo = dev;
1683 struct be_adapter *adapter = rxo->adapter;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001684
Sathya Perla3abcded2010-10-03 22:12:27 -07001685 event_handle(adapter, &rxo->rx_eq);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001686
1687 return IRQ_HANDLED;
1688}
1689
/* MSI-X handler for the TX event queue (shared with MCC); dev is the
 * be_adapter registered as the IRQ context in be_msix_register().
 */
static irqreturn_t be_msix_tx_mcc(int irq, void *dev)
{
	struct be_adapter *adapter = dev;

	event_handle(adapter, &adapter->tx_eq);

	return IRQ_HANDLED;
}
1698
Sathya Perla64642812010-12-01 01:04:17 +00001699static inline bool do_gro(struct be_rx_obj *rxo,
1700 struct be_eth_rx_compl *rxcp, u8 err)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001701{
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001702 int tcp_frame = AMAP_GET_BITS(struct amap_eth_rx_compl, tcpf, rxcp);
1703
1704 if (err)
Sathya Perla3abcded2010-10-03 22:12:27 -07001705 rxo->stats.rxcp_err++;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001706
Ajit Khaparde5be93b92009-07-21 12:36:19 -07001707 return (tcp_frame && !err) ? true : false;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001708}
1709
/* NAPI poll handler for one RX queue.
 * Consumes up to 'budget' completions, dispatching each frame to the
 * GRO or regular receive path. Duplicate-frag-index or zero-frag
 * completions (lancer out-of-buffer / BE flush completions) are
 * skipped. Refills the RX queue when it runs low and re-arms the CQ
 * only when all work was consumed.
 * Returns the number of completions processed.
 */
static int be_poll_rx(struct napi_struct *napi, int budget)
{
	struct be_eq_obj *rx_eq = container_of(napi, struct be_eq_obj, napi);
	struct be_rx_obj *rxo = container_of(rx_eq, struct be_rx_obj, rx_eq);
	struct be_adapter *adapter = rxo->adapter;
	struct be_queue_info *rx_cq = &rxo->cq;
	struct be_eth_rx_compl *rxcp;
	u32 work_done;
	u16 frag_index, num_rcvd;
	u8 err;

	rxo->stats.rx_polls++;
	for (work_done = 0; work_done < budget; work_done++) {
		rxcp = be_rx_compl_get(rxo);
		if (!rxcp)
			break;

		err = AMAP_GET_BITS(struct amap_eth_rx_compl, err, rxcp);
		frag_index = AMAP_GET_BITS(struct amap_eth_rx_compl, fragndx,
				rxcp);
		num_rcvd = AMAP_GET_BITS(struct amap_eth_rx_compl, numfrags,
				rxcp);

		/* Skip out-of-buffer compl(lancer) or flush compl(BE) */
		if (likely(frag_index != rxo->last_frag_index &&
				num_rcvd != 0)) {
			rxo->last_frag_index = frag_index;

			if (do_gro(rxo, rxcp, err))
				be_rx_compl_process_gro(adapter, rxo, rxcp);
			else
				be_rx_compl_process(adapter, rxo, rxcp);
		}

		be_rx_compl_reset(rxcp);
	}

	/* Refill the queue */
	if (atomic_read(&rxo->q.used) < RX_FRAGS_REFILL_WM)
		be_post_rx_frags(rxo);

	/* All consumed */
	if (work_done < budget) {
		napi_complete(napi);
		be_cq_notify(adapter, rx_cq->id, true, work_done);
	} else {
		/* More to be consumed; continue with interrupts disabled */
		be_cq_notify(adapter, rx_cq->id, false, work_done);
	}
	return work_done;
}
1761
/* As TX and MCC share the same EQ check for both TX and MCC completions.
 * For TX/MCC we don't honour budget; consume everything.
 * Reaps all TX completions (freeing wrbs/skbs), processes MCC
 * completions, re-arms both CQs, and wakes the netdev queue if it had
 * been stopped for lack of TX wrbs. Always reports 1 unit of work.
 */
static int be_poll_tx_mcc(struct napi_struct *napi, int budget)
{
	struct be_eq_obj *tx_eq = container_of(napi, struct be_eq_obj, napi);
	struct be_adapter *adapter =
		container_of(tx_eq, struct be_adapter, tx_eq);
	struct be_queue_info *txq = &adapter->tx_obj.q;
	struct be_queue_info *tx_cq = &adapter->tx_obj.cq;
	struct be_eth_tx_compl *txcp;
	int tx_compl = 0, mcc_compl, status = 0;
	u16 end_idx;

	while ((txcp = be_tx_compl_get(tx_cq))) {
		end_idx = AMAP_GET_BITS(struct amap_eth_tx_compl,
				wrb_index, txcp);
		be_tx_compl_process(adapter, end_idx);
		tx_compl++;
	}

	mcc_compl = be_process_mcc(adapter, &status);

	napi_complete(napi);

	if (mcc_compl) {
		struct be_mcc_obj *mcc_obj = &adapter->mcc_obj;
		be_cq_notify(adapter, mcc_obj->cq.id, true, mcc_compl);
	}

	if (tx_compl) {
		be_cq_notify(adapter, adapter->tx_obj.cq.id, true, tx_compl);

		/* As Tx wrbs have been freed up, wake up netdev queue if
		 * it was stopped due to lack of tx wrbs.
		 */
		if (netif_queue_stopped(adapter->netdev) &&
			atomic_read(&txq->used) < txq->len / 2) {
			netif_wake_queue(adapter->netdev);
		}

		tx_stats(adapter)->be_tx_events++;
		tx_stats(adapter)->be_tx_compl += tx_compl;
	}

	return 1;
}
1809
/* Detect an Unrecoverable Error (UE) by reading the PCI-config UE
 * status registers, masking out bits the mask registers mark as
 * ignorable, and logging one message per asserted bit. Sets
 * adapter->ue_detected so be_worker() stops re-checking.
 * NOTE(review): the bit loops assume ue_status_low_desc[] and
 * ue_status_hi_desc[] each have >= 32 entries - confirm at the array
 * definitions.
 */
void be_detect_dump_ue(struct be_adapter *adapter)
{
	u32 ue_status_lo, ue_status_hi, ue_status_lo_mask, ue_status_hi_mask;
	u32 i;

	pci_read_config_dword(adapter->pdev,
				PCICFG_UE_STATUS_LOW, &ue_status_lo);
	pci_read_config_dword(adapter->pdev,
				PCICFG_UE_STATUS_HIGH, &ue_status_hi);
	pci_read_config_dword(adapter->pdev,
				PCICFG_UE_STATUS_LOW_MASK, &ue_status_lo_mask);
	pci_read_config_dword(adapter->pdev,
				PCICFG_UE_STATUS_HI_MASK, &ue_status_hi_mask);

	/* A set mask bit means that UE source is ignored */
	ue_status_lo = (ue_status_lo & (~ue_status_lo_mask));
	ue_status_hi = (ue_status_hi & (~ue_status_hi_mask));

	if (ue_status_lo || ue_status_hi) {
		adapter->ue_detected = true;
		dev_err(&adapter->pdev->dev, "UE Detected!!\n");
	}

	/* Walk each status word bit by bit, logging the asserted ones */
	if (ue_status_lo) {
		for (i = 0; ue_status_lo; ue_status_lo >>= 1, i++) {
			if (ue_status_lo & 1)
				dev_err(&adapter->pdev->dev,
				"UE: %s bit set\n", ue_status_low_desc[i]);
		}
	}
	if (ue_status_hi) {
		for (i = 0; ue_status_hi; ue_status_hi >>= 1, i++) {
			if (ue_status_hi & 1)
				dev_err(&adapter->pdev->dev,
				"UE: %s bit set\n", ue_status_hi_desc[i]);
		}
	}

}
1848
/* Periodic housekeeping, self-rescheduled every second.
 * Before the interface is up it only reaps pending MCC completions
 * (interrupts are not yet enabled, so nothing else can). Once running
 * it refreshes hardware stats, updates TX/RX rate and adaptive EQ
 * delay, reposts RX buffers for starved queues, and checks for
 * unrecoverable errors (not applicable on lancer).
 */
static void be_worker(struct work_struct *work)
{
	struct be_adapter *adapter =
		container_of(work, struct be_adapter, work.work);
	struct be_rx_obj *rxo;
	int i;

	/* when interrupts are not yet enabled, just reap any pending
	* mcc completions */
	if (!netif_running(adapter->netdev)) {
		int mcc_compl, status = 0;

		mcc_compl = be_process_mcc(adapter, &status);

		if (mcc_compl) {
			struct be_mcc_obj *mcc_obj = &adapter->mcc_obj;
			be_cq_notify(adapter, mcc_obj->cq.id, false, mcc_compl);
		}
		goto reschedule;
	}

	/* Don't queue a new stats cmd while the previous one is in flight */
	if (!adapter->stats_ioctl_sent)
		be_cmd_get_stats(adapter, &adapter->stats_cmd);

	be_tx_rate_update(adapter);

	for_all_rx_queues(adapter, rxo, i) {
		be_rx_rate_update(rxo);
		be_rx_eqd_update(adapter, rxo);

		if (rxo->rx_post_starved) {
			rxo->rx_post_starved = false;
			be_post_rx_frags(rxo);
		}
	}
	if (!adapter->ue_detected && !lancer_chip(adapter))
		be_detect_dump_ue(adapter);

reschedule:
	schedule_delayed_work(&adapter->work, msecs_to_jiffies(1000));
}
1890
Sathya Perla8d56ff12009-11-22 22:02:26 +00001891static void be_msix_disable(struct be_adapter *adapter)
1892{
1893 if (adapter->msix_enabled) {
1894 pci_disable_msix(adapter->pdev);
1895 adapter->msix_enabled = false;
1896 }
1897}
1898
/* Decide how many RX queues to use: 1 default queue plus MAX_RSS_QS
 * RSS queues when the module param allows it, the function reports RSS
 * capability, and SR-IOV is off; otherwise a single queue.
 */
static int be_num_rxqs_get(struct be_adapter *adapter)
{
	/* NOTE(review): 0x400 is an undocumented function_mode flag that
	 * excludes multi-RXQ here - confirm its meaning against the
	 * function_mode definitions in be_cmds.h */
	if (multi_rxq && (adapter->function_caps & BE_FUNCTION_CAPS_RSS) &&
		!adapter->sriov_enabled && !(adapter->function_mode & 0x400)) {
		return 1 + MAX_RSS_QS; /* one default non-RSS queue */
	} else {
		dev_warn(&adapter->pdev->dev,
			"No support for multiple RX queues\n");
		return 1;
	}
}
1910
/* Try to enable one MSI-X vector per RX queue plus one for TX/MCC.
 * If pci_enable_msix() reports only a smaller number of available
 * vectors (a positive return), retry with that count and shrink
 * num_rx_qs to fit. On any other failure MSI-X stays disabled and the
 * driver falls back to INTx.
 */
static void be_msix_enable(struct be_adapter *adapter)
{
#define BE_MIN_MSIX_VECTORS	(1 + 1) /* Rx + Tx */
	int i, status;

	adapter->num_rx_qs = be_num_rxqs_get(adapter);

	for (i = 0; i < (adapter->num_rx_qs + 1); i++)
		adapter->msix_entries[i].entry = i;

	status = pci_enable_msix(adapter->pdev, adapter->msix_entries,
			adapter->num_rx_qs + 1);
	if (status == 0) {
		goto done;
	} else if (status >= BE_MIN_MSIX_VECTORS) {
		/* Positive status = number of vectors actually available */
		if (pci_enable_msix(adapter->pdev, adapter->msix_entries,
				status) == 0) {
			adapter->num_rx_qs = status - 1;
			dev_warn(&adapter->pdev->dev,
				"Could alloc only %d MSIx vectors. "
				"Using %d RX Qs\n", status, adapter->num_rx_qs);
			goto done;
		}
	}
	return;
done:
	adapter->msix_enabled = true;
}
1939
/* Enable SR-IOV virtual functions when built with CONFIG_PCI_IOV,
 * running as the physical function, and the num_vfs module param is
 * non-zero. sriov_enabled records whether pci_enable_sriov succeeded.
 */
static void be_sriov_enable(struct be_adapter *adapter)
{
	be_check_sriov_fn_type(adapter);
#ifdef CONFIG_PCI_IOV
	if (be_physfn(adapter) && num_vfs) {
		int status;

		status = pci_enable_sriov(adapter->pdev, num_vfs);
		adapter->sriov_enabled = status ? false : true;
	}
#endif
}
1952
/* Disable SR-IOV virtual functions if they were enabled.
 * Compiles to a no-op without CONFIG_PCI_IOV.
 */
static void be_sriov_disable(struct be_adapter *adapter)
{
#ifdef CONFIG_PCI_IOV
	if (!adapter->sriov_enabled)
		return;

	pci_disable_sriov(adapter->pdev);
	adapter->sriov_enabled = false;
#endif
}
1962
/* Map an EQ object to the Linux IRQ number of its assigned MSI-X
 * vector slot (msix_vec_idx was reserved at queue-creation time).
 */
static inline int be_msix_vec_get(struct be_adapter *adapter,
			struct be_eq_obj *eq_obj)
{
	return adapter->msix_entries[eq_obj->msix_vec_idx].vector;
}
1968
/* Build the IRQ name "<netdev>-<desc>" into eq_obj->desc and request
 * the EQ's MSI-X vector with 'context' as the handler argument.
 * Returns 0 or the request_irq() error.
 * NOTE(review): sprintf into eq_obj->desc is unbounded here - confirm
 * the desc buffer in be_eq_obj is sized for netdev->name plus desc.
 */
static int be_request_irq(struct be_adapter *adapter,
		struct be_eq_obj *eq_obj,
		void *handler, char *desc, void *context)
{
	struct net_device *netdev = adapter->netdev;
	int vec;

	sprintf(eq_obj->desc, "%s-%s", netdev->name, desc);
	vec = be_msix_vec_get(adapter, eq_obj);
	return request_irq(vec, handler, 0, eq_obj->desc, context);
}
1980
/* Free the EQ's MSI-X vector; 'context' must match the pointer passed
 * to be_request_irq() for this EQ.
 */
static void be_free_irq(struct be_adapter *adapter, struct be_eq_obj *eq_obj,
			void *context)
{
	int vec = be_msix_vec_get(adapter, eq_obj);
	free_irq(vec, context);
}
1987
/* Register MSI-X handlers: the TX/MCC vector (context = adapter), then
 * one vector per RX queue (context = the be_rx_obj). On a mid-loop
 * failure, every vector registered so far is freed in reverse order
 * and MSI-X is disabled so the caller can fall back to INTx.
 * Returns 0 or the failing request_irq() error.
 */
static int be_msix_register(struct be_adapter *adapter)
{
	struct be_rx_obj *rxo;
	int status, i;
	char qname[10];

	status = be_request_irq(adapter, &adapter->tx_eq, be_msix_tx_mcc, "tx",
				adapter);
	if (status)
		goto err;

	for_all_rx_queues(adapter, rxo, i) {
		sprintf(qname, "rxq%d", i);
		status = be_request_irq(adapter, &rxo->rx_eq, be_msix_rx,
				qname, rxo);
		if (status)
			goto err_msix;
	}

	return 0;

err_msix:
	be_free_irq(adapter, &adapter->tx_eq, adapter);

	/* i is the index of the rx queue that failed; free rx irqs
	 * for queues [0, i) walking backwards */
	for (i--, rxo = &adapter->rx_obj[i]; i >= 0; i--, rxo--)
		be_free_irq(adapter, &rxo->rx_eq, rxo);

err:
	dev_warn(&adapter->pdev->dev,
		"MSIX Request IRQ failed - err %d\n", status);
	pci_disable_msix(adapter->pdev);
	adapter->msix_enabled = false;
	return status;
}
2022
/* Register interrupt handlers: prefer MSI-X; on MSI-X registration
 * failure fall back to a shared INTx line (except on VFs, where INTx
 * is unsupported and the error is returned). Sets isr_registered on
 * success.
 */
static int be_irq_register(struct be_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	int status;

	if (adapter->msix_enabled) {
		status = be_msix_register(adapter);
		if (status == 0)
			goto done;
		/* INTx is not supported for VF */
		if (!be_physfn(adapter))
			return status;
	}

	/* INTx */
	netdev->irq = adapter->pdev->irq;
	status = request_irq(netdev->irq, be_intx, IRQF_SHARED, netdev->name,
			adapter);
	if (status) {
		dev_err(&adapter->pdev->dev,
			"INTx request IRQ failed - err %d\n", status);
		return status;
	}
done:
	adapter->isr_registered = true;
	return 0;
}
2050
/* Undo be_irq_register(): free either the shared INTx line or the
 * TX/MCC and per-RX-queue MSI-X vectors (with the same contexts they
 * were registered with). No-op if nothing is registered.
 */
static void be_irq_unregister(struct be_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	struct be_rx_obj *rxo;
	int i;

	if (!adapter->isr_registered)
		return;

	/* INTx */
	if (!adapter->msix_enabled) {
		free_irq(netdev->irq, adapter);
		goto done;
	}

	/* MSIx */
	be_free_irq(adapter, &adapter->tx_eq, adapter);

	for_all_rx_queues(adapter, rxo, i)
		be_free_irq(adapter, &rxo->rx_eq, rxo);

done:
	adapter->isr_registered = false;
}
2075
/* ndo_stop handler: quiesce the interface in order - stop async MCC
 * events, stop the TX queue and carrier, mask device interrupts
 * (non-lancer), synchronize and unregister IRQs, disable NAPI, then
 * drain/free all pending TX completions.
 */
static int be_close(struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_rx_obj *rxo;
	struct be_eq_obj *tx_eq = &adapter->tx_eq;
	int vec, i;

	be_async_mcc_disable(adapter);

	netif_stop_queue(netdev);
	netif_carrier_off(netdev);
	adapter->link_up = false;

	if (!lancer_chip(adapter))
		be_intr_set(adapter, false);

	/* Make sure no handler is still running before unregistering */
	if (adapter->msix_enabled) {
		vec = be_msix_vec_get(adapter, tx_eq);
		synchronize_irq(vec);

		for_all_rx_queues(adapter, rxo, i) {
			vec = be_msix_vec_get(adapter, &rxo->rx_eq);
			synchronize_irq(vec);
		}
	} else {
		synchronize_irq(netdev->irq);
	}
	be_irq_unregister(adapter);

	for_all_rx_queues(adapter, rxo, i)
		napi_disable(&rxo->rx_eq.napi);

	napi_disable(&tx_eq->napi);

	/* Wait for all pending tx completions to arrive so that
	 * all tx skbs are freed.
	 */
	be_tx_compl_clean(adapter);

	return 0;
}
2117
/* ndo_open handler: bring the interface up - post RX buffers, enable
 * NAPI, register IRQs, unmask device interrupts (non-lancer), arm the
 * event/completion queues, enable async MCC processing, query and
 * report link state, and (PF only) program VLAN and flow-control
 * settings. Any failure rolls everything back via be_close().
 */
static int be_open(struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_eq_obj *tx_eq = &adapter->tx_eq;
	struct be_rx_obj *rxo;
	bool link_up;
	int status, i;
	u8 mac_speed;
	u16 link_speed;

	for_all_rx_queues(adapter, rxo, i) {
		be_post_rx_frags(rxo);
		napi_enable(&rxo->rx_eq.napi);
	}
	napi_enable(&tx_eq->napi);

	be_irq_register(adapter);

	if (!lancer_chip(adapter))
		be_intr_set(adapter, true);

	/* The evt queues are created in unarmed state; arm them */
	for_all_rx_queues(adapter, rxo, i) {
		be_eq_notify(adapter, rxo->rx_eq.q.id, true, false, 0);
		be_cq_notify(adapter, rxo->cq.id, true, 0);
	}
	be_eq_notify(adapter, tx_eq->q.id, true, false, 0);

	/* Now that interrupts are on we can process async mcc */
	be_async_mcc_enable(adapter);

	status = be_cmd_link_status_query(adapter, &link_up, &mac_speed,
			&link_speed);
	if (status)
		goto err;
	be_link_status_update(adapter, link_up);

	if (be_physfn(adapter)) {
		status = be_vid_config(adapter, false, 0);
		if (status)
			goto err;

		status = be_cmd_set_flow_control(adapter,
				adapter->tx_fc, adapter->rx_fc);
		if (status)
			goto err;
	}

	return 0;
err:
	be_close(adapter->netdev);
	return -EIO;
}
2171
Ajit Khaparde71d8d1b2009-12-03 06:16:59 +00002172static int be_setup_wol(struct be_adapter *adapter, bool enable)
2173{
2174 struct be_dma_mem cmd;
2175 int status = 0;
2176 u8 mac[ETH_ALEN];
2177
2178 memset(mac, 0, ETH_ALEN);
2179
2180 cmd.size = sizeof(struct be_cmd_req_acpi_wol_magic_config);
Ivan Vecera2b7bceb2011-02-02 08:05:12 +00002181 cmd.va = dma_alloc_coherent(&adapter->pdev->dev, cmd.size, &cmd.dma,
2182 GFP_KERNEL);
Ajit Khaparde71d8d1b2009-12-03 06:16:59 +00002183 if (cmd.va == NULL)
2184 return -1;
2185 memset(cmd.va, 0, cmd.size);
2186
2187 if (enable) {
2188 status = pci_write_config_dword(adapter->pdev,
2189 PCICFG_PM_CONTROL_OFFSET, PCICFG_PM_CONTROL_MASK);
2190 if (status) {
2191 dev_err(&adapter->pdev->dev,
Frans Pop2381a552010-03-24 07:57:36 +00002192 "Could not enable Wake-on-lan\n");
Ivan Vecera2b7bceb2011-02-02 08:05:12 +00002193 dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va,
2194 cmd.dma);
Ajit Khaparde71d8d1b2009-12-03 06:16:59 +00002195 return status;
2196 }
2197 status = be_cmd_enable_magic_wol(adapter,
2198 adapter->netdev->dev_addr, &cmd);
2199 pci_enable_wake(adapter->pdev, PCI_D3hot, 1);
2200 pci_enable_wake(adapter->pdev, PCI_D3cold, 1);
2201 } else {
2202 status = be_cmd_enable_magic_wol(adapter, mac, &cmd);
2203 pci_enable_wake(adapter->pdev, PCI_D3hot, 0);
2204 pci_enable_wake(adapter->pdev, PCI_D3cold, 0);
2205 }
2206
Ivan Vecera2b7bceb2011-02-02 08:05:12 +00002207 dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va, cmd.dma);
Ajit Khaparde71d8d1b2009-12-03 06:16:59 +00002208 return status;
2209}
2210
/*
 * Generate a seed MAC address from the PF MAC address using jhash.
 * MAC addresses for the VFs are assigned incrementally starting from
 * the seed. These addresses are programmed in the ASIC by the PF, and
 * each VF driver queries for its MAC address during its probe.
 */
2217static inline int be_vf_eth_addr_config(struct be_adapter *adapter)
2218{
2219 u32 vf = 0;
Sathya Perla3abcded2010-10-03 22:12:27 -07002220 int status = 0;
Ajit Khaparde6d87f5c2010-08-25 00:32:33 +00002221 u8 mac[ETH_ALEN];
2222
2223 be_vf_eth_addr_generate(adapter, mac);
2224
2225 for (vf = 0; vf < num_vfs; vf++) {
2226 status = be_cmd_pmac_add(adapter, mac,
2227 adapter->vf_cfg[vf].vf_if_handle,
2228 &adapter->vf_cfg[vf].vf_pmac_id);
2229 if (status)
2230 dev_err(&adapter->pdev->dev,
2231 "Mac address add failed for VF %d\n", vf);
2232 else
2233 memcpy(adapter->vf_cfg[vf].vf_mac_addr, mac, ETH_ALEN);
2234
2235 mac[5] += 1;
2236 }
2237 return status;
2238}
2239
2240static inline void be_vf_eth_addr_rem(struct be_adapter *adapter)
2241{
2242 u32 vf;
2243
2244 for (vf = 0; vf < num_vfs; vf++) {
2245 if (adapter->vf_cfg[vf].vf_pmac_id != BE_INVALID_PMAC_ID)
2246 be_cmd_pmac_del(adapter,
2247 adapter->vf_cfg[vf].vf_if_handle,
2248 adapter->vf_cfg[vf].vf_pmac_id);
2249 }
2250}
2251
Sathya Perla5fb379e2009-06-18 00:02:59 +00002252static int be_setup(struct be_adapter *adapter)
2253{
Sathya Perla5fb379e2009-06-18 00:02:59 +00002254 struct net_device *netdev = adapter->netdev;
Sarveshwar Bandiba343c72010-03-31 02:56:12 +00002255 u32 cap_flags, en_flags, vf = 0;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002256 int status;
Sarveshwar Bandiba343c72010-03-31 02:56:12 +00002257 u8 mac[ETH_ALEN];
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002258
Sarveshwar Bandiba343c72010-03-31 02:56:12 +00002259 cap_flags = en_flags = BE_IF_FLAGS_UNTAGGED | BE_IF_FLAGS_BROADCAST;
2260
2261 if (be_physfn(adapter)) {
2262 cap_flags |= BE_IF_FLAGS_MCAST_PROMISCUOUS |
2263 BE_IF_FLAGS_PROMISCUOUS |
2264 BE_IF_FLAGS_PASS_L3L4_ERRORS;
2265 en_flags |= BE_IF_FLAGS_PASS_L3L4_ERRORS;
Sathya Perla3abcded2010-10-03 22:12:27 -07002266
2267 if (be_multi_rxq(adapter)) {
2268 cap_flags |= BE_IF_FLAGS_RSS;
2269 en_flags |= BE_IF_FLAGS_RSS;
2270 }
Sarveshwar Bandiba343c72010-03-31 02:56:12 +00002271 }
Sathya Perla73d540f2009-10-14 20:20:42 +00002272
2273 status = be_cmd_if_create(adapter, cap_flags, en_flags,
2274 netdev->dev_addr, false/* pmac_invalid */,
Sarveshwar Bandiba343c72010-03-31 02:56:12 +00002275 &adapter->if_handle, &adapter->pmac_id, 0);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002276 if (status != 0)
2277 goto do_none;
2278
Sarveshwar Bandiba343c72010-03-31 02:56:12 +00002279 if (be_physfn(adapter)) {
Ajit Khapardec99ac3e2011-02-11 13:35:02 +00002280 if (adapter->sriov_enabled) {
2281 while (vf < num_vfs) {
2282 cap_flags = en_flags = BE_IF_FLAGS_UNTAGGED |
2283 BE_IF_FLAGS_BROADCAST;
2284 status = be_cmd_if_create(adapter, cap_flags,
2285 en_flags, mac, true,
Ajit Khaparde64600ea2010-07-23 01:50:34 +00002286 &adapter->vf_cfg[vf].vf_if_handle,
Sarveshwar Bandiba343c72010-03-31 02:56:12 +00002287 NULL, vf+1);
Ajit Khapardec99ac3e2011-02-11 13:35:02 +00002288 if (status) {
2289 dev_err(&adapter->pdev->dev,
2290 "Interface Create failed for VF %d\n",
2291 vf);
2292 goto if_destroy;
2293 }
2294 adapter->vf_cfg[vf].vf_pmac_id =
2295 BE_INVALID_PMAC_ID;
2296 vf++;
Sarveshwar Bandiba343c72010-03-31 02:56:12 +00002297 }
Sarveshwar Bandi84e5b9f2010-05-27 16:28:15 -07002298 }
Ajit Khapardec99ac3e2011-02-11 13:35:02 +00002299 } else {
Sarveshwar Bandiba343c72010-03-31 02:56:12 +00002300 status = be_cmd_mac_addr_query(adapter, mac,
2301 MAC_ADDRESS_TYPE_NETWORK, false, adapter->if_handle);
2302 if (!status) {
2303 memcpy(adapter->netdev->dev_addr, mac, ETH_ALEN);
2304 memcpy(adapter->netdev->perm_addr, mac, ETH_ALEN);
2305 }
2306 }
2307
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002308 status = be_tx_queues_create(adapter);
2309 if (status != 0)
2310 goto if_destroy;
2311
2312 status = be_rx_queues_create(adapter);
2313 if (status != 0)
2314 goto tx_qs_destroy;
2315
Sathya Perla5fb379e2009-06-18 00:02:59 +00002316 status = be_mcc_queues_create(adapter);
2317 if (status != 0)
2318 goto rx_qs_destroy;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002319
Ajit Khaparde0dffc832009-11-29 17:57:46 +00002320 adapter->link_speed = -1;
2321
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002322 return 0;
2323
Ajit Khaparde6d87f5c2010-08-25 00:32:33 +00002324 be_mcc_queues_destroy(adapter);
Sathya Perla5fb379e2009-06-18 00:02:59 +00002325rx_qs_destroy:
2326 be_rx_queues_destroy(adapter);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002327tx_qs_destroy:
2328 be_tx_queues_destroy(adapter);
2329if_destroy:
Ajit Khapardec99ac3e2011-02-11 13:35:02 +00002330 if (be_physfn(adapter) && adapter->sriov_enabled)
2331 for (vf = 0; vf < num_vfs; vf++)
2332 if (adapter->vf_cfg[vf].vf_if_handle)
2333 be_cmd_if_destroy(adapter,
Ajit Khaparde658681f2011-02-11 13:34:46 +00002334 adapter->vf_cfg[vf].vf_if_handle,
2335 vf + 1);
2336 be_cmd_if_destroy(adapter, adapter->if_handle, 0);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002337do_none:
2338 return status;
2339}
2340
Sathya Perla5fb379e2009-06-18 00:02:59 +00002341static int be_clear(struct be_adapter *adapter)
2342{
Ajit Khaparde7ab8b0b2011-02-11 13:35:56 +00002343 int vf;
2344
Ajit Khapardec99ac3e2011-02-11 13:35:02 +00002345 if (be_physfn(adapter) && adapter->sriov_enabled)
Ajit Khaparde6d87f5c2010-08-25 00:32:33 +00002346 be_vf_eth_addr_rem(adapter);
2347
Sathya Perla1a8887d2009-08-17 00:58:41 +00002348 be_mcc_queues_destroy(adapter);
Sathya Perla5fb379e2009-06-18 00:02:59 +00002349 be_rx_queues_destroy(adapter);
2350 be_tx_queues_destroy(adapter);
2351
Ajit Khaparde7ab8b0b2011-02-11 13:35:56 +00002352 if (be_physfn(adapter) && adapter->sriov_enabled)
2353 for (vf = 0; vf < num_vfs; vf++)
2354 if (adapter->vf_cfg[vf].vf_if_handle)
2355 be_cmd_if_destroy(adapter,
2356 adapter->vf_cfg[vf].vf_if_handle,
2357 vf + 1);
2358
Ajit Khaparde658681f2011-02-11 13:34:46 +00002359 be_cmd_if_destroy(adapter, adapter->if_handle, 0);
Sathya Perla5fb379e2009-06-18 00:02:59 +00002360
Sathya Perla2243e2e2009-11-22 22:02:03 +00002361 /* tell fw we're done with firing cmds */
2362 be_cmd_fw_clean(adapter);
Sathya Perla5fb379e2009-06-18 00:02:59 +00002363 return 0;
2364}
2365
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002366
Ajit Khaparde84517482009-09-04 03:12:16 +00002367#define FW_FILE_HDR_SIGN "ServerEngines Corp. "
Sarveshwar Bandifa9a6fe2009-11-20 14:23:47 -08002368static bool be_flash_redboot(struct be_adapter *adapter,
Ajit Khaparde3f0d4562010-02-09 01:30:35 +00002369 const u8 *p, u32 img_start, int image_size,
2370 int hdr_size)
Sarveshwar Bandifa9a6fe2009-11-20 14:23:47 -08002371{
2372 u32 crc_offset;
2373 u8 flashed_crc[4];
2374 int status;
Ajit Khaparde3f0d4562010-02-09 01:30:35 +00002375
2376 crc_offset = hdr_size + img_start + image_size - 4;
2377
Sarveshwar Bandifa9a6fe2009-11-20 14:23:47 -08002378 p += crc_offset;
Ajit Khaparde3f0d4562010-02-09 01:30:35 +00002379
2380 status = be_cmd_get_flash_crc(adapter, flashed_crc,
Ajit Khapardef510fc62010-03-31 01:47:45 +00002381 (image_size - 4));
Sarveshwar Bandifa9a6fe2009-11-20 14:23:47 -08002382 if (status) {
2383 dev_err(&adapter->pdev->dev,
2384 "could not get crc from flash, not flashing redboot\n");
2385 return false;
2386 }
2387
2388 /*update redboot only if crc does not match*/
2389 if (!memcmp(flashed_crc, p, 4))
2390 return false;
2391 else
2392 return true;
Sarveshwar Bandifa9a6fe2009-11-20 14:23:47 -08002393}
2394
/* Write every applicable firmware component from the UFI file to the
 * adapter's flash ROM, using the generation-specific offset/size table.
 * Each component is streamed to FW in 32KB chunks via the flash_cmd DMA
 * buffer: intermediate chunks use FLASHROM_OPER_SAVE, the final chunk
 * FLASHROM_OPER_FLASH to commit.
 * @num_of_images: number of image headers preceding the data in a g3
 *                 UFI (0 for g2).
 * Returns 0 on success, -1 on a malformed file or a failed flash cmd.
 */
static int be_flash_data(struct be_adapter *adapter,
			const struct firmware *fw,
			struct be_dma_mem *flash_cmd, int num_of_images)

{
	int status = 0, i, filehdr_size = 0;
	u32 total_bytes = 0, flash_op;
	int num_bytes;
	const u8 *p = fw->data;
	struct be_cmd_write_flashrom *req = flash_cmd->va;
	const struct flash_comp *pflashcomp;
	int num_comp;

	/* { flash offset, FW op-type, max size } per component, BE3 parts */
	static const struct flash_comp gen3_flash_types[9] = {
		{ FLASH_iSCSI_PRIMARY_IMAGE_START_g3, IMG_TYPE_ISCSI_ACTIVE,
			FLASH_IMAGE_MAX_SIZE_g3},
		{ FLASH_REDBOOT_START_g3, IMG_TYPE_REDBOOT,
			FLASH_REDBOOT_IMAGE_MAX_SIZE_g3},
		{ FLASH_iSCSI_BIOS_START_g3, IMG_TYPE_BIOS,
			FLASH_BIOS_IMAGE_MAX_SIZE_g3},
		{ FLASH_PXE_BIOS_START_g3, IMG_TYPE_PXE_BIOS,
			FLASH_BIOS_IMAGE_MAX_SIZE_g3},
		{ FLASH_FCoE_BIOS_START_g3, IMG_TYPE_FCOE_BIOS,
			FLASH_BIOS_IMAGE_MAX_SIZE_g3},
		{ FLASH_iSCSI_BACKUP_IMAGE_START_g3, IMG_TYPE_ISCSI_BACKUP,
			FLASH_IMAGE_MAX_SIZE_g3},
		{ FLASH_FCoE_PRIMARY_IMAGE_START_g3, IMG_TYPE_FCOE_FW_ACTIVE,
			FLASH_IMAGE_MAX_SIZE_g3},
		{ FLASH_FCoE_BACKUP_IMAGE_START_g3, IMG_TYPE_FCOE_FW_BACKUP,
			FLASH_IMAGE_MAX_SIZE_g3},
		{ FLASH_NCSI_START_g3, IMG_TYPE_NCSI_FW,
			FLASH_NCSI_IMAGE_MAX_SIZE_g3}
	};
	/* same layout table for BE2-generation parts (no NCSI image) */
	static const struct flash_comp gen2_flash_types[8] = {
		{ FLASH_iSCSI_PRIMARY_IMAGE_START_g2, IMG_TYPE_ISCSI_ACTIVE,
			FLASH_IMAGE_MAX_SIZE_g2},
		{ FLASH_REDBOOT_START_g2, IMG_TYPE_REDBOOT,
			FLASH_REDBOOT_IMAGE_MAX_SIZE_g2},
		{ FLASH_iSCSI_BIOS_START_g2, IMG_TYPE_BIOS,
			FLASH_BIOS_IMAGE_MAX_SIZE_g2},
		{ FLASH_PXE_BIOS_START_g2, IMG_TYPE_PXE_BIOS,
			FLASH_BIOS_IMAGE_MAX_SIZE_g2},
		{ FLASH_FCoE_BIOS_START_g2, IMG_TYPE_FCOE_BIOS,
			FLASH_BIOS_IMAGE_MAX_SIZE_g2},
		{ FLASH_iSCSI_BACKUP_IMAGE_START_g2, IMG_TYPE_ISCSI_BACKUP,
			FLASH_IMAGE_MAX_SIZE_g2},
		{ FLASH_FCoE_PRIMARY_IMAGE_START_g2, IMG_TYPE_FCOE_FW_ACTIVE,
			FLASH_IMAGE_MAX_SIZE_g2},
		{ FLASH_FCoE_BACKUP_IMAGE_START_g2, IMG_TYPE_FCOE_FW_BACKUP,
			 FLASH_IMAGE_MAX_SIZE_g2}
	};

	if (adapter->generation == BE_GEN3) {
		pflashcomp = gen3_flash_types;
		filehdr_size = sizeof(struct flash_file_hdr_g3);
		num_comp = ARRAY_SIZE(gen3_flash_types);
	} else {
		pflashcomp = gen2_flash_types;
		filehdr_size = sizeof(struct flash_file_hdr_g2);
		num_comp = ARRAY_SIZE(gen2_flash_types);
	}
	for (i = 0; i < num_comp; i++) {
		/* NCSI FW needs base FW >= 3.102.148.0 — skip otherwise */
		if ((pflashcomp[i].optype == IMG_TYPE_NCSI_FW) &&
				memcmp(adapter->fw_ver, "3.102.148.0", 11) < 0)
			continue;
		/* skip redboot when its CRC already matches flash */
		if ((pflashcomp[i].optype == IMG_TYPE_REDBOOT) &&
			(!be_flash_redboot(adapter, fw->data,
			 pflashcomp[i].offset, pflashcomp[i].size,
			 filehdr_size)))
			continue;
		/* locate this component's data within the UFI file */
		p = fw->data;
		p += filehdr_size + pflashcomp[i].offset
			+ (num_of_images * sizeof(struct image_hdr));
		if (p + pflashcomp[i].size > fw->data + fw->size)
			return -1;
		total_bytes = pflashcomp[i].size;
		while (total_bytes) {
			if (total_bytes > 32*1024)
				num_bytes = 32*1024;
			else
				num_bytes = total_bytes;
			total_bytes -= num_bytes;

			/* last chunk commits; earlier chunks just stage */
			if (!total_bytes)
				flash_op = FLASHROM_OPER_FLASH;
			else
				flash_op = FLASHROM_OPER_SAVE;
			memcpy(req->params.data_buf, p, num_bytes);
			p += num_bytes;
			status = be_cmd_write_flashrom(adapter, flash_cmd,
				pflashcomp[i].optype, flash_op, num_bytes);
			if (status) {
				dev_err(&adapter->pdev->dev,
					"cmd to write to flash rom failed.\n");
				return -1;
			}
			/* be polite: flashing a component takes a while */
			yield();
		}
	}
	return 0;
}
2496
Ajit Khaparde3f0d4562010-02-09 01:30:35 +00002497static int get_ufigen_type(struct flash_file_hdr_g2 *fhdr)
2498{
2499 if (fhdr == NULL)
2500 return 0;
2501 if (fhdr->build[0] == '3')
2502 return BE_GEN3;
2503 else if (fhdr->build[0] == '2')
2504 return BE_GEN2;
2505 else
2506 return 0;
2507}
2508
/* Load and flash a firmware image requested via ethtool.
 * @func: firmware file name (bounded by ethtool to
 *        ETHTOOL_FLASH_MAX_FILENAME, hence the plain strcpy below —
 *        NOTE(review): relies on the caller enforcing that bound).
 * Fetches the file with request_firmware(), validates that the UFI
 * generation matches the adapter generation, and streams each image to
 * flash via be_flash_data(). Returns 0 on success or a negative error.
 */
int be_load_fw(struct be_adapter *adapter, u8 *func)
{
	char fw_file[ETHTOOL_FLASH_MAX_FILENAME];
	const struct firmware *fw;
	struct flash_file_hdr_g2 *fhdr;
	struct flash_file_hdr_g3 *fhdr3;
	struct image_hdr *img_hdr_ptr = NULL;
	struct be_dma_mem flash_cmd;
	int status, i = 0, num_imgs = 0;
	const u8 *p;

	/* flashing issues FW cmds that need the interface up */
	if (!netif_running(adapter->netdev)) {
		dev_err(&adapter->pdev->dev,
			"Firmware load not allowed (interface is down)\n");
		return -EPERM;
	}

	strcpy(fw_file, func);

	status = request_firmware(&fw, fw_file, &adapter->pdev->dev);
	if (status)
		goto fw_exit;

	p = fw->data;
	/* a g2 header is a prefix of g3, so it is safe to peek via g2 */
	fhdr = (struct flash_file_hdr_g2 *) p;
	dev_info(&adapter->pdev->dev, "Flashing firmware file %s\n", fw_file);

	/* DMA buffer for one 32KB flash chunk plus the cmd header */
	flash_cmd.size = sizeof(struct be_cmd_write_flashrom) + 32*1024;
	flash_cmd.va = dma_alloc_coherent(&adapter->pdev->dev, flash_cmd.size,
					  &flash_cmd.dma, GFP_KERNEL);
	if (!flash_cmd.va) {
		status = -ENOMEM;
		dev_err(&adapter->pdev->dev,
			"Memory allocation failure while flashing\n");
		goto fw_exit;
	}

	if ((adapter->generation == BE_GEN3) &&
			(get_ufigen_type(fhdr) == BE_GEN3)) {
		fhdr3 = (struct flash_file_hdr_g3 *) fw->data;
		num_imgs = le32_to_cpu(fhdr3->num_imgs);
		for (i = 0; i < num_imgs; i++) {
			img_hdr_ptr = (struct image_hdr *) (fw->data +
					(sizeof(struct flash_file_hdr_g3) +
					 i * sizeof(struct image_hdr)));
			/* only imageid 1 is flashable on this ASIC */
			if (le32_to_cpu(img_hdr_ptr->imageid) == 1)
				status = be_flash_data(adapter, fw, &flash_cmd,
							num_imgs);
		}
	} else if ((adapter->generation == BE_GEN2) &&
			(get_ufigen_type(fhdr) == BE_GEN2)) {
		status = be_flash_data(adapter, fw, &flash_cmd, 0);
	} else {
		dev_err(&adapter->pdev->dev,
			"UFI and Interface are not compatible for flashing\n");
		status = -1;
	}

	dma_free_coherent(&adapter->pdev->dev, flash_cmd.size, flash_cmd.va,
			  flash_cmd.dma);
	if (status) {
		dev_err(&adapter->pdev->dev, "Firmware load error\n");
		goto fw_exit;
	}

	dev_info(&adapter->pdev->dev, "Firmware flashed successfully\n");

fw_exit:
	/* release_firmware(NULL) is a no-op, so this is safe on the
	 * request_firmware() failure path too
	 */
	release_firmware(fw);
	return status;
}
2580
/* net_device callbacks for the be2net interface; installed on the
 * netdev by be_netdev_init() via BE_SET_NETDEV_OPS(). Includes the
 * SR-IOV ndo_set_vf_* hooks used by the PF to configure its VFs.
 */
static struct net_device_ops be_netdev_ops = {
	.ndo_open = be_open,
	.ndo_stop = be_close,
	.ndo_start_xmit = be_xmit,
	.ndo_set_rx_mode = be_set_multicast_list,
	.ndo_set_mac_address = be_mac_addr_set,
	.ndo_change_mtu = be_change_mtu,
	.ndo_validate_addr = eth_validate_addr,
	.ndo_vlan_rx_register = be_vlan_register,
	.ndo_vlan_rx_add_vid = be_vlan_add_vid,
	.ndo_vlan_rx_kill_vid = be_vlan_rem_vid,
	.ndo_set_vf_mac = be_set_vf_mac,
	.ndo_set_vf_vlan = be_set_vf_vlan,
	.ndo_set_vf_tx_rate = be_set_vf_tx_rate,
	.ndo_get_vf_config = be_get_vf_config
};
2597
/* Initialize the netdev: advertise offload features (checksum, TSO,
 * VLAN accel, GRO), set driver defaults (rx csum, flow control on),
 * install the netdev/ethtool ops and register one NAPI context per RX
 * event queue plus one for the combined TX/MCC event queue.
 */
static void be_netdev_init(struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_rx_obj *rxo;
	int i;

	netdev->features |= NETIF_F_SG | NETIF_F_HW_VLAN_RX | NETIF_F_TSO |
		NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_FILTER |
		NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
		NETIF_F_GRO | NETIF_F_TSO6;

	netdev->vlan_features |= NETIF_F_SG | NETIF_F_TSO |
		NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;

	/* Lancer additionally supports TSO6 on VLAN interfaces */
	if (lancer_chip(adapter))
		netdev->vlan_features |= NETIF_F_TSO6;

	netdev->flags |= IFF_MULTICAST;

	adapter->rx_csum = true;

	/* Default settings for Rx and Tx flow control */
	adapter->rx_fc = true;
	adapter->tx_fc = true;

	netif_set_gso_max_size(netdev, 65535);

	BE_SET_NETDEV_OPS(netdev, &be_netdev_ops);

	SET_ETHTOOL_OPS(netdev, &be_ethtool_ops);

	/* one NAPI instance per RX event queue */
	for_all_rx_queues(adapter, rxo, i)
		netif_napi_add(netdev, &rxo->rx_eq.napi, be_poll_rx,
			BE_NAPI_WEIGHT);

	/* TX and MCC completions share a single event queue/NAPI */
	netif_napi_add(netdev, &adapter->tx_eq.napi, be_poll_tx_mcc,
		BE_NAPI_WEIGHT);
}
2636
2637static void be_unmap_pci_bars(struct be_adapter *adapter)
2638{
Sathya Perla8788fdc2009-07-27 22:52:03 +00002639 if (adapter->csr)
2640 iounmap(adapter->csr);
2641 if (adapter->db)
2642 iounmap(adapter->db);
Sarveshwar Bandiba343c72010-03-31 02:56:12 +00002643 if (adapter->pcicfg && be_physfn(adapter))
Sathya Perla8788fdc2009-07-27 22:52:03 +00002644 iounmap(adapter->pcicfg);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002645}
2646
/* Map the PCI BARs this adapter needs.
 * Lancer exposes only a doorbell region in BAR 0. Legacy BE chips map
 * CSR (PF only, BAR 2), the doorbell BAR and the pcicfg BAR, whose BAR
 * numbers differ between BE2 (pcicfg=1, db=4) and BE3 (pcicfg=0,
 * db=4 for PF / 0 for VF). A VF derives pcicfg from the db mapping at
 * a fixed offset instead of a separate map.
 * Returns 0 on success or -ENOMEM, unwinding prior mappings on error.
 */
static int be_map_pci_bars(struct be_adapter *adapter)
{
	u8 __iomem *addr;
	int pcicfg_reg, db_reg;

	if (lancer_chip(adapter)) {
		addr = ioremap_nocache(pci_resource_start(adapter->pdev, 0),
			pci_resource_len(adapter->pdev, 0));
		if (addr == NULL)
			return -ENOMEM;
		adapter->db = addr;
		return 0;
	}

	if (be_physfn(adapter)) {
		addr = ioremap_nocache(pci_resource_start(adapter->pdev, 2),
				pci_resource_len(adapter->pdev, 2));
		if (addr == NULL)
			return -ENOMEM;
		adapter->csr = addr;
	}

	/* select BAR numbers by chip generation and PF/VF role */
	if (adapter->generation == BE_GEN2) {
		pcicfg_reg = 1;
		db_reg = 4;
	} else {
		pcicfg_reg = 0;
		if (be_physfn(adapter))
			db_reg = 4;
		else
			db_reg = 0;
	}
	addr = ioremap_nocache(pci_resource_start(adapter->pdev, db_reg),
				pci_resource_len(adapter->pdev, db_reg));
	if (addr == NULL)
		goto pci_map_err;
	adapter->db = addr;

	if (be_physfn(adapter)) {
		addr = ioremap_nocache(
				pci_resource_start(adapter->pdev, pcicfg_reg),
				pci_resource_len(adapter->pdev, pcicfg_reg));
		if (addr == NULL)
			goto pci_map_err;
		adapter->pcicfg = addr;
	} else
		/* VF: pcicfg lives inside the db BAR at a fixed offset */
		adapter->pcicfg = adapter->db + SRIOV_VF_PCICFG_OFFSET;

	return 0;
pci_map_err:
	be_unmap_pci_bars(adapter);
	return -ENOMEM;
}
2700
2701
2702static void be_ctrl_cleanup(struct be_adapter *adapter)
2703{
Sathya Perla8788fdc2009-07-27 22:52:03 +00002704 struct be_dma_mem *mem = &adapter->mbox_mem_alloced;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002705
2706 be_unmap_pci_bars(adapter);
2707
2708 if (mem->va)
Ivan Vecera2b7bceb2011-02-02 08:05:12 +00002709 dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
2710 mem->dma);
Sathya Perlae7b909a2009-11-22 22:01:10 +00002711
2712 mem = &adapter->mc_cmd_mem;
2713 if (mem->va)
Ivan Vecera2b7bceb2011-02-02 08:05:12 +00002714 dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
2715 mem->dma);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002716}
2717
/* Set up the control path: map PCI BARs, allocate the FW mailbox
 * (16-byte aligned inside a padded allocation, as the hardware
 * requires) and the multicast-cmd DMA buffer, and initialize the
 * mailbox/MCC locks. Uses goto-unwind on failure.
 * Returns 0 on success or a negative error code.
 */
static int be_ctrl_init(struct be_adapter *adapter)
{
	struct be_dma_mem *mbox_mem_alloc = &adapter->mbox_mem_alloced;
	struct be_dma_mem *mbox_mem_align = &adapter->mbox_mem;
	struct be_dma_mem *mc_cmd_mem = &adapter->mc_cmd_mem;
	int status;

	status = be_map_pci_bars(adapter);
	if (status)
		goto done;

	/* over-allocate by 16 so the mailbox can be aligned within */
	mbox_mem_alloc->size = sizeof(struct be_mcc_mailbox) + 16;
	mbox_mem_alloc->va = dma_alloc_coherent(&adapter->pdev->dev,
						mbox_mem_alloc->size,
						&mbox_mem_alloc->dma,
						GFP_KERNEL);
	if (!mbox_mem_alloc->va) {
		status = -ENOMEM;
		goto unmap_pci_bars;
	}

	/* the aligned view shares the allocation made above */
	mbox_mem_align->size = sizeof(struct be_mcc_mailbox);
	mbox_mem_align->va = PTR_ALIGN(mbox_mem_alloc->va, 16);
	mbox_mem_align->dma = PTR_ALIGN(mbox_mem_alloc->dma, 16);
	memset(mbox_mem_align->va, 0, sizeof(struct be_mcc_mailbox));

	mc_cmd_mem->size = sizeof(struct be_cmd_req_mcast_mac_config);
	mc_cmd_mem->va = dma_alloc_coherent(&adapter->pdev->dev,
					    mc_cmd_mem->size, &mc_cmd_mem->dma,
					    GFP_KERNEL);
	if (mc_cmd_mem->va == NULL) {
		status = -ENOMEM;
		goto free_mbox;
	}
	memset(mc_cmd_mem->va, 0, mc_cmd_mem->size);

	mutex_init(&adapter->mbox_lock);
	spin_lock_init(&adapter->mcc_lock);
	spin_lock_init(&adapter->mcc_cq_lock);

	init_completion(&adapter->flash_compl);
	/* snapshot config space for restore after EEH/reset */
	pci_save_state(adapter->pdev);
	return 0;

free_mbox:
	dma_free_coherent(&adapter->pdev->dev, mbox_mem_alloc->size,
			  mbox_mem_alloc->va, mbox_mem_alloc->dma);

unmap_pci_bars:
	be_unmap_pci_bars(adapter);

done:
	return status;
}
2772
2773static void be_stats_cleanup(struct be_adapter *adapter)
2774{
Sathya Perla3abcded2010-10-03 22:12:27 -07002775 struct be_dma_mem *cmd = &adapter->stats_cmd;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002776
2777 if (cmd->va)
Ivan Vecera2b7bceb2011-02-02 08:05:12 +00002778 dma_free_coherent(&adapter->pdev->dev, cmd->size,
2779 cmd->va, cmd->dma);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002780}
2781
2782static int be_stats_init(struct be_adapter *adapter)
2783{
Sathya Perla3abcded2010-10-03 22:12:27 -07002784 struct be_dma_mem *cmd = &adapter->stats_cmd;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002785
2786 cmd->size = sizeof(struct be_cmd_req_get_stats);
Ivan Vecera2b7bceb2011-02-02 08:05:12 +00002787 cmd->va = dma_alloc_coherent(&adapter->pdev->dev, cmd->size, &cmd->dma,
2788 GFP_KERNEL);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002789 if (cmd->va == NULL)
2790 return -1;
David S. Millerd291b9a2010-01-28 21:36:21 -08002791 memset(cmd->va, 0, cmd->size);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002792 return 0;
2793}
2794
/* PCI remove callback: tear down in strict reverse order of probe —
 * stop the worker, unregister the netdev (quiesces the stack), destroy
 * queues/interfaces (be_clear), free stats and control-path resources,
 * disable SR-IOV and MSI-X, then release the PCI device. The netdev is
 * freed last since 'adapter' lives inside it.
 */
static void __devexit be_remove(struct pci_dev *pdev)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);

	if (!adapter)
		return;

	/* stop the periodic worker before dismantling what it touches */
	cancel_delayed_work_sync(&adapter->work);

	unregister_netdev(adapter->netdev);

	be_clear(adapter);

	be_stats_cleanup(adapter);

	be_ctrl_cleanup(adapter);

	be_sriov_disable(adapter);

	be_msix_disable(adapter);

	pci_set_drvdata(pdev, NULL);
	pci_release_regions(pdev);
	pci_disable_device(pdev);

	/* frees 'adapter' too (allocated via alloc_etherdev) */
	free_netdev(adapter->netdev);
}
2822
/* Query static configuration from FW at probe time: FW version, port
 * number / function mode+caps, the permanent MAC (PF only — a VF gets
 * its MAC later from the PF), and the VLAN table budget.
 * Returns 0 on success or a negative/FW error status.
 */
static int be_get_config(struct be_adapter *adapter)
{
	int status;
	u8 mac[ETH_ALEN];

	status = be_cmd_get_fw_ver(adapter, adapter->fw_ver);
	if (status)
		return status;

	status = be_cmd_query_fw_cfg(adapter, &adapter->port_num,
			&adapter->function_mode, &adapter->function_caps);
	if (status)
		return status;

	memset(mac, 0, ETH_ALEN);

	if (be_physfn(adapter)) {
		status = be_cmd_mac_addr_query(adapter, mac,
			MAC_ADDRESS_TYPE_NETWORK, true /*permanent */, 0);

		if (status)
			return status;

		if (!is_valid_ether_addr(mac))
			return -EADDRNOTAVAIL;

		memcpy(adapter->netdev->dev_addr, mac, ETH_ALEN);
		memcpy(adapter->netdev->perm_addr, mac, ETH_ALEN);
	}

	/* 0x400 in function_mode appears to flag a shared/multi-function
	 * mode where the VLAN table is split 4 ways —
	 * NOTE(review): confirm the bit's meaning against the FW spec
	 */
	if (adapter->function_mode & 0x400)
		adapter->max_vlans = BE_NUM_VLANS_SUPPORTED/4;
	else
		adapter->max_vlans = BE_NUM_VLANS_SUPPORTED;

	return 0;
}
2860
Sathya Perlafe6d2a32010-11-21 23:25:50 +00002861static int be_dev_family_check(struct be_adapter *adapter)
2862{
2863 struct pci_dev *pdev = adapter->pdev;
2864 u32 sli_intf = 0, if_type;
2865
2866 switch (pdev->device) {
2867 case BE_DEVICE_ID1:
2868 case OC_DEVICE_ID1:
2869 adapter->generation = BE_GEN2;
2870 break;
2871 case BE_DEVICE_ID2:
2872 case OC_DEVICE_ID2:
2873 adapter->generation = BE_GEN3;
2874 break;
2875 case OC_DEVICE_ID3:
2876 pci_read_config_dword(pdev, SLI_INTF_REG_OFFSET, &sli_intf);
2877 if_type = (sli_intf & SLI_INTF_IF_TYPE_MASK) >>
2878 SLI_INTF_IF_TYPE_SHIFT;
2879
2880 if (((sli_intf & SLI_INTF_VALID_MASK) != SLI_INTF_VALID) ||
2881 if_type != 0x02) {
2882 dev_err(&pdev->dev, "SLI_INTF reg val is not valid\n");
2883 return -EINVAL;
2884 }
2885 if (num_vfs > 0) {
2886 dev_err(&pdev->dev, "VFs not supported\n");
2887 return -EINVAL;
2888 }
2889 adapter->sli_family = ((sli_intf & SLI_INTF_FAMILY_MASK) >>
2890 SLI_INTF_FAMILY_SHIFT);
2891 adapter->generation = BE_GEN3;
2892 break;
2893 default:
2894 adapter->generation = 0;
2895 }
2896 return 0;
2897}
2898
/*
 * PCI probe handler: bring up one BE/OC NIC function.
 *
 * Order matters here: PCI resources first, then netdev allocation,
 * controller/mailbox init, firmware sync, stats, MSI-X, be_setup()
 * and finally netdev registration.  Every failure unwinds through the
 * goto ladder at the bottom in exact reverse order of acquisition.
 *
 * Returns 0 on success or a negative errno; on failure all acquired
 * resources are released and drvdata is cleared.
 */
static int __devinit be_probe(struct pci_dev *pdev,
			const struct pci_device_id *pdev_id)
{
	int status = 0;
	struct be_adapter *adapter;
	struct net_device *netdev;

	status = pci_enable_device(pdev);
	if (status)
		goto do_none;

	status = pci_request_regions(pdev, DRV_NAME);
	if (status)
		goto disable_dev;
	pci_set_master(pdev);

	netdev = alloc_etherdev(sizeof(struct be_adapter));
	if (netdev == NULL) {
		status = -ENOMEM;
		goto rel_reg;
	}
	adapter = netdev_priv(netdev);
	adapter->pdev = pdev;
	pci_set_drvdata(pdev, adapter);

	/* Identify BE_GEN2/BE_GEN3 (and reject unsupported SLI configs)
	 * from the PCI device id before touching any chip registers.
	 */
	status = be_dev_family_check(adapter);
	if (status)
		goto free_netdev;

	adapter->netdev = netdev;
	SET_NETDEV_DEV(netdev, &pdev->dev);

	/* Prefer a 64-bit DMA mask; fall back to 32-bit if unsupported */
	status = dma_set_mask(&pdev->dev, DMA_BIT_MASK(64));
	if (!status) {
		netdev->features |= NETIF_F_HIGHDMA;
	} else {
		status = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
		if (status) {
			dev_err(&pdev->dev, "Could not set PCI DMA Mask\n");
			goto free_netdev;
		}
	}

	be_sriov_enable(adapter);

	status = be_ctrl_init(adapter);
	if (status)
		goto free_netdev;

	/* sync up with fw's ready state */
	if (be_physfn(adapter)) {
		status = be_cmd_POST(adapter);
		if (status)
			goto ctrl_clean;
	}

	/* tell fw we're ready to fire cmds */
	status = be_cmd_fw_init(adapter);
	if (status)
		goto ctrl_clean;

	/* Only the physical function may issue a function-level reset */
	if (be_physfn(adapter)) {
		status = be_cmd_reset_function(adapter);
		if (status)
			goto ctrl_clean;
	}

	status = be_stats_init(adapter);
	if (status)
		goto ctrl_clean;

	status = be_get_config(adapter);
	if (status)
		goto stats_clean;

	be_msix_enable(adapter);

	INIT_DELAYED_WORK(&adapter->work, be_worker);

	status = be_setup(adapter);
	if (status)
		goto msix_disable;

	be_netdev_init(netdev);
	status = register_netdev(netdev);
	if (status != 0)
		goto unsetup;
	/* Link state is unknown until the worker/async events report it */
	netif_carrier_off(netdev);

	/* Assign MAC addresses to the VFs; PF-only, and only when SR-IOV
	 * actually came up in be_sriov_enable() above.
	 */
	if (be_physfn(adapter) && adapter->sriov_enabled) {
		status = be_vf_eth_addr_config(adapter);
		if (status)
			goto unreg_netdev;
	}

	dev_info(&pdev->dev, "%s port %d\n", nic_name(pdev), adapter->port_num);
	/* Kick off the periodic worker (stats/housekeeping) */
	schedule_delayed_work(&adapter->work, msecs_to_jiffies(100));
	return 0;

	/* Error unwind: each label undoes one successful step above */
unreg_netdev:
	unregister_netdev(netdev);
unsetup:
	be_clear(adapter);
msix_disable:
	be_msix_disable(adapter);
stats_clean:
	be_stats_cleanup(adapter);
ctrl_clean:
	be_ctrl_cleanup(adapter);
free_netdev:
	/* NOTE(review): be_sriov_disable() is reached even on paths where
	 * be_sriov_enable() never ran (e.g. DMA-mask failure); presumed to
	 * be a safe no-op in that case — confirm in its definition.
	 */
	be_sriov_disable(adapter);
	free_netdev(netdev);
	pci_set_drvdata(pdev, NULL);
rel_reg:
	pci_release_regions(pdev);
disable_dev:
	pci_disable_device(pdev);
do_none:
	dev_err(&pdev->dev, "%s initialization failed\n", nic_name(pdev));
	return status;
}
3020
/*
 * PM suspend handler: arm wake-on-LAN if configured, detach and close
 * the netdev, tear down rings/queues via be_clear(), then power the
 * PCI function down to the requested state.
 *
 * NOTE(review): the periodic adapter->work worker scheduled in
 * be_probe() is not cancelled here — verify it cannot run against the
 * disabled device between be_clear() and pci_disable_device().
 */
static int be_suspend(struct pci_dev *pdev, pm_message_t state)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);
	struct net_device *netdev = adapter->netdev;

	if (adapter->wol)
		be_setup_wol(adapter, true);

	netif_device_detach(netdev);
	if (netif_running(netdev)) {
		/* be_close() expects RTNL held, as in the ndo_stop path */
		rtnl_lock();
		be_close(netdev);
		rtnl_unlock();
	}
	/* Remember current flow-control config so be_resume()/be_setup()
	 * can restore it after the device is re-initialized.
	 */
	be_cmd_get_flow_control(adapter, &adapter->tx_fc, &adapter->rx_fc);
	be_clear(adapter);

	pci_save_state(pdev);
	pci_disable_device(pdev);
	pci_set_power_state(pdev, pci_choose_state(pdev, state));
	return 0;
}
3043
/*
 * PM resume handler: re-enable the PCI function, restore its config
 * space, re-sync with firmware, rebuild the adapter with be_setup()
 * and reopen the interface if it was running before suspend.
 *
 * Returns 0 on success or the first failing step's status.
 */
static int be_resume(struct pci_dev *pdev)
{
	int status = 0;
	struct be_adapter *adapter = pci_get_drvdata(pdev);
	struct net_device *netdev = adapter->netdev;

	/* Keep the stack off the device until bring-up completes */
	netif_device_detach(netdev);

	status = pci_enable_device(pdev);
	if (status)
		return status;

	pci_set_power_state(pdev, 0);
	pci_restore_state(pdev);

	/* tell fw we're ready to fire cmds */
	status = be_cmd_fw_init(adapter);
	if (status)
		return status;

	/* NOTE(review): be_setup()'s return value is ignored here, unlike
	 * in be_probe() — a setup failure would go unreported.
	 */
	be_setup(adapter);
	if (netif_running(netdev)) {
		rtnl_lock();
		be_open(netdev);
		rtnl_unlock();
	}
	netif_device_attach(netdev);

	/* WoL was armed in be_suspend(); disarm it now that we're awake */
	if (adapter->wol)
		be_setup_wol(adapter, false);
	return 0;
}
3076
Sathya Perla82456b02010-02-17 01:35:37 +00003077/*
3078 * An FLR will stop BE from DMAing any data.
3079 */
3080static void be_shutdown(struct pci_dev *pdev)
3081{
3082 struct be_adapter *adapter = pci_get_drvdata(pdev);
3083 struct net_device *netdev = adapter->netdev;
3084
3085 netif_device_detach(netdev);
3086
3087 be_cmd_reset_function(adapter);
3088
3089 if (adapter->wol)
3090 be_setup_wol(adapter, true);
3091
3092 pci_disable_device(pdev);
Sathya Perla82456b02010-02-17 01:35:37 +00003093}
3094
/*
 * EEH error_detected callback: a PCI channel error was reported.
 * Flag the error (adapter->eeh_err), detach and close the netdev,
 * and tear down the adapter state so a slot reset can rebuild it.
 *
 * Returns PCI_ERS_RESULT_DISCONNECT for a permanent failure,
 * otherwise PCI_ERS_RESULT_NEED_RESET to request a slot reset.
 */
static pci_ers_result_t be_eeh_err_detected(struct pci_dev *pdev,
				pci_channel_state_t state)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);
	struct net_device *netdev = adapter->netdev;

	dev_err(&adapter->pdev->dev, "EEH error detected\n");

	/* Set before teardown so other paths can see the device is in
	 * an error state; cleared again in be_eeh_reset().
	 */
	adapter->eeh_err = true;

	netif_device_detach(netdev);

	if (netif_running(netdev)) {
		rtnl_lock();
		be_close(netdev);
		rtnl_unlock();
	}
	be_clear(adapter);

	if (state == pci_channel_io_perm_failure)
		return PCI_ERS_RESULT_DISCONNECT;

	pci_disable_device(pdev);

	return PCI_ERS_RESULT_NEED_RESET;
}
3121
/*
 * EEH slot_reset callback: the slot has been reset; re-enable the
 * device, restore config space and verify the card/firmware are
 * ready via POST.
 *
 * Returns PCI_ERS_RESULT_RECOVERED on success, otherwise
 * PCI_ERS_RESULT_DISCONNECT.
 */
static pci_ers_result_t be_eeh_reset(struct pci_dev *pdev)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);
	int status;

	dev_info(&adapter->pdev->dev, "EEH reset\n");
	/* Leaving the error state; cleared before any FW access */
	adapter->eeh_err = false;

	status = pci_enable_device(pdev);
	if (status)
		return PCI_ERS_RESULT_DISCONNECT;

	pci_set_master(pdev);
	pci_set_power_state(pdev, 0);
	pci_restore_state(pdev);

	/* Check if card is ok and fw is ready */
	status = be_cmd_POST(adapter);
	if (status)
		return PCI_ERS_RESULT_DISCONNECT;

	return PCI_ERS_RESULT_RECOVERED;
}
3145
/*
 * EEH resume callback: recovery succeeded; re-sync with firmware,
 * rebuild the adapter and reopen the interface if it was running.
 * On any failure only an error is logged — the netdev stays detached.
 */
static void be_eeh_resume(struct pci_dev *pdev)
{
	int status = 0;
	struct be_adapter *adapter = pci_get_drvdata(pdev);
	struct net_device *netdev = adapter->netdev;

	dev_info(&adapter->pdev->dev, "EEH resume\n");

	pci_save_state(pdev);

	/* tell fw we're ready to fire cmds */
	status = be_cmd_fw_init(adapter);
	if (status)
		goto err;

	status = be_setup(adapter);
	if (status)
		goto err;

	if (netif_running(netdev)) {
		status = be_open(netdev);
		if (status)
			goto err;
	}
	netif_device_attach(netdev);
	return;
err:
	dev_err(&adapter->pdev->dev, "EEH resume failed\n");
}
3175
/* PCI error-recovery (EEH/AER) callbacks wired into be_driver below */
static struct pci_error_handlers be_eeh_handlers = {
	.error_detected = be_eeh_err_detected,
	.slot_reset = be_eeh_reset,
	.resume = be_eeh_resume,
};
3181
/* PCI driver descriptor: probe/remove, legacy PM hooks, shutdown and
 * error recovery for all device ids listed in be_dev_ids.
 */
static struct pci_driver be_driver = {
	.name = DRV_NAME,
	.id_table = be_dev_ids,
	.probe = be_probe,
	.remove = be_remove,
	.suspend = be_suspend,
	.resume = be_resume,
	.shutdown = be_shutdown,
	.err_handler = &be_eeh_handlers
};
3192
3193static int __init be_init_module(void)
3194{
Joe Perches8e95a202009-12-03 07:58:21 +00003195 if (rx_frag_size != 8192 && rx_frag_size != 4096 &&
3196 rx_frag_size != 2048) {
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003197 printk(KERN_WARNING DRV_NAME
3198 " : Module param rx_frag_size must be 2048/4096/8192."
3199 " Using 2048\n");
3200 rx_frag_size = 2048;
3201 }
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003202
Sarveshwar Bandiba343c72010-03-31 02:56:12 +00003203 if (num_vfs > 32) {
3204 printk(KERN_WARNING DRV_NAME
3205 " : Module param num_vfs must not be greater than 32."
3206 "Using 32\n");
3207 num_vfs = 32;
3208 }
3209
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003210 return pci_register_driver(&be_driver);
3211}
3212module_init(be_init_module);
3213
/* Module exit point: unregister the PCI driver (triggers be_remove()
 * for every bound device).
 */
static void __exit be_exit_module(void)
{
	pci_unregister_driver(&be_driver);
}
module_exit(be_exit_module);