/*
 * Copyright (C) 2005 - 2010 ServerEngines
 * All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version 2
 * as published by the Free Software Foundation. The full GNU General
 * Public License is included in this distribution in the file called COPYING.
 *
 * Contact Information:
 * linux-drivers@serverengines.com
 *
 * ServerEngines
 * 209 N. Fair Oaks Ave
 * Sunnyvale, CA 94085
 */

#include "be.h"
#include "be_cmds.h"
#include <asm/div64.h>

MODULE_VERSION(DRV_VER);
MODULE_DESCRIPTION(DRV_DESC " " DRV_VER);
MODULE_AUTHOR("ServerEngines Corporation");
MODULE_LICENSE("GPL");

static unsigned int rx_frag_size = 2048;
static unsigned int num_vfs;
module_param(rx_frag_size, uint, S_IRUGO);
module_param(num_vfs, uint, S_IRUGO);
MODULE_PARM_DESC(rx_frag_size, "Size of a fragment that holds rcvd data.");
MODULE_PARM_DESC(num_vfs, "Number of PCI VFs to initialize");

static bool multi_rxq = true;
module_param(multi_rxq, bool, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(multi_rxq, "Multi Rx Queue support. Enabled by default");

static DEFINE_PCI_DEVICE_TABLE(be_dev_ids) = {
	{ PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID1) },
	{ PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID2) },
	{ PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID1) },
	{ PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID2) },
	{ PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID3) },
	{ 0 }
};
MODULE_DEVICE_TABLE(pci, be_dev_ids);
/* UE Status Low CSR */
static char *ue_status_low_desc[] = {
	"CEV",
	"CTX",
	"DBUF",
	"ERX",
	"Host",
	"MPU",
	"NDMA",
	"PTC",
	"RDMA",
	"RXF",
	"RXIPS",
	"RXULP0",
	"RXULP1",
	"RXULP2",
	"TIM",
	"TPOST",
	"TPRE",
	"TXIPS",
	"TXULP0",
	"TXULP1",
	"UC",
	"WDMA",
	"TXULP2",
	"HOST1",
	"P0_OB_LINK",
	"P1_OB_LINK",
	"HOST_GPIO",
	"MBOX",
	"AXGMAC0",
	"AXGMAC1",
	"JTAG",
	"MPU_INTPEND"
};
/* UE Status High CSR */
static char *ue_status_hi_desc[] = {
	"LPCMEMHOST",
	"MGMT_MAC",
	"PCS0ONLINE",
	"MPU_IRAM",
	"PCS1ONLINE",
	"PCTL0",
	"PCTL1",
	"PMEM",
	"RR",
	"TXPB",
	"RXPP",
	"XAUI",
	"TXP",
	"ARM",
	"IPC",
	"HOST2",
	"HOST3",
	"HOST4",
	"HOST5",
	"HOST6",
	"HOST7",
	"HOST8",
	"HOST9",
	"NETC",
	"Unknown",
	"Unknown",
	"Unknown",
	"Unknown",
	"Unknown",
	"Unknown",
	"Unknown",
	"Unknown"
};

static inline bool be_multi_rxq(struct be_adapter *adapter)
{
	return (adapter->num_rx_qs > 1);
}

static void be_queue_free(struct be_adapter *adapter, struct be_queue_info *q)
{
	struct be_dma_mem *mem = &q->dma_mem;
	if (mem->va)
		dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
				  mem->dma);
}

static int be_queue_alloc(struct be_adapter *adapter, struct be_queue_info *q,
		u16 len, u16 entry_size)
{
	struct be_dma_mem *mem = &q->dma_mem;

	memset(q, 0, sizeof(*q));
	q->len = len;
	q->entry_size = entry_size;
	mem->size = len * entry_size;
	mem->va = dma_alloc_coherent(&adapter->pdev->dev, mem->size, &mem->dma,
				     GFP_KERNEL);
	if (!mem->va)
		return -ENOMEM;
	memset(mem->va, 0, mem->size);
	return 0;
}

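/* Enable/disable interrupt delivery to the host by flipping the HOSTINTR
 * bit in the PCICFG membar.  The register is written only when the state
 * actually changes, and never after an EEH error has been flagged.
 */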
static void be_intr_set(struct be_adapter *adapter, bool enable)
{
	u8 __iomem *addr = adapter->pcicfg + PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET;
	u32 reg = ioread32(addr);
	u32 enabled = reg & MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;

	if (adapter->eeh_err)
		return;

	if (!enabled && enable)
		reg |= MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
	else if (enabled && !enable)
		reg &= ~MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
	else
		return;

	iowrite32(reg, addr);
}

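/* Doorbell helpers: each ring (RQ, TXULP, EQ, CQ) is kicked by writing a
 * packed value to its offset in the doorbell BAR.  The wmb() in the Rx/Tx
 * notify paths orders the descriptor writes in memory before the doorbell
 * write, so the adapter never sees a stale ring entry.
 */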
static void be_rxq_notify(struct be_adapter *adapter, u16 qid, u16 posted)
{
	u32 val = 0;
	val |= qid & DB_RQ_RING_ID_MASK;
	val |= posted << DB_RQ_NUM_POSTED_SHIFT;

	wmb();
	iowrite32(val, adapter->db + DB_RQ_OFFSET);
}

static void be_txq_notify(struct be_adapter *adapter, u16 qid, u16 posted)
{
	u32 val = 0;
	val |= qid & DB_TXULP_RING_ID_MASK;
	val |= (posted & DB_TXULP_NUM_POSTED_MASK) << DB_TXULP_NUM_POSTED_SHIFT;

	wmb();
	iowrite32(val, adapter->db + DB_TXULP1_OFFSET);
}

static void be_eq_notify(struct be_adapter *adapter, u16 qid,
		bool arm, bool clear_int, u16 num_popped)
{
	u32 val = 0;
	val |= qid & DB_EQ_RING_ID_MASK;
	val |= ((qid & DB_EQ_RING_ID_EXT_MASK) <<
			DB_EQ_RING_ID_EXT_MASK_SHIFT);

	if (adapter->eeh_err)
		return;

	if (arm)
		val |= 1 << DB_EQ_REARM_SHIFT;
	if (clear_int)
		val |= 1 << DB_EQ_CLR_SHIFT;
	val |= 1 << DB_EQ_EVNT_SHIFT;
	val |= num_popped << DB_EQ_NUM_POPPED_SHIFT;
	iowrite32(val, adapter->db + DB_EQ_OFFSET);
}

void be_cq_notify(struct be_adapter *adapter, u16 qid, bool arm, u16 num_popped)
{
	u32 val = 0;
	val |= qid & DB_CQ_RING_ID_MASK;
	val |= ((qid & DB_CQ_RING_ID_EXT_MASK) <<
			DB_CQ_RING_ID_EXT_MASK_SHIFT);

	if (adapter->eeh_err)
		return;

	if (arm)
		val |= 1 << DB_CQ_REARM_SHIFT;
	val |= num_popped << DB_CQ_NUM_POPPED_SHIFT;
	iowrite32(val, adapter->db + DB_CQ_OFFSET);
}

static int be_mac_addr_set(struct net_device *netdev, void *p)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct sockaddr *addr = p;
	int status = 0;

	if (!is_valid_ether_addr(addr->sa_data))
		return -EADDRNOTAVAIL;

	/* MAC addr configuration will be done in hardware for VFs
	 * by their corresponding PFs. Just copy to netdev addr here
	 */
	if (!be_physfn(adapter))
		goto netdev_addr;

	status = be_cmd_pmac_del(adapter, adapter->if_handle, adapter->pmac_id);
	if (status)
		return status;

	status = be_cmd_pmac_add(adapter, (u8 *)addr->sa_data,
			adapter->if_handle, &adapter->pmac_id);
netdev_addr:
	if (!status)
		memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);

	return status;
}

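/* Fold the per-RX-queue and per-port hardware counters into the single
 * struct net_device_stats the stack reports; rx_dropped is approximated
 * from the no-fragment drop counter of each RX ring.
 */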
void netdev_stats_update(struct be_adapter *adapter)
{
	struct be_hw_stats *hw_stats = hw_stats_from_cmd(adapter->stats_cmd.va);
	struct be_rxf_stats *rxf_stats = &hw_stats->rxf;
	struct be_port_rxf_stats *port_stats =
			&rxf_stats->port[adapter->port_num];
	struct net_device_stats *dev_stats = &adapter->netdev->stats;
	struct be_erx_stats *erx_stats = &hw_stats->erx;
	struct be_rx_obj *rxo;
	int i;

	memset(dev_stats, 0, sizeof(*dev_stats));
	for_all_rx_queues(adapter, rxo, i) {
		dev_stats->rx_packets += rx_stats(rxo)->rx_pkts;
		dev_stats->rx_bytes += rx_stats(rxo)->rx_bytes;
		dev_stats->multicast += rx_stats(rxo)->rx_mcast_pkts;
		/* no space in linux buffers: best possible approximation */
		dev_stats->rx_dropped +=
			erx_stats->rx_drops_no_fragments[rxo->q.id];
	}

	dev_stats->tx_packets = tx_stats(adapter)->be_tx_pkts;
	dev_stats->tx_bytes = tx_stats(adapter)->be_tx_bytes;

	/* bad pkts received */
	dev_stats->rx_errors = port_stats->rx_crc_errors +
		port_stats->rx_alignment_symbol_errors +
		port_stats->rx_in_range_errors +
		port_stats->rx_out_range_errors +
		port_stats->rx_frame_too_long +
		port_stats->rx_dropped_too_small +
		port_stats->rx_dropped_too_short +
		port_stats->rx_dropped_header_too_small +
		port_stats->rx_dropped_tcp_length +
		port_stats->rx_dropped_runt +
		port_stats->rx_tcp_checksum_errs +
		port_stats->rx_ip_checksum_errs +
		port_stats->rx_udp_checksum_errs;

	/* detailed rx errors */
	dev_stats->rx_length_errors = port_stats->rx_in_range_errors +
		port_stats->rx_out_range_errors +
		port_stats->rx_frame_too_long;

	dev_stats->rx_crc_errors = port_stats->rx_crc_errors;

	/* frame alignment errors */
	dev_stats->rx_frame_errors = port_stats->rx_alignment_symbol_errors;

	/* receiver fifo overrun */
	/* drops_no_pbuf is not per i/f; it's per BE card */
	dev_stats->rx_fifo_errors = port_stats->rx_fifo_overflow +
					port_stats->rx_input_fifo_overflow +
					rxf_stats->rx_drops_no_pbuf;
}

void be_link_status_update(struct be_adapter *adapter, bool link_up)
{
	struct net_device *netdev = adapter->netdev;

	/* If link came up or went down */
	if (adapter->link_up != link_up) {
		adapter->link_speed = -1;
		if (link_up) {
			netif_carrier_on(netdev);
			printk(KERN_INFO "%s: Link up\n", netdev->name);
		} else {
			netif_carrier_off(netdev);
			printk(KERN_INFO "%s: Link down\n", netdev->name);
		}
		adapter->link_up = link_up;
	}
}

/* Update the EQ delay in BE based on the RX frags consumed / sec */
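/* Adaptive interrupt coalescing: once a second the observed frags/sec is
 * recomputed and mapped to an EQ delay of (rx_fps / 110000) << 3, clamped
 * to [min_eqd, max_eqd]; results under 10 disable the delay entirely.
 * The new value is pushed to the adapter only when it changes.
 */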
static void be_rx_eqd_update(struct be_adapter *adapter, struct be_rx_obj *rxo)
{
	struct be_eq_obj *rx_eq = &rxo->rx_eq;
	struct be_rx_stats *stats = &rxo->stats;
	ulong now = jiffies;
	u32 eqd;

	if (!rx_eq->enable_aic)
		return;

	/* Wrapped around */
	if (time_before(now, stats->rx_fps_jiffies)) {
		stats->rx_fps_jiffies = now;
		return;
	}

	/* Update once a second */
	if ((now - stats->rx_fps_jiffies) < HZ)
		return;

	stats->rx_fps = (stats->rx_frags - stats->prev_rx_frags) /
			((now - stats->rx_fps_jiffies) / HZ);

	stats->rx_fps_jiffies = now;
	stats->prev_rx_frags = stats->rx_frags;
	eqd = stats->rx_fps / 110000;
	eqd = eqd << 3;
	if (eqd > rx_eq->max_eqd)
		eqd = rx_eq->max_eqd;
	if (eqd < rx_eq->min_eqd)
		eqd = rx_eq->min_eqd;
	if (eqd < 10)
		eqd = 0;
	if (eqd != rx_eq->cur_eqd)
		be_cmd_modify_eqd(adapter, rx_eq->q.id, eqd);

	rx_eq->cur_eqd = eqd;
}

static u32 be_calc_rate(u64 bytes, unsigned long ticks)
{
	u64 rate = bytes;

	do_div(rate, ticks / HZ);
	rate <<= 3;			/* bytes/sec -> bits/sec */
	do_div(rate, 1000000ul);	/* Mbits/sec */

	return rate;
}

static void be_tx_rate_update(struct be_adapter *adapter)
{
	struct be_tx_stats *stats = tx_stats(adapter);
	ulong now = jiffies;

	/* Wrapped around? */
	if (time_before(now, stats->be_tx_jiffies)) {
		stats->be_tx_jiffies = now;
		return;
	}

	/* Update tx rate once in two seconds */
	if ((now - stats->be_tx_jiffies) > 2 * HZ) {
		stats->be_tx_rate = be_calc_rate(stats->be_tx_bytes
						  - stats->be_tx_bytes_prev,
						 now - stats->be_tx_jiffies);
		stats->be_tx_jiffies = now;
		stats->be_tx_bytes_prev = stats->be_tx_bytes;
	}
}

static void be_tx_stats_update(struct be_adapter *adapter,
			u32 wrb_cnt, u32 copied, u32 gso_segs, bool stopped)
{
	struct be_tx_stats *stats = tx_stats(adapter);
	stats->be_tx_reqs++;
	stats->be_tx_wrbs += wrb_cnt;
	stats->be_tx_bytes += copied;
	stats->be_tx_pkts += (gso_segs ? gso_segs : 1);
	if (stopped)
		stats->be_tx_stops++;
}

/* Determine number of WRB entries needed to xmit data in an skb */
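/* One WRB is used for the linear head (if any), one per page fragment,
 * plus one for the header WRB.  Non-Lancer chips require an even number
 * of WRBs per packet, so an extra zero-length dummy WRB is added when the
 * count is odd; Lancer has no such restriction.
 */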
static u32 wrb_cnt_for_skb(struct be_adapter *adapter, struct sk_buff *skb,
						bool *dummy)
{
	int cnt = (skb->len > skb->data_len);

	cnt += skb_shinfo(skb)->nr_frags;

	/* to account for hdr wrb */
	cnt++;
	if (lancer_chip(adapter) || !(cnt & 1)) {
		*dummy = false;
	} else {
		/* add a dummy to make it an even num */
		cnt++;
		*dummy = true;
	}
	BUG_ON(cnt > BE_MAX_TX_FRAG_COUNT);
	return cnt;
}

static inline void wrb_fill(struct be_eth_wrb *wrb, u64 addr, int len)
{
	wrb->frag_pa_hi = upper_32_bits(addr);
	wrb->frag_pa_lo = addr & 0xFFFFFFFF;
	wrb->frag_len = len & ETH_WRB_FRAG_LEN_MASK;
}

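/* Build the header WRB that precedes the data WRBs: it carries the total
 * frame length, WRB count, and the offload flags (LSO/LSO6, TCP/UDP
 * checksum, VLAN).  If the VLAN priority handed down by the stack is not
 * in the available priority bitmap, it is rewritten to the adapter's
 * recommended priority.
 */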
static void wrb_fill_hdr(struct be_adapter *adapter, struct be_eth_hdr_wrb *hdr,
		struct sk_buff *skb, u32 wrb_cnt, u32 len)
{
	u8 vlan_prio = 0;
	u16 vlan_tag = 0;

	memset(hdr, 0, sizeof(*hdr));

	AMAP_SET_BITS(struct amap_eth_hdr_wrb, crc, hdr, 1);

	if (skb_is_gso(skb)) {
		AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso, hdr, 1);
		AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso_mss,
			hdr, skb_shinfo(skb)->gso_size);
		if (skb_is_gso_v6(skb) && !lancer_chip(adapter))
			AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso6, hdr, 1);
		if (lancer_chip(adapter) && adapter->sli_family ==
							LANCER_A0_SLI_FAMILY) {
			AMAP_SET_BITS(struct amap_eth_hdr_wrb, ipcs, hdr, 1);
			if (is_tcp_pkt(skb))
				AMAP_SET_BITS(struct amap_eth_hdr_wrb,
								tcpcs, hdr, 1);
			else if (is_udp_pkt(skb))
				AMAP_SET_BITS(struct amap_eth_hdr_wrb,
								udpcs, hdr, 1);
		}
	} else if (skb->ip_summed == CHECKSUM_PARTIAL) {
		if (is_tcp_pkt(skb))
			AMAP_SET_BITS(struct amap_eth_hdr_wrb, tcpcs, hdr, 1);
		else if (is_udp_pkt(skb))
			AMAP_SET_BITS(struct amap_eth_hdr_wrb, udpcs, hdr, 1);
	}

	if (adapter->vlan_grp && vlan_tx_tag_present(skb)) {
		AMAP_SET_BITS(struct amap_eth_hdr_wrb, vlan, hdr, 1);
		vlan_tag = vlan_tx_tag_get(skb);
		vlan_prio = (vlan_tag & VLAN_PRIO_MASK) >> VLAN_PRIO_SHIFT;
		/* If vlan priority provided by OS is NOT in available bmap */
		if (!(adapter->vlan_prio_bmap & (1 << vlan_prio)))
			vlan_tag = (vlan_tag & ~VLAN_PRIO_MASK) |
					adapter->recommended_prio;
		AMAP_SET_BITS(struct amap_eth_hdr_wrb, vlan_tag, hdr, vlan_tag);
	}

	AMAP_SET_BITS(struct amap_eth_hdr_wrb, event, hdr, 1);
	AMAP_SET_BITS(struct amap_eth_hdr_wrb, complete, hdr, 1);
	AMAP_SET_BITS(struct amap_eth_hdr_wrb, num_wrb, hdr, wrb_cnt);
	AMAP_SET_BITS(struct amap_eth_hdr_wrb, len, hdr, len);
}

static void unmap_tx_frag(struct device *dev, struct be_eth_wrb *wrb,
		bool unmap_single)
{
	dma_addr_t dma;

	be_dws_le_to_cpu(wrb, sizeof(*wrb));

	dma = (u64)wrb->frag_pa_hi << 32 | (u64)wrb->frag_pa_lo;
	if (wrb->frag_len) {
		if (unmap_single)
			dma_unmap_single(dev, dma, wrb->frag_len,
					 DMA_TO_DEVICE);
		else
			dma_unmap_page(dev, dma, wrb->frag_len, DMA_TO_DEVICE);
	}
}

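/* DMA-map the skb and fill one WRB per fragment: the linear head is mapped
 * with dma_map_single(), each page frag with dma_map_page().  On a mapping
 * failure, txq->head is rewound and every WRB mapped so far is unmapped,
 * leaving the ring exactly as it was found.
 */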
static int make_tx_wrbs(struct be_adapter *adapter,
		struct sk_buff *skb, u32 wrb_cnt, bool dummy_wrb)
{
	dma_addr_t busaddr;
	int i, copied = 0;
	struct device *dev = &adapter->pdev->dev;
	struct sk_buff *first_skb = skb;
	struct be_queue_info *txq = &adapter->tx_obj.q;
	struct be_eth_wrb *wrb;
	struct be_eth_hdr_wrb *hdr;
	bool map_single = false;
	u16 map_head;

	hdr = queue_head_node(txq);
	queue_head_inc(txq);
	map_head = txq->head;

	if (skb->len > skb->data_len) {
		int len = skb_headlen(skb);
		busaddr = dma_map_single(dev, skb->data, len, DMA_TO_DEVICE);
		if (dma_mapping_error(dev, busaddr))
			goto dma_err;
		map_single = true;
		wrb = queue_head_node(txq);
		wrb_fill(wrb, busaddr, len);
		be_dws_cpu_to_le(wrb, sizeof(*wrb));
		queue_head_inc(txq);
		copied += len;
	}

	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		struct skb_frag_struct *frag =
			&skb_shinfo(skb)->frags[i];
		busaddr = dma_map_page(dev, frag->page, frag->page_offset,
				       frag->size, DMA_TO_DEVICE);
		if (dma_mapping_error(dev, busaddr))
			goto dma_err;
		wrb = queue_head_node(txq);
		wrb_fill(wrb, busaddr, frag->size);
		be_dws_cpu_to_le(wrb, sizeof(*wrb));
		queue_head_inc(txq);
		copied += frag->size;
	}

	if (dummy_wrb) {
		wrb = queue_head_node(txq);
		wrb_fill(wrb, 0, 0);
		be_dws_cpu_to_le(wrb, sizeof(*wrb));
		queue_head_inc(txq);
	}

	wrb_fill_hdr(adapter, hdr, first_skb, wrb_cnt, copied);
	be_dws_cpu_to_le(hdr, sizeof(*hdr));

	return copied;
dma_err:
	txq->head = map_head;
	while (copied) {
		wrb = queue_head_node(txq);
		unmap_tx_frag(dev, wrb, map_single);
		map_single = false;
		copied -= wrb->frag_len;
		queue_head_inc(txq);
	}
	return 0;
}

static netdev_tx_t be_xmit(struct sk_buff *skb,
			struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_tx_obj *tx_obj = &adapter->tx_obj;
	struct be_queue_info *txq = &tx_obj->q;
	u32 wrb_cnt = 0, copied = 0;
	u32 start = txq->head;
	bool dummy_wrb, stopped = false;

	wrb_cnt = wrb_cnt_for_skb(adapter, skb, &dummy_wrb);

	copied = make_tx_wrbs(adapter, skb, wrb_cnt, dummy_wrb);
	if (copied) {
		/* record the sent skb in the sent_skb table */
		BUG_ON(tx_obj->sent_skb_list[start]);
		tx_obj->sent_skb_list[start] = skb;

		/* Ensure txq has space for the next skb; Else stop the queue
		 * *BEFORE* ringing the tx doorbell, so that we serialize the
		 * tx compls of the current transmit which'll wake up the queue
		 */
		atomic_add(wrb_cnt, &txq->used);
		if ((BE_MAX_TX_FRAG_COUNT + atomic_read(&txq->used)) >=
								txq->len) {
			netif_stop_queue(netdev);
			stopped = true;
		}

		be_txq_notify(adapter, txq->id, wrb_cnt);

		be_tx_stats_update(adapter, wrb_cnt, copied,
				skb_shinfo(skb)->gso_segs, stopped);
	} else {
		txq->head = start;
		dev_kfree_skb_any(skb);
	}
	return NETDEV_TX_OK;
}

static int be_change_mtu(struct net_device *netdev, int new_mtu)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	if (new_mtu < BE_MIN_MTU ||
			new_mtu > (BE_MAX_JUMBO_FRAME_SIZE -
					(ETH_HLEN + ETH_FCS_LEN))) {
		dev_info(&adapter->pdev->dev,
			"MTU must be between %d and %d bytes\n",
			BE_MIN_MTU,
			(BE_MAX_JUMBO_FRAME_SIZE - (ETH_HLEN + ETH_FCS_LEN)));
		return -EINVAL;
	}
	dev_info(&adapter->pdev->dev, "MTU changed from %d to %d bytes\n",
			netdev->mtu, new_mtu);
	netdev->mtu = new_mtu;
	return 0;
}

/*
 * A max of 64 (BE_NUM_VLANS_SUPPORTED) vlans can be configured in BE.
 * If the user configures more, place BE in vlan promiscuous mode.
 */
static int be_vid_config(struct be_adapter *adapter, bool vf, u32 vf_num)
{
	u16 vtag[BE_NUM_VLANS_SUPPORTED];
	u16 ntags = 0, i;
	int status = 0;
	u32 if_handle;

	if (vf) {
		if_handle = adapter->vf_cfg[vf_num].vf_if_handle;
		vtag[0] = cpu_to_le16(adapter->vf_cfg[vf_num].vf_vlan_tag);
		status = be_cmd_vlan_config(adapter, if_handle, vtag, 1, 1, 0);
	}

	if (adapter->vlans_added <= adapter->max_vlans) {
		/* Construct VLAN Table to give to HW */
		for (i = 0; i < VLAN_N_VID; i++) {
			if (adapter->vlan_tag[i]) {
				vtag[ntags] = cpu_to_le16(i);
				ntags++;
			}
		}
		status = be_cmd_vlan_config(adapter, adapter->if_handle,
					vtag, ntags, 1, 0);
	} else {
		status = be_cmd_vlan_config(adapter, adapter->if_handle,
					NULL, 0, 1, 1);
	}

	return status;
}

static void be_vlan_register(struct net_device *netdev, struct vlan_group *grp)
{
	struct be_adapter *adapter = netdev_priv(netdev);

	adapter->vlan_grp = grp;
}

static void be_vlan_add_vid(struct net_device *netdev, u16 vid)
{
	struct be_adapter *adapter = netdev_priv(netdev);

	adapter->vlans_added++;
	if (!be_physfn(adapter))
		return;

	adapter->vlan_tag[vid] = 1;
	if (adapter->vlans_added <= (adapter->max_vlans + 1))
		be_vid_config(adapter, false, 0);
}

static void be_vlan_rem_vid(struct net_device *netdev, u16 vid)
{
	struct be_adapter *adapter = netdev_priv(netdev);

	adapter->vlans_added--;
	vlan_group_set_device(adapter->vlan_grp, vid, NULL);

	if (!be_physfn(adapter))
		return;

	adapter->vlan_tag[vid] = 0;
	if (adapter->vlans_added <= adapter->max_vlans)
		be_vid_config(adapter, false, 0);
}

static void be_set_multicast_list(struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);

	if (netdev->flags & IFF_PROMISC) {
		be_cmd_promiscuous_config(adapter, adapter->port_num, 1);
		adapter->promiscuous = true;
		goto done;
	}

	/* BE was previously in promiscuous mode; disable it */
	if (adapter->promiscuous) {
		adapter->promiscuous = false;
		be_cmd_promiscuous_config(adapter, adapter->port_num, 0);
	}

	/* Enable multicast promisc if num configured exceeds what we support */
	if (netdev->flags & IFF_ALLMULTI ||
	    netdev_mc_count(netdev) > BE_MAX_MC) {
		be_cmd_multicast_set(adapter, adapter->if_handle, NULL,
				&adapter->mc_cmd_mem);
		goto done;
	}

	be_cmd_multicast_set(adapter, adapter->if_handle, netdev,
		&adapter->mc_cmd_mem);
done:
	return;
}

static int be_set_vf_mac(struct net_device *netdev, int vf, u8 *mac)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	int status;

	if (!adapter->sriov_enabled)
		return -EPERM;

	if (!is_valid_ether_addr(mac) || (vf >= num_vfs))
		return -EINVAL;

	if (adapter->vf_cfg[vf].vf_pmac_id != BE_INVALID_PMAC_ID)
		status = be_cmd_pmac_del(adapter,
					adapter->vf_cfg[vf].vf_if_handle,
					adapter->vf_cfg[vf].vf_pmac_id);

	status = be_cmd_pmac_add(adapter, mac,
				adapter->vf_cfg[vf].vf_if_handle,
				&adapter->vf_cfg[vf].vf_pmac_id);

	if (status)
		dev_err(&adapter->pdev->dev, "MAC %pM set on VF %d failed\n",
				mac, vf);
	else
		memcpy(adapter->vf_cfg[vf].vf_mac_addr, mac, ETH_ALEN);

	return status;
}

static int be_get_vf_config(struct net_device *netdev, int vf,
			struct ifla_vf_info *vi)
{
	struct be_adapter *adapter = netdev_priv(netdev);

	if (!adapter->sriov_enabled)
		return -EPERM;

	if (vf >= num_vfs)
		return -EINVAL;

	vi->vf = vf;
	vi->tx_rate = adapter->vf_cfg[vf].vf_tx_rate;
	vi->vlan = adapter->vf_cfg[vf].vf_vlan_tag;
	vi->qos = 0;
	memcpy(&vi->mac, adapter->vf_cfg[vf].vf_mac_addr, ETH_ALEN);

	return 0;
}

static int be_set_vf_vlan(struct net_device *netdev,
			int vf, u16 vlan, u8 qos)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	int status = 0;

	if (!adapter->sriov_enabled)
		return -EPERM;

	if ((vf >= num_vfs) || (vlan > 4095))
		return -EINVAL;

	if (vlan) {
		adapter->vf_cfg[vf].vf_vlan_tag = vlan;
		adapter->vlans_added++;
	} else {
		adapter->vf_cfg[vf].vf_vlan_tag = 0;
		adapter->vlans_added--;
	}

	status = be_vid_config(adapter, true, vf);

	if (status)
		dev_info(&adapter->pdev->dev,
				"VLAN %d config on VF %d failed\n", vlan, vf);
	return status;
}

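/* The requested rate appears to be in Mbps (capped at 10000 for the 10G
 * link) and is programmed into the adapter QoS command in units of
 * 10 Mbps (rate / 10), addressed by function number vf + 1.
 */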
static int be_set_vf_tx_rate(struct net_device *netdev,
			int vf, int rate)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	int status = 0;

	if (!adapter->sriov_enabled)
		return -EPERM;

	if ((vf >= num_vfs) || (rate < 0))
		return -EINVAL;

	if (rate > 10000)
		rate = 10000;

	adapter->vf_cfg[vf].vf_tx_rate = rate;
	status = be_cmd_set_qos(adapter, rate / 10, vf + 1);

	if (status)
		dev_info(&adapter->pdev->dev,
				"tx rate %d on VF %d failed\n", rate, vf);
	return status;
}

static void be_rx_rate_update(struct be_rx_obj *rxo)
{
	struct be_rx_stats *stats = &rxo->stats;
	ulong now = jiffies;

	/* Wrapped around */
	if (time_before(now, stats->rx_jiffies)) {
		stats->rx_jiffies = now;
		return;
	}

	/* Update the rate once in two seconds */
	if ((now - stats->rx_jiffies) < 2 * HZ)
		return;

	stats->rx_rate = be_calc_rate(stats->rx_bytes - stats->rx_bytes_prev,
				now - stats->rx_jiffies);
	stats->rx_jiffies = now;
	stats->rx_bytes_prev = stats->rx_bytes;
}

static void be_rx_stats_update(struct be_rx_obj *rxo,
		u32 pktsize, u16 numfrags, u8 pkt_type)
{
	struct be_rx_stats *stats = &rxo->stats;

	stats->rx_compl++;
	stats->rx_frags += numfrags;
	stats->rx_bytes += pktsize;
	stats->rx_pkts++;
	if (pkt_type == BE_MULTICAST_PACKET)
		stats->rx_mcast_pkts++;
}

static inline bool csum_passed(struct be_eth_rx_compl *rxcp)
{
	u8 l4_cksm, ipv6, ipcksm;

	l4_cksm = AMAP_GET_BITS(struct amap_eth_rx_compl, l4_cksm, rxcp);
	ipcksm = AMAP_GET_BITS(struct amap_eth_rx_compl, ipcksm, rxcp);
	ipv6 = AMAP_GET_BITS(struct amap_eth_rx_compl, ip_version, rxcp);

	/* Ignore ipcksm for ipv6 pkts */
	return l4_cksm && (ipcksm || ipv6);
}

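/* Look up the page_info entry for an Rx fragment index.  Fragments carved
 * from one compound page share a single DMA mapping; only the entry
 * flagged as last_page_user (the final fragment of the page) performs the
 * unmap.
 */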
static struct be_rx_page_info *
get_rx_page_info(struct be_adapter *adapter,
		struct be_rx_obj *rxo,
		u16 frag_idx)
{
	struct be_rx_page_info *rx_page_info;
	struct be_queue_info *rxq = &rxo->q;

	rx_page_info = &rxo->page_info_tbl[frag_idx];
	BUG_ON(!rx_page_info->page);

	if (rx_page_info->last_page_user) {
		dma_unmap_page(&adapter->pdev->dev,
			       dma_unmap_addr(rx_page_info, bus),
			       adapter->big_page_size, DMA_FROM_DEVICE);
		rx_page_info->last_page_user = false;
	}

	atomic_dec(&rxq->used);
	return rx_page_info;
}

/* Throw away the data in the Rx completion */
static void be_rx_compl_discard(struct be_adapter *adapter,
		struct be_rx_obj *rxo,
		struct be_eth_rx_compl *rxcp)
{
	struct be_queue_info *rxq = &rxo->q;
	struct be_rx_page_info *page_info;
	u16 rxq_idx, i, num_rcvd;

	rxq_idx = AMAP_GET_BITS(struct amap_eth_rx_compl, fragndx, rxcp);
	num_rcvd = AMAP_GET_BITS(struct amap_eth_rx_compl, numfrags, rxcp);

	/* Skip out-of-buffer compl(lancer) or flush compl(BE) */
	if (likely(rxq_idx != rxo->last_frag_index && num_rcvd != 0)) {

		rxo->last_frag_index = rxq_idx;

		for (i = 0; i < num_rcvd; i++) {
			page_info = get_rx_page_info(adapter, rxo, rxq_idx);
			put_page(page_info->page);
			memset(page_info, 0, sizeof(*page_info));
			index_inc(&rxq_idx, rxq->len);
		}
	}
}

/*
 * skb_fill_rx_data forms a complete skb for an ether frame
 * indicated by rxcp.
 */
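/* The first BE_HDR_LEN bytes are copied into the linear skb head so the
 * stack can parse headers cheaply; any remaining data stays in the Rx
 * pages, attached as skb frags, with frags that share a physical page
 * coalesced into a single slot.
 */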
static void skb_fill_rx_data(struct be_adapter *adapter, struct be_rx_obj *rxo,
			struct sk_buff *skb, struct be_eth_rx_compl *rxcp,
			u16 num_rcvd)
{
	struct be_queue_info *rxq = &rxo->q;
	struct be_rx_page_info *page_info;
	u16 rxq_idx, i, j;
	u32 pktsize, hdr_len, curr_frag_len, size;
	u8 *start;
	u8 pkt_type;

	rxq_idx = AMAP_GET_BITS(struct amap_eth_rx_compl, fragndx, rxcp);
	pktsize = AMAP_GET_BITS(struct amap_eth_rx_compl, pktsize, rxcp);
	pkt_type = AMAP_GET_BITS(struct amap_eth_rx_compl, cast_enc, rxcp);

	page_info = get_rx_page_info(adapter, rxo, rxq_idx);

	start = page_address(page_info->page) + page_info->page_offset;
	prefetch(start);

	/* Copy data in the first descriptor of this completion */
	curr_frag_len = min(pktsize, rx_frag_size);

	/* Copy the header portion into skb_data */
	hdr_len = min((u32)BE_HDR_LEN, curr_frag_len);
	memcpy(skb->data, start, hdr_len);
	skb->len = curr_frag_len;
	if (curr_frag_len <= BE_HDR_LEN) { /* tiny packet */
		/* Complete packet has now been moved to data */
		put_page(page_info->page);
		skb->data_len = 0;
		skb->tail += curr_frag_len;
	} else {
		skb_shinfo(skb)->nr_frags = 1;
		skb_shinfo(skb)->frags[0].page = page_info->page;
		skb_shinfo(skb)->frags[0].page_offset =
					page_info->page_offset + hdr_len;
		skb_shinfo(skb)->frags[0].size = curr_frag_len - hdr_len;
		skb->data_len = curr_frag_len - hdr_len;
		skb->tail += hdr_len;
	}
	page_info->page = NULL;

	if (pktsize <= rx_frag_size) {
		BUG_ON(num_rcvd != 1);
		goto done;
	}

	/* More frags present for this completion */
	size = pktsize;
	for (i = 1, j = 0; i < num_rcvd; i++) {
		size -= curr_frag_len;
		index_inc(&rxq_idx, rxq->len);
		page_info = get_rx_page_info(adapter, rxo, rxq_idx);

		curr_frag_len = min(size, rx_frag_size);

		/* Coalesce all frags from the same physical page in one slot */
		if (page_info->page_offset == 0) {
			/* Fresh page */
			j++;
			skb_shinfo(skb)->frags[j].page = page_info->page;
			skb_shinfo(skb)->frags[j].page_offset =
							page_info->page_offset;
			skb_shinfo(skb)->frags[j].size = 0;
			skb_shinfo(skb)->nr_frags++;
		} else {
			put_page(page_info->page);
		}

		skb_shinfo(skb)->frags[j].size += curr_frag_len;
		skb->len += curr_frag_len;
		skb->data_len += curr_frag_len;

		page_info->page = NULL;
	}
	BUG_ON(j > MAX_SKB_FRAGS);

done:
	be_rx_stats_update(rxo, pktsize, num_rcvd, pkt_type);
}

/* Process the RX completion indicated by rxcp when GRO is disabled */
static void be_rx_compl_process(struct be_adapter *adapter,
			struct be_rx_obj *rxo,
			struct be_eth_rx_compl *rxcp)
{
	struct sk_buff *skb;
	u32 vlanf, vid;
	u16 num_rcvd;
	u8 vtm;

	num_rcvd = AMAP_GET_BITS(struct amap_eth_rx_compl, numfrags, rxcp);

	skb = netdev_alloc_skb_ip_align(adapter->netdev, BE_HDR_LEN);
	if (unlikely(!skb)) {
		if (net_ratelimit())
			dev_warn(&adapter->pdev->dev, "skb alloc failed\n");
		be_rx_compl_discard(adapter, rxo, rxcp);
		return;
	}

	skb_fill_rx_data(adapter, rxo, skb, rxcp, num_rcvd);

	if (likely(adapter->rx_csum && csum_passed(rxcp)))
		skb->ip_summed = CHECKSUM_UNNECESSARY;
	else
		skb_checksum_none_assert(skb);

	skb->truesize = skb->len + sizeof(struct sk_buff);
	skb->protocol = eth_type_trans(skb, adapter->netdev);

	vlanf = AMAP_GET_BITS(struct amap_eth_rx_compl, vtp, rxcp);
	vtm = AMAP_GET_BITS(struct amap_eth_rx_compl, vtm, rxcp);

	/* vlanf could be wrongly set in some cards.
	 * ignore if vtm is not set */
	if ((adapter->function_mode & 0x400) && !vtm)
		vlanf = 0;

	if (unlikely(vlanf)) {
		if (!adapter->vlan_grp || adapter->vlans_added == 0) {
			kfree_skb(skb);
			return;
		}
		vid = AMAP_GET_BITS(struct amap_eth_rx_compl, vlan_tag, rxcp);
		if (!lancer_chip(adapter))
			vid = swab16(vid);
		vlan_hwaccel_receive_skb(skb, adapter->vlan_grp, vid);
	} else {
		netif_receive_skb(skb);
	}
}

/* Process the RX completion indicated by rxcp when GRO is enabled */
static void be_rx_compl_process_gro(struct be_adapter *adapter,
		struct be_rx_obj *rxo,
		struct be_eth_rx_compl *rxcp)
{
	struct be_rx_page_info *page_info;
	struct sk_buff *skb = NULL;
	struct be_queue_info *rxq = &rxo->q;
	struct be_eq_obj *eq_obj = &rxo->rx_eq;
	u32 num_rcvd, pkt_size, remaining, vlanf, curr_frag_len;
	u16 i, rxq_idx = 0, vid, j;
	u8 vtm;
	u8 pkt_type;

	num_rcvd = AMAP_GET_BITS(struct amap_eth_rx_compl, numfrags, rxcp);
	pkt_size = AMAP_GET_BITS(struct amap_eth_rx_compl, pktsize, rxcp);
	vlanf = AMAP_GET_BITS(struct amap_eth_rx_compl, vtp, rxcp);
	rxq_idx = AMAP_GET_BITS(struct amap_eth_rx_compl, fragndx, rxcp);
	vtm = AMAP_GET_BITS(struct amap_eth_rx_compl, vtm, rxcp);
	pkt_type = AMAP_GET_BITS(struct amap_eth_rx_compl, cast_enc, rxcp);

	/* vlanf could be wrongly set in some cards.
	 * ignore if vtm is not set */
	if ((adapter->function_mode & 0x400) && !vtm)
		vlanf = 0;

	skb = napi_get_frags(&eq_obj->napi);
	if (!skb) {
		be_rx_compl_discard(adapter, rxo, rxcp);
		return;
	}

	remaining = pkt_size;
	for (i = 0, j = -1; i < num_rcvd; i++) {
		page_info = get_rx_page_info(adapter, rxo, rxq_idx);

		curr_frag_len = min(remaining, rx_frag_size);

		/* Coalesce all frags from the same physical page in one slot */
		if (i == 0 || page_info->page_offset == 0) {
			/* First frag or Fresh page */
			j++;
			skb_shinfo(skb)->frags[j].page = page_info->page;
			skb_shinfo(skb)->frags[j].page_offset =
							page_info->page_offset;
			skb_shinfo(skb)->frags[j].size = 0;
		} else {
			put_page(page_info->page);
		}
		skb_shinfo(skb)->frags[j].size += curr_frag_len;

		remaining -= curr_frag_len;
		index_inc(&rxq_idx, rxq->len);
		memset(page_info, 0, sizeof(*page_info));
	}
	BUG_ON(j > MAX_SKB_FRAGS);

	skb_shinfo(skb)->nr_frags = j + 1;
	skb->len = pkt_size;
	skb->data_len = pkt_size;
	skb->truesize += pkt_size;
	skb->ip_summed = CHECKSUM_UNNECESSARY;

	if (likely(!vlanf)) {
		napi_gro_frags(&eq_obj->napi);
	} else {
		vid = AMAP_GET_BITS(struct amap_eth_rx_compl, vlan_tag, rxcp);
		if (!lancer_chip(adapter))
			vid = swab16(vid);

		if (!adapter->vlan_grp || adapter->vlans_added == 0)
			return;

		vlan_gro_frags(&eq_obj->napi, adapter->vlan_grp, vid);
	}

	be_rx_stats_update(rxo, pkt_size, num_rcvd, pkt_type);
}

static struct be_eth_rx_compl *be_rx_compl_get(struct be_rx_obj *rxo)
{
	struct be_eth_rx_compl *rxcp = queue_tail_node(&rxo->cq);

	if (rxcp->dw[offsetof(struct amap_eth_rx_compl, valid) / 32] == 0)
		return NULL;

	rmb();
	be_dws_le_to_cpu(rxcp, sizeof(*rxcp));

	queue_tail_inc(&rxo->cq);
	return rxcp;
}

/* To reset the valid bit, we need to reset the whole word as
 * when walking the queue the valid entries are little-endian
 * and invalid entries are host endian
 */
static inline void be_rx_compl_reset(struct be_eth_rx_compl *rxcp)
{
	rxcp->dw[offsetof(struct amap_eth_rx_compl, valid) / 32] = 0;
}

static inline struct page *be_alloc_pages(u32 size)
{
	gfp_t alloc_flags = GFP_ATOMIC;
	u32 order = get_order(size);
	if (order > 0)
		alloc_flags |= __GFP_COMP;
	return alloc_pages(alloc_flags, order);
}

/*
 * Allocate a page, split it to fragments of size rx_frag_size and post as
 * receive buffers to BE
 */
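/* Each big page is carved into rx_frag_size chunks; the chunk that ends a
 * page is flagged last_page_user so the DMA mapping is released exactly
 * once.  If allocation fails while the ring is empty, rx_post_starved is
 * set and be_worker replenishes later.
 */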
static void be_post_rx_frags(struct be_rx_obj *rxo)
{
	struct be_adapter *adapter = rxo->adapter;
	struct be_rx_page_info *page_info_tbl = rxo->page_info_tbl;
	struct be_rx_page_info *page_info = NULL, *prev_page_info = NULL;
	struct be_queue_info *rxq = &rxo->q;
	struct page *pagep = NULL;
	struct be_eth_rx_d *rxd;
	u64 page_dmaaddr = 0, frag_dmaaddr;
	u32 posted, page_offset = 0;

	page_info = &rxo->page_info_tbl[rxq->head];
	for (posted = 0; posted < MAX_RX_POST && !page_info->page; posted++) {
		if (!pagep) {
			pagep = be_alloc_pages(adapter->big_page_size);
			if (unlikely(!pagep)) {
				rxo->stats.rx_post_fail++;
				break;
			}
			page_dmaaddr = dma_map_page(&adapter->pdev->dev, pagep,
						    0, adapter->big_page_size,
						    DMA_FROM_DEVICE);
			page_info->page_offset = 0;
		} else {
			get_page(pagep);
			page_info->page_offset = page_offset + rx_frag_size;
		}
		page_offset = page_info->page_offset;
		page_info->page = pagep;
		dma_unmap_addr_set(page_info, bus, page_dmaaddr);
		frag_dmaaddr = page_dmaaddr + page_info->page_offset;

		rxd = queue_head_node(rxq);
		rxd->fragpa_lo = cpu_to_le32(frag_dmaaddr & 0xFFFFFFFF);
		rxd->fragpa_hi = cpu_to_le32(upper_32_bits(frag_dmaaddr));

		/* Any space left in the current big page for another frag? */
		if ((page_offset + rx_frag_size + rx_frag_size) >
					adapter->big_page_size) {
			pagep = NULL;
			page_info->last_page_user = true;
		}

		prev_page_info = page_info;
		queue_head_inc(rxq);
		page_info = &page_info_tbl[rxq->head];
	}
	if (pagep)
		prev_page_info->last_page_user = true;

	if (posted) {
		atomic_add(posted, &rxq->used);
		be_rxq_notify(adapter, rxq->id, posted);
	} else if (atomic_read(&rxq->used) == 0) {
		/* Let be_worker replenish when memory is available */
		rxo->rx_post_starved = true;
	}
}

static struct be_eth_tx_compl *be_tx_compl_get(struct be_queue_info *tx_cq)
{
	struct be_eth_tx_compl *txcp = queue_tail_node(tx_cq);

	if (txcp->dw[offsetof(struct amap_eth_tx_compl, valid) / 32] == 0)
		return NULL;

	rmb();
	be_dws_le_to_cpu(txcp, sizeof(*txcp));

	txcp->dw[offsetof(struct amap_eth_tx_compl, valid) / 32] = 0;

	queue_tail_inc(tx_cq);
	return txcp;
}

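/* Reclaim the wrbs of one transmitted skb: starting at the queue tail,
 * skip the header wrb, unmap each fragment wrb up to and including
 * last_index (as reported by the completion), then release the skb and
 * credit the freed wrbs back to the queue.
 */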
static void be_tx_compl_process(struct be_adapter *adapter, u16 last_index)
{
	struct be_queue_info *txq = &adapter->tx_obj.q;
	struct be_eth_wrb *wrb;
	struct sk_buff **sent_skbs = adapter->tx_obj.sent_skb_list;
	struct sk_buff *sent_skb;
	u16 cur_index, num_wrbs = 1; /* account for hdr wrb */
	bool unmap_skb_hdr = true;

	sent_skb = sent_skbs[txq->tail];
	BUG_ON(!sent_skb);
	sent_skbs[txq->tail] = NULL;

	/* skip header wrb */
	queue_tail_inc(txq);

	do {
		cur_index = txq->tail;
		wrb = queue_tail_node(txq);
		unmap_tx_frag(&adapter->pdev->dev, wrb,
			      (unmap_skb_hdr && skb_headlen(sent_skb)));
		unmap_skb_hdr = false;

		num_wrbs++;
		queue_tail_inc(txq);
	} while (cur_index != last_index);

	atomic_sub(num_wrbs, &txq->used);

	kfree_skb(sent_skb);
}

static inline struct be_eq_entry *event_get(struct be_eq_obj *eq_obj)
{
	struct be_eq_entry *eqe = queue_tail_node(&eq_obj->q);

	if (!eqe->evt)
		return NULL;

	rmb();
	eqe->evt = le32_to_cpu(eqe->evt);
	queue_tail_inc(&eq_obj->q);
	return eqe;
}

static int event_handle(struct be_adapter *adapter,
			struct be_eq_obj *eq_obj)
{
	struct be_eq_entry *eqe;
	u16 num = 0;

	while ((eqe = event_get(eq_obj)) != NULL) {
		eqe->evt = 0;
		num++;
	}

	/* Deal with any spurious interrupts that come
	 * without events
	 */
	be_eq_notify(adapter, eq_obj->q.id, true, true, num);
	if (num)
		napi_schedule(&eq_obj->napi);

	return num;
}

/* Just read and notify events without processing them.
 * Used at the time of destroying event queues */
static void be_eq_clean(struct be_adapter *adapter,
			struct be_eq_obj *eq_obj)
{
	struct be_eq_entry *eqe;
	u16 num = 0;

	while ((eqe = event_get(eq_obj)) != NULL) {
		eqe->evt = 0;
		num++;
	}

	if (num)
		be_eq_notify(adapter, eq_obj->q.id, false, true, num);
}

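/* Drain an RX queue that is about to be destroyed: discard any pending
 * completions, then release every posted buffer that the hardware never
 * consumed. rxq->used must be zero afterwards or buffer pages would leak.
 */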
static void be_rx_q_clean(struct be_adapter *adapter, struct be_rx_obj *rxo)
{
	struct be_rx_page_info *page_info;
	struct be_queue_info *rxq = &rxo->q;
	struct be_queue_info *rx_cq = &rxo->cq;
	struct be_eth_rx_compl *rxcp;
	u16 tail;

	/* First cleanup pending rx completions */
	while ((rxcp = be_rx_compl_get(rxo)) != NULL) {
		be_rx_compl_discard(adapter, rxo, rxcp);
		be_rx_compl_reset(rxcp);
		be_cq_notify(adapter, rx_cq->id, false, 1);
	}

	/* Then free posted rx buffers that were not used */
	tail = (rxq->head + rxq->len - atomic_read(&rxq->used)) % rxq->len;
	for (; atomic_read(&rxq->used) > 0; index_inc(&tail, rxq->len)) {
		page_info = get_rx_page_info(adapter, rxo, tail);
		put_page(page_info->page);
		memset(page_info, 0, sizeof(*page_info));
	}
	BUG_ON(atomic_read(&rxq->used));
}

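/* Wait (for up to 200ms) for in-flight TX completions to arrive, then
 * forcibly reclaim any posted wrbs whose completions will never show up;
 * otherwise their skbs and DMA mappings would be leaked on ifdown.
 */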
static void be_tx_compl_clean(struct be_adapter *adapter)
{
	struct be_queue_info *tx_cq = &adapter->tx_obj.cq;
	struct be_queue_info *txq = &adapter->tx_obj.q;
	struct be_eth_tx_compl *txcp;
	u16 end_idx, cmpl = 0, timeo = 0;
	struct sk_buff **sent_skbs = adapter->tx_obj.sent_skb_list;
	struct sk_buff *sent_skb;
	bool dummy_wrb;

	/* Wait for a max of 200ms for all the tx-completions to arrive. */
	do {
		while ((txcp = be_tx_compl_get(tx_cq))) {
			end_idx = AMAP_GET_BITS(struct amap_eth_tx_compl,
						wrb_index, txcp);
			be_tx_compl_process(adapter, end_idx);
			cmpl++;
		}
		if (cmpl) {
			be_cq_notify(adapter, tx_cq->id, false, cmpl);
			cmpl = 0;
		}

		if (atomic_read(&txq->used) == 0 || ++timeo > 200)
			break;

		mdelay(1);
	} while (true);

	if (atomic_read(&txq->used))
		dev_err(&adapter->pdev->dev, "%d pending tx-completions\n",
			atomic_read(&txq->used));

	/* free posted tx for which compls will never arrive */
	while (atomic_read(&txq->used)) {
		sent_skb = sent_skbs[txq->tail];
		end_idx = txq->tail;
		index_adv(&end_idx,
			wrb_cnt_for_skb(adapter, sent_skb, &dummy_wrb) - 1,
			txq->len);
		be_tx_compl_process(adapter, end_idx);
	}
}

static void be_mcc_queues_destroy(struct be_adapter *adapter)
{
	struct be_queue_info *q;

	q = &adapter->mcc_obj.q;
	if (q->created)
		be_cmd_q_destroy(adapter, q, QTYPE_MCCQ);
	be_queue_free(adapter, q);

	q = &adapter->mcc_obj.cq;
	if (q->created)
		be_cmd_q_destroy(adapter, q, QTYPE_CQ);
	be_queue_free(adapter, q);
}

/* Must be called only after TX qs are created as MCC shares TX EQ */
static int be_mcc_queues_create(struct be_adapter *adapter)
{
	struct be_queue_info *q, *cq;

	/* Alloc MCC compl queue */
	cq = &adapter->mcc_obj.cq;
	if (be_queue_alloc(adapter, cq, MCC_CQ_LEN,
			sizeof(struct be_mcc_compl)))
		goto err;

	/* Ask BE to create MCC compl queue; share TX's eq */
	if (be_cmd_cq_create(adapter, cq, &adapter->tx_eq.q, false, true, 0))
		goto mcc_cq_free;

	/* Alloc MCC queue */
	q = &adapter->mcc_obj.q;
	if (be_queue_alloc(adapter, q, MCC_Q_LEN, sizeof(struct be_mcc_wrb)))
		goto mcc_cq_destroy;

	/* Ask BE to create MCC queue */
	if (be_cmd_mccq_create(adapter, q, cq))
		goto mcc_q_free;

	return 0;

mcc_q_free:
	be_queue_free(adapter, q);
mcc_cq_destroy:
	be_cmd_q_destroy(adapter, cq, QTYPE_CQ);
mcc_cq_free:
	be_queue_free(adapter, cq);
err:
	return -1;
}

static void be_tx_queues_destroy(struct be_adapter *adapter)
{
	struct be_queue_info *q;

	q = &adapter->tx_obj.q;
	if (q->created)
		be_cmd_q_destroy(adapter, q, QTYPE_TXQ);
	be_queue_free(adapter, q);

	q = &adapter->tx_obj.cq;
	if (q->created)
		be_cmd_q_destroy(adapter, q, QTYPE_CQ);
	be_queue_free(adapter, q);

	/* Clear any residual events */
	be_eq_clean(adapter, &adapter->tx_eq);

	q = &adapter->tx_eq.q;
	if (q->created)
		be_cmd_q_destroy(adapter, q, QTYPE_EQ);
	be_queue_free(adapter, q);
}

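/* Create the TX event queue, completion queue and work queue, in that
 * order, since each object must exist before the next one can point at
 * it. The error labels unwind in exactly the reverse order.
 */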
static int be_tx_queues_create(struct be_adapter *adapter)
{
	struct be_queue_info *eq, *q, *cq;

	adapter->tx_eq.max_eqd = 0;
	adapter->tx_eq.min_eqd = 0;
	adapter->tx_eq.cur_eqd = 96;
	adapter->tx_eq.enable_aic = false;
	/* Alloc Tx Event queue */
	eq = &adapter->tx_eq.q;
	if (be_queue_alloc(adapter, eq, EVNT_Q_LEN, sizeof(struct be_eq_entry)))
		return -1;

	/* Ask BE to create Tx Event queue */
	if (be_cmd_eq_create(adapter, eq, adapter->tx_eq.cur_eqd))
		goto tx_eq_free;

	adapter->tx_eq.msix_vec_idx = adapter->msix_vec_next_idx++;

	/* Alloc TX eth compl queue */
	cq = &adapter->tx_obj.cq;
	if (be_queue_alloc(adapter, cq, TX_CQ_LEN,
			sizeof(struct be_eth_tx_compl)))
		goto tx_eq_destroy;

	/* Ask BE to create Tx eth compl queue */
	if (be_cmd_cq_create(adapter, cq, eq, false, false, 3))
		goto tx_cq_free;

	/* Alloc TX eth queue */
	q = &adapter->tx_obj.q;
	if (be_queue_alloc(adapter, q, TX_Q_LEN, sizeof(struct be_eth_wrb)))
		goto tx_cq_destroy;

	/* Ask BE to create Tx eth queue */
	if (be_cmd_txq_create(adapter, q, cq))
		goto tx_q_free;
	return 0;

tx_q_free:
	be_queue_free(adapter, q);
tx_cq_destroy:
	be_cmd_q_destroy(adapter, cq, QTYPE_CQ);
tx_cq_free:
	be_queue_free(adapter, cq);
tx_eq_destroy:
	be_cmd_q_destroy(adapter, eq, QTYPE_EQ);
tx_eq_free:
	be_queue_free(adapter, eq);
	return -1;
}

static void be_rx_queues_destroy(struct be_adapter *adapter)
{
	struct be_queue_info *q;
	struct be_rx_obj *rxo;
	int i;

	for_all_rx_queues(adapter, rxo, i) {
		q = &rxo->q;
		if (q->created) {
			be_cmd_q_destroy(adapter, q, QTYPE_RXQ);
			/* After the rxq is invalidated, wait for a grace time
			 * of 1ms for all dma to end and the flush compl to
			 * arrive
			 */
			mdelay(1);
			be_rx_q_clean(adapter, rxo);
		}
		be_queue_free(adapter, q);

		q = &rxo->cq;
		if (q->created)
			be_cmd_q_destroy(adapter, q, QTYPE_CQ);
		be_queue_free(adapter, q);

		/* Clear any residual events */
		q = &rxo->rx_eq.q;
		if (q->created) {
			be_eq_clean(adapter, &rxo->rx_eq);
			be_cmd_q_destroy(adapter, q, QTYPE_EQ);
		}
		be_queue_free(adapter, q);
	}
}

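/* Create an EQ, CQ and RXQ for every RX object. RSS is enabled on every
 * queue except the first (default) one, and the rss_ids returned by the
 * firmware are then programmed into the RSS indirection table. Note the
 * big_page_size computation below: with 4K pages and the default
 * rx_frag_size of 2048, get_order(2048) is 0, so each 4K page backs two
 * RX fragments posted by be_post_rx_frags().
 */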
static int be_rx_queues_create(struct be_adapter *adapter)
{
	struct be_queue_info *eq, *q, *cq;
	struct be_rx_obj *rxo;
	int rc, i;

	adapter->big_page_size = (1 << get_order(rx_frag_size)) * PAGE_SIZE;
	for_all_rx_queues(adapter, rxo, i) {
		rxo->adapter = adapter;
		/* Init last_frag_index so that the frag index in the first
		 * completion will never match */
		rxo->last_frag_index = 0xffff;
		rxo->rx_eq.max_eqd = BE_MAX_EQD;
		rxo->rx_eq.enable_aic = true;

		/* EQ */
		eq = &rxo->rx_eq.q;
		rc = be_queue_alloc(adapter, eq, EVNT_Q_LEN,
					sizeof(struct be_eq_entry));
		if (rc)
			goto err;

		rc = be_cmd_eq_create(adapter, eq, rxo->rx_eq.cur_eqd);
		if (rc)
			goto err;

		rxo->rx_eq.msix_vec_idx = adapter->msix_vec_next_idx++;

		/* CQ */
		cq = &rxo->cq;
		rc = be_queue_alloc(adapter, cq, RX_CQ_LEN,
				sizeof(struct be_eth_rx_compl));
		if (rc)
			goto err;

		rc = be_cmd_cq_create(adapter, cq, eq, false, false, 3);
		if (rc)
			goto err;

		/* Rx Q */
		q = &rxo->q;
		rc = be_queue_alloc(adapter, q, RX_Q_LEN,
				sizeof(struct be_eth_rx_d));
		if (rc)
			goto err;

		rc = be_cmd_rxq_create(adapter, q, cq->id, rx_frag_size,
			BE_MAX_JUMBO_FRAME_SIZE, adapter->if_handle,
			(i > 0) ? 1 : 0/* rss enable */, &rxo->rss_id);
		if (rc)
			goto err;
	}

	if (be_multi_rxq(adapter)) {
		u8 rsstable[MAX_RSS_QS];

		for_all_rss_queues(adapter, rxo, i)
			rsstable[i] = rxo->rss_id;

		rc = be_cmd_rss_config(adapter, rsstable,
			adapter->num_rx_qs - 1);
		if (rc)
			goto err;
	}

	return 0;
err:
	be_rx_queues_destroy(adapter);
	return -1;
}

static bool event_peek(struct be_eq_obj *eq_obj)
{
	struct be_eq_entry *eqe = queue_tail_node(&eq_obj->q);

	if (!eqe->evt)
		return false;
	else
		return true;
}

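/* INTx handler. Lancer has no CEV_ISR register, so each EQ is peeked
 * directly to see whether it raised the interrupt; on BE2/BE3 the ISR is
 * read (which also clears it) and only the EQs whose bits are set are
 * serviced.
 */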
static irqreturn_t be_intx(int irq, void *dev)
{
	struct be_adapter *adapter = dev;
	struct be_rx_obj *rxo;
	int isr, i, tx = 0, rx = 0;

	if (lancer_chip(adapter)) {
		if (event_peek(&adapter->tx_eq))
			tx = event_handle(adapter, &adapter->tx_eq);
		for_all_rx_queues(adapter, rxo, i) {
			if (event_peek(&rxo->rx_eq))
				rx |= event_handle(adapter, &rxo->rx_eq);
		}

		if (!(tx || rx))
			return IRQ_NONE;

	} else {
		isr = ioread32(adapter->csr + CEV_ISR0_OFFSET +
			(adapter->tx_eq.q.id / 8) * CEV_ISR_SIZE);
		if (!isr)
			return IRQ_NONE;

		if ((1 << adapter->tx_eq.msix_vec_idx & isr))
			event_handle(adapter, &adapter->tx_eq);

		for_all_rx_queues(adapter, rxo, i) {
			if ((1 << rxo->rx_eq.msix_vec_idx & isr))
				event_handle(adapter, &rxo->rx_eq);
		}
	}

	return IRQ_HANDLED;
}

static irqreturn_t be_msix_rx(int irq, void *dev)
{
	struct be_rx_obj *rxo = dev;
	struct be_adapter *adapter = rxo->adapter;

	event_handle(adapter, &rxo->rx_eq);

	return IRQ_HANDLED;
}

static irqreturn_t be_msix_tx_mcc(int irq, void *dev)
{
	struct be_adapter *adapter = dev;

	event_handle(adapter, &adapter->tx_eq);

	return IRQ_HANDLED;
}

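/* A completion is eligible for GRO only if it is a TCP frame that was
 * received without errors; erroneous completions are counted and handed
 * to the regular (non-GRO) receive path.
 */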
static inline bool do_gro(struct be_rx_obj *rxo,
			struct be_eth_rx_compl *rxcp, u8 err)
{
	int tcp_frame = AMAP_GET_BITS(struct amap_eth_rx_compl, tcpf, rxcp);

	if (err)
		rxo->stats.rxcp_err++;

	return (tcp_frame && !err) ? true : false;
}

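/* NAPI poll for an RX queue: consume up to 'budget' completions, skipping
 * the flush/out-of-buffer completions flagged by a repeated fragment index
 * or a zero fragment count, replenish the queue when it runs low, and
 * re-arm the CQ only once all pending work has been consumed.
 */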
static int be_poll_rx(struct napi_struct *napi, int budget)
{
	struct be_eq_obj *rx_eq = container_of(napi, struct be_eq_obj, napi);
	struct be_rx_obj *rxo = container_of(rx_eq, struct be_rx_obj, rx_eq);
	struct be_adapter *adapter = rxo->adapter;
	struct be_queue_info *rx_cq = &rxo->cq;
	struct be_eth_rx_compl *rxcp;
	u32 work_done;
	u16 frag_index, num_rcvd;
	u8 err;

	rxo->stats.rx_polls++;
	for (work_done = 0; work_done < budget; work_done++) {
		rxcp = be_rx_compl_get(rxo);
		if (!rxcp)
			break;

		err = AMAP_GET_BITS(struct amap_eth_rx_compl, err, rxcp);
		frag_index = AMAP_GET_BITS(struct amap_eth_rx_compl, fragndx,
								rxcp);
		num_rcvd = AMAP_GET_BITS(struct amap_eth_rx_compl, numfrags,
								rxcp);

		/* Skip out-of-buffer compl(lancer) or flush compl(BE) */
		if (likely(frag_index != rxo->last_frag_index &&
				num_rcvd != 0)) {
			rxo->last_frag_index = frag_index;

			if (do_gro(rxo, rxcp, err))
				be_rx_compl_process_gro(adapter, rxo, rxcp);
			else
				be_rx_compl_process(adapter, rxo, rxcp);
		}

		be_rx_compl_reset(rxcp);
	}

	/* Refill the queue */
	if (atomic_read(&rxo->q.used) < RX_FRAGS_REFILL_WM)
		be_post_rx_frags(rxo);

	/* All consumed */
	if (work_done < budget) {
		napi_complete(napi);
		be_cq_notify(adapter, rx_cq->id, true, work_done);
	} else {
		/* More to be consumed; continue with interrupts disabled */
		be_cq_notify(adapter, rx_cq->id, false, work_done);
	}
	return work_done;
}

/* As TX and MCC share the same EQ check for both TX and MCC completions.
 * For TX/MCC we don't honour budget; consume everything
 */
static int be_poll_tx_mcc(struct napi_struct *napi, int budget)
{
	struct be_eq_obj *tx_eq = container_of(napi, struct be_eq_obj, napi);
	struct be_adapter *adapter =
		container_of(tx_eq, struct be_adapter, tx_eq);
	struct be_queue_info *txq = &adapter->tx_obj.q;
	struct be_queue_info *tx_cq = &adapter->tx_obj.cq;
	struct be_eth_tx_compl *txcp;
	int tx_compl = 0, mcc_compl, status = 0;
	u16 end_idx;

	while ((txcp = be_tx_compl_get(tx_cq))) {
		end_idx = AMAP_GET_BITS(struct amap_eth_tx_compl,
				wrb_index, txcp);
		be_tx_compl_process(adapter, end_idx);
		tx_compl++;
	}

	mcc_compl = be_process_mcc(adapter, &status);

	napi_complete(napi);

	if (mcc_compl) {
		struct be_mcc_obj *mcc_obj = &adapter->mcc_obj;
		be_cq_notify(adapter, mcc_obj->cq.id, true, mcc_compl);
	}

	if (tx_compl) {
		be_cq_notify(adapter, adapter->tx_obj.cq.id, true, tx_compl);

		/* As Tx wrbs have been freed up, wake up netdev queue if
		 * it was stopped due to lack of tx wrbs.
		 */
		if (netif_queue_stopped(adapter->netdev) &&
			atomic_read(&txq->used) < txq->len / 2) {
			netif_wake_queue(adapter->netdev);
		}

		tx_stats(adapter)->be_tx_events++;
		tx_stats(adapter)->be_tx_compl += tx_compl;
	}

	return 1;
}

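/* Check the UE (unrecoverable error) status registers in PCI config space
 * and log a line for every unmasked error bit, using the descriptor
 * tables defined at the top of this file.
 */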
void be_detect_dump_ue(struct be_adapter *adapter)
{
	u32 ue_status_lo, ue_status_hi, ue_status_lo_mask, ue_status_hi_mask;
	u32 i;

	pci_read_config_dword(adapter->pdev,
				PCICFG_UE_STATUS_LOW, &ue_status_lo);
	pci_read_config_dword(adapter->pdev,
				PCICFG_UE_STATUS_HIGH, &ue_status_hi);
	pci_read_config_dword(adapter->pdev,
				PCICFG_UE_STATUS_LOW_MASK, &ue_status_lo_mask);
	pci_read_config_dword(adapter->pdev,
				PCICFG_UE_STATUS_HI_MASK, &ue_status_hi_mask);

	ue_status_lo = (ue_status_lo & (~ue_status_lo_mask));
	ue_status_hi = (ue_status_hi & (~ue_status_hi_mask));

	if (ue_status_lo || ue_status_hi) {
		adapter->ue_detected = true;
		dev_err(&adapter->pdev->dev, "UE Detected!!\n");
	}

	if (ue_status_lo) {
		for (i = 0; ue_status_lo; ue_status_lo >>= 1, i++) {
			if (ue_status_lo & 1)
				dev_err(&adapter->pdev->dev,
				"UE: %s bit set\n", ue_status_low_desc[i]);
		}
	}
	if (ue_status_hi) {
		for (i = 0; ue_status_hi; ue_status_hi >>= 1, i++) {
			if (ue_status_hi & 1)
				dev_err(&adapter->pdev->dev,
				"UE: %s bit set\n", ue_status_hi_desc[i]);
		}
	}
}

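/* Periodic (once per second) housekeeping: reap MCC completions while
 * interrupts are still off, refresh stats and rate estimates, adapt the
 * RX EQ delay, replenish any starved RX queue, and poll for unrecoverable
 * errors on non-Lancer chips.
 */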
static void be_worker(struct work_struct *work)
{
	struct be_adapter *adapter =
		container_of(work, struct be_adapter, work.work);
	struct be_rx_obj *rxo;
	int i;

	/* when interrupts are not yet enabled, just reap any pending
	 * mcc completions */
	if (!netif_running(adapter->netdev)) {
		int mcc_compl, status = 0;

		mcc_compl = be_process_mcc(adapter, &status);

		if (mcc_compl) {
			struct be_mcc_obj *mcc_obj = &adapter->mcc_obj;
			be_cq_notify(adapter, mcc_obj->cq.id, false, mcc_compl);
		}
		goto reschedule;
	}

	if (!adapter->stats_ioctl_sent)
		be_cmd_get_stats(adapter, &adapter->stats_cmd);

	be_tx_rate_update(adapter);

	for_all_rx_queues(adapter, rxo, i) {
		be_rx_rate_update(rxo);
		be_rx_eqd_update(adapter, rxo);

		if (rxo->rx_post_starved) {
			rxo->rx_post_starved = false;
			be_post_rx_frags(rxo);
		}
	}
	if (!adapter->ue_detected && !lancer_chip(adapter))
		be_detect_dump_ue(adapter);

reschedule:
	schedule_delayed_work(&adapter->work, msecs_to_jiffies(1000));
}

static void be_msix_disable(struct be_adapter *adapter)
{
	if (adapter->msix_enabled) {
		pci_disable_msix(adapter->pdev);
		adapter->msix_enabled = false;
	}
}

static int be_num_rxqs_get(struct be_adapter *adapter)
{
	if (multi_rxq && (adapter->function_caps & BE_FUNCTION_CAPS_RSS) &&
		!adapter->sriov_enabled && !(adapter->function_mode & 0x400)) {
		return 1 + MAX_RSS_QS; /* one default non-RSS queue */
	} else {
		dev_warn(&adapter->pdev->dev,
			"No support for multiple RX queues\n");
		return 1;
	}
}

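/* Ask for one MSI-X vector per RX queue plus one shared by TX and MCC.
 * If the full set cannot be granted, retry with however many vectors
 * pci_enable_msix() reported as available and shrink num_rx_qs to match;
 * anything below BE_MIN_MSIX_VECTORS leaves MSI-X disabled so that the
 * driver falls back to INTx.
 */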
static void be_msix_enable(struct be_adapter *adapter)
{
#define BE_MIN_MSIX_VECTORS	(1 + 1) /* Rx + Tx */
	int i, status;

	adapter->num_rx_qs = be_num_rxqs_get(adapter);

	for (i = 0; i < (adapter->num_rx_qs + 1); i++)
		adapter->msix_entries[i].entry = i;

	status = pci_enable_msix(adapter->pdev, adapter->msix_entries,
			adapter->num_rx_qs + 1);
	if (status == 0) {
		goto done;
	} else if (status >= BE_MIN_MSIX_VECTORS) {
		if (pci_enable_msix(adapter->pdev, adapter->msix_entries,
				status) == 0) {
			adapter->num_rx_qs = status - 1;
			dev_warn(&adapter->pdev->dev,
				"Could alloc only %d MSIx vectors. "
				"Using %d RX Qs\n", status, adapter->num_rx_qs);
			goto done;
		}
	}
	return;
done:
	adapter->msix_enabled = true;
}

static void be_sriov_enable(struct be_adapter *adapter)
{
	be_check_sriov_fn_type(adapter);
#ifdef CONFIG_PCI_IOV
	if (be_physfn(adapter) && num_vfs) {
		int status;

		status = pci_enable_sriov(adapter->pdev, num_vfs);
		adapter->sriov_enabled = status ? false : true;
	}
#endif
}

static void be_sriov_disable(struct be_adapter *adapter)
{
#ifdef CONFIG_PCI_IOV
	if (adapter->sriov_enabled) {
		pci_disable_sriov(adapter->pdev);
		adapter->sriov_enabled = false;
	}
#endif
}

static inline int be_msix_vec_get(struct be_adapter *adapter,
			struct be_eq_obj *eq_obj)
{
	return adapter->msix_entries[eq_obj->msix_vec_idx].vector;
}

static int be_request_irq(struct be_adapter *adapter,
		struct be_eq_obj *eq_obj,
		void *handler, char *desc, void *context)
{
	struct net_device *netdev = adapter->netdev;
	int vec;

	sprintf(eq_obj->desc, "%s-%s", netdev->name, desc);
	vec = be_msix_vec_get(adapter, eq_obj);
	return request_irq(vec, handler, 0, eq_obj->desc, context);
}

static void be_free_irq(struct be_adapter *adapter, struct be_eq_obj *eq_obj,
		void *context)
{
	int vec = be_msix_vec_get(adapter, eq_obj);
	free_irq(vec, context);
}

static int be_msix_register(struct be_adapter *adapter)
{
	struct be_rx_obj *rxo;
	int status, i;
	char qname[10];

	status = be_request_irq(adapter, &adapter->tx_eq, be_msix_tx_mcc, "tx",
				adapter);
	if (status)
		goto err;

	for_all_rx_queues(adapter, rxo, i) {
		sprintf(qname, "rxq%d", i);
		status = be_request_irq(adapter, &rxo->rx_eq, be_msix_rx,
				qname, rxo);
		if (status)
			goto err_msix;
	}

	return 0;

err_msix:
	be_free_irq(adapter, &adapter->tx_eq, adapter);

	for (i--, rxo = &adapter->rx_obj[i]; i >= 0; i--, rxo--)
		be_free_irq(adapter, &rxo->rx_eq, rxo);

err:
	dev_warn(&adapter->pdev->dev,
		"MSIX Request IRQ failed - err %d\n", status);
	pci_disable_msix(adapter->pdev);
	adapter->msix_enabled = false;
	return status;
}

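/* Register interrupt handlers, preferring MSI-X when it was enabled.
 * Only the physical function may fall back to the shared INTx line;
 * VFs support MSI-X alone, so their registration failure is final.
 */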
static int be_irq_register(struct be_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	int status;

	if (adapter->msix_enabled) {
		status = be_msix_register(adapter);
		if (status == 0)
			goto done;
		/* INTx is not supported for VF */
		if (!be_physfn(adapter))
			return status;
	}

	/* INTx */
	netdev->irq = adapter->pdev->irq;
	status = request_irq(netdev->irq, be_intx, IRQF_SHARED, netdev->name,
			adapter);
	if (status) {
		dev_err(&adapter->pdev->dev,
			"INTx request IRQ failed - err %d\n", status);
		return status;
	}
done:
	adapter->isr_registered = true;
	return 0;
}

static void be_irq_unregister(struct be_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	struct be_rx_obj *rxo;
	int i;

	if (!adapter->isr_registered)
		return;

	/* INTx */
	if (!adapter->msix_enabled) {
		free_irq(netdev->irq, adapter);
		goto done;
	}

	/* MSIx */
	be_free_irq(adapter, &adapter->tx_eq, adapter);

	for_all_rx_queues(adapter, rxo, i)
		be_free_irq(adapter, &rxo->rx_eq, rxo);

done:
	adapter->isr_registered = false;
}

static int be_close(struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_rx_obj *rxo;
	struct be_eq_obj *tx_eq = &adapter->tx_eq;
	int vec, i;

	be_async_mcc_disable(adapter);

	netif_stop_queue(netdev);
	netif_carrier_off(netdev);
	adapter->link_up = false;

	if (!lancer_chip(adapter))
		be_intr_set(adapter, false);

	if (adapter->msix_enabled) {
		vec = be_msix_vec_get(adapter, tx_eq);
		synchronize_irq(vec);

		for_all_rx_queues(adapter, rxo, i) {
			vec = be_msix_vec_get(adapter, &rxo->rx_eq);
			synchronize_irq(vec);
		}
	} else {
		synchronize_irq(netdev->irq);
	}
	be_irq_unregister(adapter);

	for_all_rx_queues(adapter, rxo, i)
		napi_disable(&rxo->rx_eq.napi);

	napi_disable(&tx_eq->napi);

	/* Wait for all pending tx completions to arrive so that
	 * all tx skbs are freed.
	 */
	be_tx_compl_clean(adapter);

	return 0;
}

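/* ndo_open: post RX buffers, enable NAPI, hook up interrupts, arm the
 * (initially unarmed) event queues and only then enable async MCC
 * processing; finally query link state and, on the PF, restore the VLAN
 * and flow-control configuration. Any failure tears down via be_close().
 */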
static int be_open(struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_eq_obj *tx_eq = &adapter->tx_eq;
	struct be_rx_obj *rxo;
	bool link_up;
	int status, i;
	u8 mac_speed;
	u16 link_speed;

	for_all_rx_queues(adapter, rxo, i) {
		be_post_rx_frags(rxo);
		napi_enable(&rxo->rx_eq.napi);
	}
	napi_enable(&tx_eq->napi);

	be_irq_register(adapter);

	if (!lancer_chip(adapter))
		be_intr_set(adapter, true);

	/* The evt queues are created in unarmed state; arm them */
	for_all_rx_queues(adapter, rxo, i) {
		be_eq_notify(adapter, rxo->rx_eq.q.id, true, false, 0);
		be_cq_notify(adapter, rxo->cq.id, true, 0);
	}
	be_eq_notify(adapter, tx_eq->q.id, true, false, 0);

	/* Now that interrupts are on we can process async mcc */
	be_async_mcc_enable(adapter);

	status = be_cmd_link_status_query(adapter, &link_up, &mac_speed,
			&link_speed);
	if (status)
		goto err;
	be_link_status_update(adapter, link_up);

	if (be_physfn(adapter)) {
		status = be_vid_config(adapter, false, 0);
		if (status)
			goto err;

		status = be_cmd_set_flow_control(adapter,
				adapter->tx_fc, adapter->rx_fc);
		if (status)
			goto err;
	}

	return 0;
err:
	be_close(adapter->netdev);
	return -EIO;
}

static int be_setup_wol(struct be_adapter *adapter, bool enable)
{
	struct be_dma_mem cmd;
	int status = 0;
	u8 mac[ETH_ALEN];

	memset(mac, 0, ETH_ALEN);

	cmd.size = sizeof(struct be_cmd_req_acpi_wol_magic_config);
	cmd.va = dma_alloc_coherent(&adapter->pdev->dev, cmd.size, &cmd.dma,
				    GFP_KERNEL);
	if (cmd.va == NULL)
		return -1;
	memset(cmd.va, 0, cmd.size);

	if (enable) {
		status = pci_write_config_dword(adapter->pdev,
			PCICFG_PM_CONTROL_OFFSET, PCICFG_PM_CONTROL_MASK);
		if (status) {
			dev_err(&adapter->pdev->dev,
				"Could not enable Wake-on-lan\n");
			dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va,
					  cmd.dma);
			return status;
		}
		status = be_cmd_enable_magic_wol(adapter,
				adapter->netdev->dev_addr, &cmd);
		pci_enable_wake(adapter->pdev, PCI_D3hot, 1);
		pci_enable_wake(adapter->pdev, PCI_D3cold, 1);
	} else {
		status = be_cmd_enable_magic_wol(adapter, mac, &cmd);
		pci_enable_wake(adapter->pdev, PCI_D3hot, 0);
		pci_enable_wake(adapter->pdev, PCI_D3cold, 0);
	}

	dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va, cmd.dma);
	return status;
}

/*
 * Generate a seed MAC address from the PF MAC Address using jhash.
 * MAC Address for VFs are assigned incrementally starting from the seed.
 * These addresses are programmed in the ASIC by the PF and the VF driver
 * queries for the MAC address during its probe.
 */
static inline int be_vf_eth_addr_config(struct be_adapter *adapter)
{
	u32 vf = 0;
	int status = 0;
	u8 mac[ETH_ALEN];

	be_vf_eth_addr_generate(adapter, mac);

	for (vf = 0; vf < num_vfs; vf++) {
		status = be_cmd_pmac_add(adapter, mac,
					adapter->vf_cfg[vf].vf_if_handle,
					&adapter->vf_cfg[vf].vf_pmac_id);
		if (status)
			dev_err(&adapter->pdev->dev,
				"Mac address add failed for VF %d\n", vf);
		else
			memcpy(adapter->vf_cfg[vf].vf_mac_addr, mac, ETH_ALEN);

		mac[5] += 1;
	}
	return status;
}

static inline void be_vf_eth_addr_rem(struct be_adapter *adapter)
{
	u32 vf;

	for (vf = 0; vf < num_vfs; vf++) {
		if (adapter->vf_cfg[vf].vf_pmac_id != BE_INVALID_PMAC_ID)
			be_cmd_pmac_del(adapter,
					adapter->vf_cfg[vf].vf_if_handle,
					adapter->vf_cfg[vf].vf_pmac_id);
	}
}

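/* Bring up the logical function: create the interface (plus one interface
 * per VF when SR-IOV is active), then the TX, RX and MCC queue sets, and
 * assign the VF MAC addresses. The error path unwinds whatever was
 * created, in reverse order.
 */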
static int be_setup(struct be_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	u32 cap_flags, en_flags, vf = 0;
	int status;
	u8 mac[ETH_ALEN];

	cap_flags = en_flags = BE_IF_FLAGS_UNTAGGED | BE_IF_FLAGS_BROADCAST;

	if (be_physfn(adapter)) {
		cap_flags |= BE_IF_FLAGS_MCAST_PROMISCUOUS |
				BE_IF_FLAGS_PROMISCUOUS |
				BE_IF_FLAGS_PASS_L3L4_ERRORS;
		en_flags |= BE_IF_FLAGS_PASS_L3L4_ERRORS;

		if (be_multi_rxq(adapter)) {
			cap_flags |= BE_IF_FLAGS_RSS;
			en_flags |= BE_IF_FLAGS_RSS;
		}
	}

	status = be_cmd_if_create(adapter, cap_flags, en_flags,
			netdev->dev_addr, false/* pmac_invalid */,
			&adapter->if_handle, &adapter->pmac_id, 0);
	if (status != 0)
		goto do_none;

	if (be_physfn(adapter)) {
		while (vf < num_vfs) {
			cap_flags = en_flags = BE_IF_FLAGS_UNTAGGED
					| BE_IF_FLAGS_BROADCAST;
			status = be_cmd_if_create(adapter, cap_flags, en_flags,
					mac, true,
					&adapter->vf_cfg[vf].vf_if_handle,
					NULL, vf+1);
			if (status) {
				dev_err(&adapter->pdev->dev,
				"Interface Create failed for VF %d\n", vf);
				goto if_destroy;
			}
			adapter->vf_cfg[vf].vf_pmac_id = BE_INVALID_PMAC_ID;
			vf++;
		}
	} else {
		status = be_cmd_mac_addr_query(adapter, mac,
			MAC_ADDRESS_TYPE_NETWORK, false, adapter->if_handle);
		if (!status) {
			memcpy(adapter->netdev->dev_addr, mac, ETH_ALEN);
			memcpy(adapter->netdev->perm_addr, mac, ETH_ALEN);
		}
	}

	status = be_tx_queues_create(adapter);
	if (status != 0)
		goto if_destroy;

	status = be_rx_queues_create(adapter);
	if (status != 0)
		goto tx_qs_destroy;

	status = be_mcc_queues_create(adapter);
	if (status != 0)
		goto rx_qs_destroy;

	if (be_physfn(adapter)) {
		status = be_vf_eth_addr_config(adapter);
		if (status)
			goto mcc_q_destroy;
	}

	adapter->link_speed = -1;

	return 0;

mcc_q_destroy:
	if (be_physfn(adapter))
		be_vf_eth_addr_rem(adapter);
	be_mcc_queues_destroy(adapter);
rx_qs_destroy:
	be_rx_queues_destroy(adapter);
tx_qs_destroy:
	be_tx_queues_destroy(adapter);
if_destroy:
	for (vf = 0; vf < num_vfs; vf++)
		if (adapter->vf_cfg[vf].vf_if_handle)
			be_cmd_if_destroy(adapter,
					adapter->vf_cfg[vf].vf_if_handle);
	be_cmd_if_destroy(adapter, adapter->if_handle);
do_none:
	return status;
}

static int be_clear(struct be_adapter *adapter)
{
	if (be_physfn(adapter))
		be_vf_eth_addr_rem(adapter);

	be_mcc_queues_destroy(adapter);
	be_rx_queues_destroy(adapter);
	be_tx_queues_destroy(adapter);

	be_cmd_if_destroy(adapter, adapter->if_handle);

	/* tell fw we're done with firing cmds */
	be_cmd_fw_clean(adapter);
	return 0;
}

#define FW_FILE_HDR_SIGN	"ServerEngines Corp. "
static bool be_flash_redboot(struct be_adapter *adapter,
			const u8 *p, u32 img_start, int image_size,
			int hdr_size)
{
	u32 crc_offset;
	u8 flashed_crc[4];
	int status;

	crc_offset = hdr_size + img_start + image_size - 4;

	p += crc_offset;

	status = be_cmd_get_flash_crc(adapter, flashed_crc,
			(image_size - 4));
	if (status) {
		dev_err(&adapter->pdev->dev,
		"could not get crc from flash, not flashing redboot\n");
		return false;
	}

	/* update redboot only if crc does not match */
	if (!memcmp(flashed_crc, p, 4))
		return false;
	else
		return true;
}

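/* Flash every firmware component listed in the generation-specific table.
 * The redboot section is written only when its CRC differs from what is
 * already in flash, and each component is streamed to the card in 32KB
 * chunks, with FLASHROM_OPER_FLASH marking the final chunk.
 */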
static int be_flash_data(struct be_adapter *adapter,
			const struct firmware *fw,
			struct be_dma_mem *flash_cmd, int num_of_images)
{
	int status = 0, i, filehdr_size = 0;
	u32 total_bytes = 0, flash_op;
	int num_bytes;
	const u8 *p = fw->data;
	struct be_cmd_write_flashrom *req = flash_cmd->va;
	const struct flash_comp *pflashcomp;
	int num_comp;

	static const struct flash_comp gen3_flash_types[9] = {
		{ FLASH_iSCSI_PRIMARY_IMAGE_START_g3, IMG_TYPE_ISCSI_ACTIVE,
			FLASH_IMAGE_MAX_SIZE_g3},
		{ FLASH_REDBOOT_START_g3, IMG_TYPE_REDBOOT,
			FLASH_REDBOOT_IMAGE_MAX_SIZE_g3},
		{ FLASH_iSCSI_BIOS_START_g3, IMG_TYPE_BIOS,
			FLASH_BIOS_IMAGE_MAX_SIZE_g3},
		{ FLASH_PXE_BIOS_START_g3, IMG_TYPE_PXE_BIOS,
			FLASH_BIOS_IMAGE_MAX_SIZE_g3},
		{ FLASH_FCoE_BIOS_START_g3, IMG_TYPE_FCOE_BIOS,
			FLASH_BIOS_IMAGE_MAX_SIZE_g3},
		{ FLASH_iSCSI_BACKUP_IMAGE_START_g3, IMG_TYPE_ISCSI_BACKUP,
			FLASH_IMAGE_MAX_SIZE_g3},
		{ FLASH_FCoE_PRIMARY_IMAGE_START_g3, IMG_TYPE_FCOE_FW_ACTIVE,
			FLASH_IMAGE_MAX_SIZE_g3},
		{ FLASH_FCoE_BACKUP_IMAGE_START_g3, IMG_TYPE_FCOE_FW_BACKUP,
			FLASH_IMAGE_MAX_SIZE_g3},
		{ FLASH_NCSI_START_g3, IMG_TYPE_NCSI_FW,
			FLASH_NCSI_IMAGE_MAX_SIZE_g3}
	};
	static const struct flash_comp gen2_flash_types[8] = {
		{ FLASH_iSCSI_PRIMARY_IMAGE_START_g2, IMG_TYPE_ISCSI_ACTIVE,
			FLASH_IMAGE_MAX_SIZE_g2},
		{ FLASH_REDBOOT_START_g2, IMG_TYPE_REDBOOT,
			FLASH_REDBOOT_IMAGE_MAX_SIZE_g2},
		{ FLASH_iSCSI_BIOS_START_g2, IMG_TYPE_BIOS,
			FLASH_BIOS_IMAGE_MAX_SIZE_g2},
		{ FLASH_PXE_BIOS_START_g2, IMG_TYPE_PXE_BIOS,
			FLASH_BIOS_IMAGE_MAX_SIZE_g2},
		{ FLASH_FCoE_BIOS_START_g2, IMG_TYPE_FCOE_BIOS,
			FLASH_BIOS_IMAGE_MAX_SIZE_g2},
		{ FLASH_iSCSI_BACKUP_IMAGE_START_g2, IMG_TYPE_ISCSI_BACKUP,
			FLASH_IMAGE_MAX_SIZE_g2},
		{ FLASH_FCoE_PRIMARY_IMAGE_START_g2, IMG_TYPE_FCOE_FW_ACTIVE,
			FLASH_IMAGE_MAX_SIZE_g2},
		{ FLASH_FCoE_BACKUP_IMAGE_START_g2, IMG_TYPE_FCOE_FW_BACKUP,
			FLASH_IMAGE_MAX_SIZE_g2}
	};

	if (adapter->generation == BE_GEN3) {
		pflashcomp = gen3_flash_types;
		filehdr_size = sizeof(struct flash_file_hdr_g3);
		num_comp = ARRAY_SIZE(gen3_flash_types);
	} else {
		pflashcomp = gen2_flash_types;
		filehdr_size = sizeof(struct flash_file_hdr_g2);
		num_comp = ARRAY_SIZE(gen2_flash_types);
	}
	for (i = 0; i < num_comp; i++) {
		if ((pflashcomp[i].optype == IMG_TYPE_NCSI_FW) &&
			memcmp(adapter->fw_ver, "3.102.148.0", 11) < 0)
			continue;
		if ((pflashcomp[i].optype == IMG_TYPE_REDBOOT) &&
			(!be_flash_redboot(adapter, fw->data,
			pflashcomp[i].offset, pflashcomp[i].size,
			filehdr_size)))
			continue;
		p = fw->data;
		p += filehdr_size + pflashcomp[i].offset
			+ (num_of_images * sizeof(struct image_hdr));
		if (p + pflashcomp[i].size > fw->data + fw->size)
			return -1;
		total_bytes = pflashcomp[i].size;
		while (total_bytes) {
			if (total_bytes > 32*1024)
				num_bytes = 32*1024;
			else
				num_bytes = total_bytes;
			total_bytes -= num_bytes;

			if (!total_bytes)
				flash_op = FLASHROM_OPER_FLASH;
			else
				flash_op = FLASHROM_OPER_SAVE;
			memcpy(req->params.data_buf, p, num_bytes);
			p += num_bytes;
			status = be_cmd_write_flashrom(adapter, flash_cmd,
				pflashcomp[i].optype, flash_op, num_bytes);
			if (status) {
				dev_err(&adapter->pdev->dev,
					"cmd to write to flash rom failed.\n");
				return -1;
			}
			yield();
		}
	}
	return 0;
}

static int get_ufigen_type(struct flash_file_hdr_g2 *fhdr)
{
	if (fhdr == NULL)
		return 0;
	if (fhdr->build[0] == '3')
		return BE_GEN3;
	else if (fhdr->build[0] == '2')
		return BE_GEN2;
	else
		return 0;
}

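/* Entry point for ethtool-initiated firmware flashing. The interface must
 * be up, a DMA buffer large enough for one 32KB write is allocated, and
 * the UFI file is handed to be_flash_data() only when its generation
 * matches that of the adapter.
 */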
int be_load_fw(struct be_adapter *adapter, u8 *func)
{
	char fw_file[ETHTOOL_FLASH_MAX_FILENAME];
	const struct firmware *fw;
	struct flash_file_hdr_g2 *fhdr;
	struct flash_file_hdr_g3 *fhdr3;
	struct image_hdr *img_hdr_ptr = NULL;
	struct be_dma_mem flash_cmd;
	int status, i = 0, num_imgs = 0;
	const u8 *p;

	if (!netif_running(adapter->netdev)) {
		dev_err(&adapter->pdev->dev,
			"Firmware load not allowed (interface is down)\n");
		return -EPERM;
	}

	strcpy(fw_file, func);

	status = request_firmware(&fw, fw_file, &adapter->pdev->dev);
	if (status)
		goto fw_exit;

	p = fw->data;
	fhdr = (struct flash_file_hdr_g2 *) p;
	dev_info(&adapter->pdev->dev, "Flashing firmware file %s\n", fw_file);

	flash_cmd.size = sizeof(struct be_cmd_write_flashrom) + 32*1024;
	flash_cmd.va = dma_alloc_coherent(&adapter->pdev->dev, flash_cmd.size,
					  &flash_cmd.dma, GFP_KERNEL);
	if (!flash_cmd.va) {
		status = -ENOMEM;
		dev_err(&adapter->pdev->dev,
			"Memory allocation failure while flashing\n");
		goto fw_exit;
	}

	if ((adapter->generation == BE_GEN3) &&
			(get_ufigen_type(fhdr) == BE_GEN3)) {
		fhdr3 = (struct flash_file_hdr_g3 *) fw->data;
		num_imgs = le32_to_cpu(fhdr3->num_imgs);
		for (i = 0; i < num_imgs; i++) {
			img_hdr_ptr = (struct image_hdr *) (fw->data +
					(sizeof(struct flash_file_hdr_g3) +
					 i * sizeof(struct image_hdr)));
			if (le32_to_cpu(img_hdr_ptr->imageid) == 1)
				status = be_flash_data(adapter, fw, &flash_cmd,
							num_imgs);
		}
	} else if ((adapter->generation == BE_GEN2) &&
			(get_ufigen_type(fhdr) == BE_GEN2)) {
		status = be_flash_data(adapter, fw, &flash_cmd, 0);
	} else {
		dev_err(&adapter->pdev->dev,
			"UFI and Interface are not compatible for flashing\n");
		status = -1;
	}

	dma_free_coherent(&adapter->pdev->dev, flash_cmd.size, flash_cmd.va,
			  flash_cmd.dma);
	if (status) {
		dev_err(&adapter->pdev->dev, "Firmware load error\n");
		goto fw_exit;
	}

	dev_info(&adapter->pdev->dev, "Firmware flashed successfully\n");

fw_exit:
	release_firmware(fw);
	return status;
}

static struct net_device_ops be_netdev_ops = {
	.ndo_open = be_open,
	.ndo_stop = be_close,
	.ndo_start_xmit = be_xmit,
	.ndo_set_rx_mode = be_set_multicast_list,
	.ndo_set_mac_address = be_mac_addr_set,
	.ndo_change_mtu = be_change_mtu,
	.ndo_validate_addr = eth_validate_addr,
	.ndo_vlan_rx_register = be_vlan_register,
	.ndo_vlan_rx_add_vid = be_vlan_add_vid,
	.ndo_vlan_rx_kill_vid = be_vlan_rem_vid,
	.ndo_set_vf_mac = be_set_vf_mac,
	.ndo_set_vf_vlan = be_set_vf_vlan,
	.ndo_set_vf_tx_rate = be_set_vf_tx_rate,
	.ndo_get_vf_config = be_get_vf_config
};

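/*
 * Advertise offloads and set up NAPI. One NAPI context is registered per
 * RX queue, plus one shared context that services TX completions and MCC
 * (management) events.
 */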
static void be_netdev_init(struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_rx_obj *rxo;
	int i;

	netdev->features |= NETIF_F_SG | NETIF_F_HW_VLAN_RX | NETIF_F_TSO |
		NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_FILTER |
		NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
		NETIF_F_GRO | NETIF_F_TSO6;

	netdev->vlan_features |= NETIF_F_SG | NETIF_F_TSO |
		NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;

	if (lancer_chip(adapter))
		netdev->vlan_features |= NETIF_F_TSO6;

	netdev->flags |= IFF_MULTICAST;

	adapter->rx_csum = true;

	/* Default settings for Rx and Tx flow control */
	adapter->rx_fc = true;
	adapter->tx_fc = true;

	netif_set_gso_max_size(netdev, 65535);

	BE_SET_NETDEV_OPS(netdev, &be_netdev_ops);

	SET_ETHTOOL_OPS(netdev, &be_ethtool_ops);

	for_all_rx_queues(adapter, rxo, i)
		netif_napi_add(netdev, &rxo->rx_eq.napi, be_poll_rx,
			BE_NAPI_WEIGHT);

	netif_napi_add(netdev, &adapter->tx_eq.napi, be_poll_tx_mcc,
		BE_NAPI_WEIGHT);
}

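/*
 * BAR layout handled below: Lancer uses a single doorbell BAR 0; on
 * BE_GEN2 the pcicfg window is BAR 1 and the doorbells BAR 4; on BE_GEN3
 * pcicfg moves to BAR 0 and doorbells stay at BAR 4 for the PF (BAR 0
 * for VFs). The CSR space (BAR 2) is mapped only for the physical
 * function, and VFs reach their pcicfg window at SRIOV_VF_PCICFG_OFFSET
 * inside the doorbell mapping.
 */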
static void be_unmap_pci_bars(struct be_adapter *adapter)
{
	if (adapter->csr)
		iounmap(adapter->csr);
	if (adapter->db)
		iounmap(adapter->db);
	if (adapter->pcicfg && be_physfn(adapter))
		iounmap(adapter->pcicfg);
}

static int be_map_pci_bars(struct be_adapter *adapter)
{
	u8 __iomem *addr;
	int pcicfg_reg, db_reg;

	if (lancer_chip(adapter)) {
		addr = ioremap_nocache(pci_resource_start(adapter->pdev, 0),
				pci_resource_len(adapter->pdev, 0));
		if (addr == NULL)
			return -ENOMEM;
		adapter->db = addr;
		return 0;
	}

	if (be_physfn(adapter)) {
		addr = ioremap_nocache(pci_resource_start(adapter->pdev, 2),
				pci_resource_len(adapter->pdev, 2));
		if (addr == NULL)
			return -ENOMEM;
		adapter->csr = addr;
	}

	if (adapter->generation == BE_GEN2) {
		pcicfg_reg = 1;
		db_reg = 4;
	} else {
		pcicfg_reg = 0;
		if (be_physfn(adapter))
			db_reg = 4;
		else
			db_reg = 0;
	}
	addr = ioremap_nocache(pci_resource_start(adapter->pdev, db_reg),
				pci_resource_len(adapter->pdev, db_reg));
	if (addr == NULL)
		goto pci_map_err;
	adapter->db = addr;

	if (be_physfn(adapter)) {
		addr = ioremap_nocache(
				pci_resource_start(adapter->pdev, pcicfg_reg),
				pci_resource_len(adapter->pdev, pcicfg_reg));
		if (addr == NULL)
			goto pci_map_err;
		adapter->pcicfg = addr;
	} else
		adapter->pcicfg = adapter->db + SRIOV_VF_PCICFG_OFFSET;

	return 0;
pci_map_err:
	be_unmap_pci_bars(adapter);
	return -ENOMEM;
}

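/*
 * The bootstrap mailbox evidently needs 16-byte alignment: the buffer is
 * allocated with 16 bytes of slack and an aligned view (mbox_mem) is
 * carved out of it with PTR_ALIGN in be_ctrl_init() below.
 */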
static void be_ctrl_cleanup(struct be_adapter *adapter)
{
	struct be_dma_mem *mem = &adapter->mbox_mem_alloced;

	be_unmap_pci_bars(adapter);

	if (mem->va)
		dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
				  mem->dma);

	mem = &adapter->mc_cmd_mem;
	if (mem->va)
		dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
				  mem->dma);
}

static int be_ctrl_init(struct be_adapter *adapter)
{
	struct be_dma_mem *mbox_mem_alloc = &adapter->mbox_mem_alloced;
	struct be_dma_mem *mbox_mem_align = &adapter->mbox_mem;
	struct be_dma_mem *mc_cmd_mem = &adapter->mc_cmd_mem;
	int status;

	status = be_map_pci_bars(adapter);
	if (status)
		goto done;

	mbox_mem_alloc->size = sizeof(struct be_mcc_mailbox) + 16;
	mbox_mem_alloc->va = dma_alloc_coherent(&adapter->pdev->dev,
						mbox_mem_alloc->size,
						&mbox_mem_alloc->dma,
						GFP_KERNEL);
	if (!mbox_mem_alloc->va) {
		status = -ENOMEM;
		goto unmap_pci_bars;
	}

	mbox_mem_align->size = sizeof(struct be_mcc_mailbox);
	mbox_mem_align->va = PTR_ALIGN(mbox_mem_alloc->va, 16);
	mbox_mem_align->dma = PTR_ALIGN(mbox_mem_alloc->dma, 16);
	memset(mbox_mem_align->va, 0, sizeof(struct be_mcc_mailbox));

	mc_cmd_mem->size = sizeof(struct be_cmd_req_mcast_mac_config);
	mc_cmd_mem->va = dma_alloc_coherent(&adapter->pdev->dev,
					    mc_cmd_mem->size, &mc_cmd_mem->dma,
					    GFP_KERNEL);
	if (mc_cmd_mem->va == NULL) {
		status = -ENOMEM;
		goto free_mbox;
	}
	memset(mc_cmd_mem->va, 0, mc_cmd_mem->size);

	mutex_init(&adapter->mbox_lock);
	spin_lock_init(&adapter->mcc_lock);
	spin_lock_init(&adapter->mcc_cq_lock);

	init_completion(&adapter->flash_compl);
	pci_save_state(adapter->pdev);
	return 0;

free_mbox:
	dma_free_coherent(&adapter->pdev->dev, mbox_mem_alloc->size,
			  mbox_mem_alloc->va, mbox_mem_alloc->dma);

unmap_pci_bars:
	be_unmap_pci_bars(adapter);

done:
	return status;
}

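/*
 * Stats are fetched by firmware command into a DMA-coherent buffer that
 * lives for the lifetime of the adapter; it is allocated here and torn
 * down in be_stats_cleanup().
 */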
static void be_stats_cleanup(struct be_adapter *adapter)
{
	struct be_dma_mem *cmd = &adapter->stats_cmd;

	if (cmd->va)
		dma_free_coherent(&adapter->pdev->dev, cmd->size,
				  cmd->va, cmd->dma);
}

static int be_stats_init(struct be_adapter *adapter)
{
	struct be_dma_mem *cmd = &adapter->stats_cmd;

	cmd->size = sizeof(struct be_cmd_req_get_stats);
	cmd->va = dma_alloc_coherent(&adapter->pdev->dev, cmd->size, &cmd->dma,
				     GFP_KERNEL);
	if (cmd->va == NULL)
		return -ENOMEM;
	memset(cmd->va, 0, cmd->size);
	return 0;
}

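/*
 * Device removal tears things down in roughly the reverse order of
 * be_probe(): stop the worker, unregister the netdev, free rings and
 * control structures, then undo the PCI-level setup.
 */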
static void __devexit be_remove(struct pci_dev *pdev)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);

	if (!adapter)
		return;

	cancel_delayed_work_sync(&adapter->work);

	unregister_netdev(adapter->netdev);

	be_clear(adapter);

	be_stats_cleanup(adapter);

	be_ctrl_cleanup(adapter);

	be_sriov_disable(adapter);

	be_msix_disable(adapter);

	pci_set_drvdata(pdev, NULL);
	pci_release_regions(pdev);
	pci_disable_device(pdev);

	free_netdev(adapter->netdev);
}

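/*
 * Pull the initial configuration from firmware: version string, port
 * config and the permanent MAC (PF only; VFs get a MAC assigned later).
 * The 0x400 bit tested in function_mode below appears to select a
 * multi-channel (FLEX10-style) mode that quarters the usable VLAN table;
 * the bit is undocumented here, so treat that reading as an assumption.
 */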
static int be_get_config(struct be_adapter *adapter)
{
	int status;
	u8 mac[ETH_ALEN];

	status = be_cmd_get_fw_ver(adapter, adapter->fw_ver);
	if (status)
		return status;

	status = be_cmd_query_fw_cfg(adapter, &adapter->port_num,
			&adapter->function_mode, &adapter->function_caps);
	if (status)
		return status;

	memset(mac, 0, ETH_ALEN);

	if (be_physfn(adapter)) {
		status = be_cmd_mac_addr_query(adapter, mac,
			MAC_ADDRESS_TYPE_NETWORK, true /* permanent */, 0);

		if (status)
			return status;

		if (!is_valid_ether_addr(mac))
			return -EADDRNOTAVAIL;

		memcpy(adapter->netdev->dev_addr, mac, ETH_ALEN);
		memcpy(adapter->netdev->perm_addr, mac, ETH_ALEN);
	}

	if (adapter->function_mode & 0x400)
		adapter->max_vlans = BE_NUM_VLANS_SUPPORTED / 4;
	else
		adapter->max_vlans = BE_NUM_VLANS_SUPPORTED;

	return 0;
}

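/*
 * Map the PCI device ID to an ASIC generation. Lancer (OC_DEVICE_ID3)
 * additionally exposes a SLI interface register whose VALID and IF_TYPE
 * fields are sanity-checked before the device is accepted; SR-IOV is
 * rejected on that family here.
 */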
static int be_dev_family_check(struct be_adapter *adapter)
{
	struct pci_dev *pdev = adapter->pdev;
	u32 sli_intf = 0, if_type;

	switch (pdev->device) {
	case BE_DEVICE_ID1:
	case OC_DEVICE_ID1:
		adapter->generation = BE_GEN2;
		break;
	case BE_DEVICE_ID2:
	case OC_DEVICE_ID2:
		adapter->generation = BE_GEN3;
		break;
	case OC_DEVICE_ID3:
		pci_read_config_dword(pdev, SLI_INTF_REG_OFFSET, &sli_intf);
		if_type = (sli_intf & SLI_INTF_IF_TYPE_MASK) >>
				SLI_INTF_IF_TYPE_SHIFT;

		if (((sli_intf & SLI_INTF_VALID_MASK) != SLI_INTF_VALID) ||
			if_type != 0x02) {
			dev_err(&pdev->dev, "SLI_INTF reg val is not valid\n");
			return -EINVAL;
		}
		if (num_vfs > 0) {
			dev_err(&pdev->dev, "VFs not supported\n");
			return -EINVAL;
		}
		adapter->sli_family = ((sli_intf & SLI_INTF_FAMILY_MASK) >>
					SLI_INTF_FAMILY_SHIFT);
		adapter->generation = BE_GEN3;
		break;
	default:
		adapter->generation = 0;
	}
	return 0;
}

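/*
 * Probe order matters: PCI enable -> netdev alloc -> family check ->
 * DMA mask -> SR-IOV -> control structures -> firmware POST/init ->
 * stats -> queue setup -> netdev registration. Each failure label below
 * unwinds exactly the steps completed so far.
 */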
static int __devinit be_probe(struct pci_dev *pdev,
			const struct pci_device_id *pdev_id)
{
	int status = 0;
	struct be_adapter *adapter;
	struct net_device *netdev;

	status = pci_enable_device(pdev);
	if (status)
		goto do_none;

	status = pci_request_regions(pdev, DRV_NAME);
	if (status)
		goto disable_dev;
	pci_set_master(pdev);

	netdev = alloc_etherdev(sizeof(struct be_adapter));
	if (netdev == NULL) {
		status = -ENOMEM;
		goto rel_reg;
	}
	adapter = netdev_priv(netdev);
	adapter->pdev = pdev;
	pci_set_drvdata(pdev, adapter);

	status = be_dev_family_check(adapter);
	if (status)
		goto free_netdev;

	adapter->netdev = netdev;
	SET_NETDEV_DEV(netdev, &pdev->dev);

	status = dma_set_mask(&pdev->dev, DMA_BIT_MASK(64));
	if (!status) {
		netdev->features |= NETIF_F_HIGHDMA;
	} else {
		status = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
		if (status) {
			dev_err(&pdev->dev, "Could not set PCI DMA Mask\n");
			goto free_netdev;
		}
	}

	be_sriov_enable(adapter);

	status = be_ctrl_init(adapter);
	if (status)
		goto free_netdev;

	/* sync up with fw's ready state */
	if (be_physfn(adapter)) {
		status = be_cmd_POST(adapter);
		if (status)
			goto ctrl_clean;
	}

	/* tell fw we're ready to fire cmds */
	status = be_cmd_fw_init(adapter);
	if (status)
		goto ctrl_clean;

	if (be_physfn(adapter)) {
		status = be_cmd_reset_function(adapter);
		if (status)
			goto ctrl_clean;
	}

	status = be_stats_init(adapter);
	if (status)
		goto ctrl_clean;

	status = be_get_config(adapter);
	if (status)
		goto stats_clean;

	be_msix_enable(adapter);

	INIT_DELAYED_WORK(&adapter->work, be_worker);

	status = be_setup(adapter);
	if (status)
		goto msix_disable;

	be_netdev_init(netdev);
	status = register_netdev(netdev);
	if (status != 0)
		goto unsetup;
	netif_carrier_off(netdev);

	dev_info(&pdev->dev, "%s port %d\n", nic_name(pdev), adapter->port_num);
	schedule_delayed_work(&adapter->work, msecs_to_jiffies(100));
	return 0;

unsetup:
	be_clear(adapter);
msix_disable:
	be_msix_disable(adapter);
stats_clean:
	be_stats_cleanup(adapter);
ctrl_clean:
	be_ctrl_cleanup(adapter);
free_netdev:
	be_sriov_disable(adapter);
	free_netdev(netdev);
	pci_set_drvdata(pdev, NULL);
rel_reg:
	pci_release_regions(pdev);
disable_dev:
	pci_disable_device(pdev);
do_none:
	dev_err(&pdev->dev, "%s initialization failed\n", nic_name(pdev));
	return status;
}

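/*
 * Legacy pci_driver suspend/resume hooks: suspend closes the interface
 * and frees all rings, so resume must re-init the firmware and rebuild
 * everything via be_setup(). Wake-on-LAN is armed before power-down and
 * disarmed once the device is back up.
 */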
static int be_suspend(struct pci_dev *pdev, pm_message_t state)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);
	struct net_device *netdev = adapter->netdev;

	if (adapter->wol)
		be_setup_wol(adapter, true);

	netif_device_detach(netdev);
	if (netif_running(netdev)) {
		rtnl_lock();
		be_close(netdev);
		rtnl_unlock();
	}
	be_cmd_get_flow_control(adapter, &adapter->tx_fc, &adapter->rx_fc);
	be_clear(adapter);

	pci_save_state(pdev);
	pci_disable_device(pdev);
	pci_set_power_state(pdev, pci_choose_state(pdev, state));
	return 0;
}

static int be_resume(struct pci_dev *pdev)
{
	int status = 0;
	struct be_adapter *adapter = pci_get_drvdata(pdev);
	struct net_device *netdev = adapter->netdev;

	netif_device_detach(netdev);

	status = pci_enable_device(pdev);
	if (status)
		return status;

	pci_set_power_state(pdev, 0);
	pci_restore_state(pdev);

	/* tell fw we're ready to fire cmds */
	status = be_cmd_fw_init(adapter);
	if (status)
		return status;

	be_setup(adapter);
	if (netif_running(netdev)) {
		rtnl_lock();
		be_open(netdev);
		rtnl_unlock();
	}
	netif_device_attach(netdev);

	if (adapter->wol)
		be_setup_wol(adapter, false);
	return 0;
}

/*
 * An FLR (function-level reset) will stop the controller from DMAing
 * any further data, which is what we want on shutdown.
 */
static void be_shutdown(struct pci_dev *pdev)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);
	struct net_device *netdev = adapter->netdev;

	netif_device_detach(netdev);

	be_cmd_reset_function(adapter);

	if (adapter->wol)
		be_setup_wol(adapter, true);

	pci_disable_device(pdev);
}

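/*
 * EEH (PCI error) recovery proceeds in three callbacks: error_detected
 * quiesces the driver and reports whether a slot reset is worth trying,
 * slot_reset re-enables the device and POSTs the firmware, and resume
 * rebuilds the rings and reattaches the netdev.
 */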
static pci_ers_result_t be_eeh_err_detected(struct pci_dev *pdev,
				pci_channel_state_t state)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);
	struct net_device *netdev = adapter->netdev;

	dev_err(&adapter->pdev->dev, "EEH error detected\n");

	adapter->eeh_err = true;

	netif_device_detach(netdev);

	if (netif_running(netdev)) {
		rtnl_lock();
		be_close(netdev);
		rtnl_unlock();
	}
	be_clear(adapter);

	if (state == pci_channel_io_perm_failure)
		return PCI_ERS_RESULT_DISCONNECT;

	pci_disable_device(pdev);

	return PCI_ERS_RESULT_NEED_RESET;
}

static pci_ers_result_t be_eeh_reset(struct pci_dev *pdev)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);
	int status;

	dev_info(&adapter->pdev->dev, "EEH reset\n");
	adapter->eeh_err = false;

	status = pci_enable_device(pdev);
	if (status)
		return PCI_ERS_RESULT_DISCONNECT;

	pci_set_master(pdev);
	pci_set_power_state(pdev, 0);
	pci_restore_state(pdev);

	/* Check if card is ok and fw is ready */
	status = be_cmd_POST(adapter);
	if (status)
		return PCI_ERS_RESULT_DISCONNECT;

	return PCI_ERS_RESULT_RECOVERED;
}

static void be_eeh_resume(struct pci_dev *pdev)
{
	int status = 0;
	struct be_adapter *adapter = pci_get_drvdata(pdev);
	struct net_device *netdev = adapter->netdev;

	dev_info(&adapter->pdev->dev, "EEH resume\n");

	pci_save_state(pdev);

	/* tell fw we're ready to fire cmds */
	status = be_cmd_fw_init(adapter);
	if (status)
		goto err;

	status = be_setup(adapter);
	if (status)
		goto err;

	if (netif_running(netdev)) {
		status = be_open(netdev);
		if (status)
			goto err;
	}
	netif_device_attach(netdev);
	return;
err:
	dev_err(&adapter->pdev->dev, "EEH resume failed\n");
}

static struct pci_error_handlers be_eeh_handlers = {
	.error_detected = be_eeh_err_detected,
	.slot_reset = be_eeh_reset,
	.resume = be_eeh_resume,
};

static struct pci_driver be_driver = {
	.name = DRV_NAME,
	.id_table = be_dev_ids,
	.probe = be_probe,
	.remove = be_remove,
	.suspend = be_suspend,
	.resume = be_resume,
	.shutdown = be_shutdown,
	.err_handler = &be_eeh_handlers
};

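/*
 * Module entry: validate the load-time parameters before registering the
 * PCI driver. Illustrative usage (assuming the usual be2net module name):
 *
 *	modprobe be2net rx_frag_size=4096 num_vfs=4
 *
 * Out-of-range values are clamped to safe defaults rather than failing
 * the load.
 */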
static int __init be_init_module(void)
{
	if (rx_frag_size != 8192 && rx_frag_size != 4096 &&
	    rx_frag_size != 2048) {
		printk(KERN_WARNING DRV_NAME
			" : Module param rx_frag_size must be 2048/4096/8192."
			" Using 2048\n");
		rx_frag_size = 2048;
	}

	if (num_vfs > 32) {
		printk(KERN_WARNING DRV_NAME
			" : Module param num_vfs must not be greater than 32."
			" Using 32\n");
		num_vfs = 32;
	}

	return pci_register_driver(&be_driver);
}
module_init(be_init_module);

static void __exit be_exit_module(void)
{
	pci_unregister_driver(&be_driver);
}
module_exit(be_exit_module);