blob: c4966d46f6922df9c92d94c2a32b696e41d10a3f [file] [log] [blame]
/*
 * Copyright (C) 2005 - 2010 ServerEngines
 * All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version 2
 * as published by the Free Software Foundation.  The full GNU General
 * Public License is included in this distribution in the file called COPYING.
 *
 * Contact Information:
 * linux-drivers@serverengines.com
 *
 * ServerEngines
 * 209 N. Fair Oaks Ave
 * Sunnyvale, CA 94085
 */
17
18#include "be.h"
Sathya Perla8788fdc2009-07-27 22:52:03 +000019#include "be_cmds.h"
Stephen Hemminger65f71b82009-03-27 00:25:24 -070020#include <asm/div64.h>
Sathya Perla6b7c5b92009-03-11 23:32:03 -070021
22MODULE_VERSION(DRV_VER);
23MODULE_DEVICE_TABLE(pci, be_dev_ids);
24MODULE_DESCRIPTION(DRV_DESC " " DRV_VER);
25MODULE_AUTHOR("ServerEngines Corporation");
26MODULE_LICENSE("GPL");
27
28static unsigned int rx_frag_size = 2048;
Sarveshwar Bandiba343c72010-03-31 02:56:12 +000029static unsigned int num_vfs;
Sathya Perla6b7c5b92009-03-11 23:32:03 -070030module_param(rx_frag_size, uint, S_IRUGO);
Sarveshwar Bandiba343c72010-03-31 02:56:12 +000031module_param(num_vfs, uint, S_IRUGO);
Sathya Perla6b7c5b92009-03-11 23:32:03 -070032MODULE_PARM_DESC(rx_frag_size, "Size of a fragment that holds rcvd data.");
Sarveshwar Bandiba343c72010-03-31 02:56:12 +000033MODULE_PARM_DESC(num_vfs, "Number of PCI VFs to initialize");
Sathya Perla6b7c5b92009-03-11 23:32:03 -070034
Sathya Perla3abcded2010-10-03 22:12:27 -070035static bool multi_rxq = true;
36module_param(multi_rxq, bool, S_IRUGO | S_IWUSR);
37MODULE_PARM_DESC(multi_rxq, "Multi Rx Queue support. Enabled by default");
38
/* PCI IDs claimed by this driver: BE2/BE3 and OneConnect adapters under the
 * ServerEngines vendor ID, plus Lancer (OC_DEVICE_ID3) under the Emulex
 * vendor ID. The list is zero-terminated as required by the PCI core.
 */
static DEFINE_PCI_DEVICE_TABLE(be_dev_ids) = {
	{ PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID1) },
	{ PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID2) },
	{ PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID1) },
	{ PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID2) },
	{ PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID3)},
	{ 0 }
};
MODULE_DEVICE_TABLE(pci, be_dev_ids);
/* UE Status Low CSR: human-readable name for each of the 32 bits (index 0 ==
 * bit 0), used when decoding an unrecoverable hw error.
 * NOTE(review): several entries carry trailing spaces; they are emitted
 * verbatim in error logs, so they are kept byte-identical here.
 */
static char *ue_status_low_desc[] = {
	"CEV",
	"CTX",
	"DBUF",
	"ERX",
	"Host",
	"MPU",
	"NDMA",
	"PTC ",
	"RDMA ",
	"RXF ",
	"RXIPS ",
	"RXULP0 ",
	"RXULP1 ",
	"RXULP2 ",
	"TIM ",
	"TPOST ",
	"TPRE ",
	"TXIPS ",
	"TXULP0 ",
	"TXULP1 ",
	"UC ",
	"WDMA ",
	"TXULP2 ",
	"HOST1 ",
	"P0_OB_LINK ",
	"P1_OB_LINK ",
	"HOST_GPIO ",
	"MBOX ",
	"AXGMAC0",
	"AXGMAC1",
	"JTAG",
	"MPU_INTPEND"
};
/* UE Status High CSR: human-readable name for each of the 32 bits (index 0 ==
 * bit 0), used when decoding an unrecoverable hw error.
 * Fix: the original was missing the comma after "NETC", so C string-literal
 * concatenation merged it with the next entry ("NETCUnknown"), leaving only
 * 31 entries and shifting every description after bit 23 by one.
 */
static char *ue_status_hi_desc[] = {
	"LPCMEMHOST",
	"MGMT_MAC",
	"PCS0ONLINE",
	"MPU_IRAM",
	"PCS1ONLINE",
	"PCTL0",
	"PCTL1",
	"PMEM",
	"RR",
	"TXPB",
	"RXPP",
	"XAUI",
	"TXP",
	"ARM",
	"IPC",
	"HOST2",
	"HOST3",
	"HOST4",
	"HOST5",
	"HOST6",
	"HOST7",
	"HOST8",
	"HOST9",
	"NETC",
	"Unknown",
	"Unknown",
	"Unknown",
	"Unknown",
	"Unknown",
	"Unknown",
	"Unknown",
	"Unknown"
};
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700118
Sathya Perla3abcded2010-10-03 22:12:27 -0700119static inline bool be_multi_rxq(struct be_adapter *adapter)
120{
121 return (adapter->num_rx_qs > 1);
122}
123
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700124static void be_queue_free(struct be_adapter *adapter, struct be_queue_info *q)
125{
126 struct be_dma_mem *mem = &q->dma_mem;
127 if (mem->va)
Ivan Vecera2b7bceb2011-02-02 08:05:12 +0000128 dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
129 mem->dma);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700130}
131
132static int be_queue_alloc(struct be_adapter *adapter, struct be_queue_info *q,
133 u16 len, u16 entry_size)
134{
135 struct be_dma_mem *mem = &q->dma_mem;
136
137 memset(q, 0, sizeof(*q));
138 q->len = len;
139 q->entry_size = entry_size;
140 mem->size = len * entry_size;
Ivan Vecera2b7bceb2011-02-02 08:05:12 +0000141 mem->va = dma_alloc_coherent(&adapter->pdev->dev, mem->size, &mem->dma,
142 GFP_KERNEL);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700143 if (!mem->va)
144 return -1;
145 memset(mem->va, 0, mem->size);
146 return 0;
147}
148
Sathya Perla8788fdc2009-07-27 22:52:03 +0000149static void be_intr_set(struct be_adapter *adapter, bool enable)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700150{
Sathya Perla8788fdc2009-07-27 22:52:03 +0000151 u8 __iomem *addr = adapter->pcicfg + PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700152 u32 reg = ioread32(addr);
153 u32 enabled = reg & MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
Sathya Perla5f0b8492009-07-27 22:52:56 +0000154
Sathya Perlacf588472010-02-14 21:22:01 +0000155 if (adapter->eeh_err)
156 return;
157
Sathya Perla5f0b8492009-07-27 22:52:56 +0000158 if (!enabled && enable)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700159 reg |= MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
Sathya Perla5f0b8492009-07-27 22:52:56 +0000160 else if (enabled && !enable)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700161 reg &= ~MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
Sathya Perla5f0b8492009-07-27 22:52:56 +0000162 else
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700163 return;
Sathya Perla5f0b8492009-07-27 22:52:56 +0000164
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700165 iowrite32(reg, addr);
166}
167
Sathya Perla8788fdc2009-07-27 22:52:03 +0000168static void be_rxq_notify(struct be_adapter *adapter, u16 qid, u16 posted)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700169{
170 u32 val = 0;
171 val |= qid & DB_RQ_RING_ID_MASK;
172 val |= posted << DB_RQ_NUM_POSTED_SHIFT;
Sathya Perlaf3eb62d2010-06-29 00:11:17 +0000173
174 wmb();
Sathya Perla8788fdc2009-07-27 22:52:03 +0000175 iowrite32(val, adapter->db + DB_RQ_OFFSET);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700176}
177
Sathya Perla8788fdc2009-07-27 22:52:03 +0000178static void be_txq_notify(struct be_adapter *adapter, u16 qid, u16 posted)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700179{
180 u32 val = 0;
181 val |= qid & DB_TXULP_RING_ID_MASK;
182 val |= (posted & DB_TXULP_NUM_POSTED_MASK) << DB_TXULP_NUM_POSTED_SHIFT;
Sathya Perlaf3eb62d2010-06-29 00:11:17 +0000183
184 wmb();
Sathya Perla8788fdc2009-07-27 22:52:03 +0000185 iowrite32(val, adapter->db + DB_TXULP1_OFFSET);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700186}
187
Sathya Perla8788fdc2009-07-27 22:52:03 +0000188static void be_eq_notify(struct be_adapter *adapter, u16 qid,
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700189 bool arm, bool clear_int, u16 num_popped)
190{
191 u32 val = 0;
192 val |= qid & DB_EQ_RING_ID_MASK;
Sathya Perlafe6d2a32010-11-21 23:25:50 +0000193 val |= ((qid & DB_EQ_RING_ID_EXT_MASK) <<
194 DB_EQ_RING_ID_EXT_MASK_SHIFT);
Sathya Perlacf588472010-02-14 21:22:01 +0000195
196 if (adapter->eeh_err)
197 return;
198
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700199 if (arm)
200 val |= 1 << DB_EQ_REARM_SHIFT;
201 if (clear_int)
202 val |= 1 << DB_EQ_CLR_SHIFT;
203 val |= 1 << DB_EQ_EVNT_SHIFT;
204 val |= num_popped << DB_EQ_NUM_POPPED_SHIFT;
Sathya Perla8788fdc2009-07-27 22:52:03 +0000205 iowrite32(val, adapter->db + DB_EQ_OFFSET);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700206}
207
Sathya Perla8788fdc2009-07-27 22:52:03 +0000208void be_cq_notify(struct be_adapter *adapter, u16 qid, bool arm, u16 num_popped)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700209{
210 u32 val = 0;
211 val |= qid & DB_CQ_RING_ID_MASK;
Sathya Perlafe6d2a32010-11-21 23:25:50 +0000212 val |= ((qid & DB_CQ_RING_ID_EXT_MASK) <<
213 DB_CQ_RING_ID_EXT_MASK_SHIFT);
Sathya Perlacf588472010-02-14 21:22:01 +0000214
215 if (adapter->eeh_err)
216 return;
217
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700218 if (arm)
219 val |= 1 << DB_CQ_REARM_SHIFT;
220 val |= num_popped << DB_CQ_NUM_POPPED_SHIFT;
Sathya Perla8788fdc2009-07-27 22:52:03 +0000221 iowrite32(val, adapter->db + DB_CQ_OFFSET);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700222}
223
/* net_device_ops .ndo_set_mac_address handler: program a new MAC address.
 * On the PF, the currently programmed pmac entry is deleted and the new
 * address is added on the same interface handle; the netdev copy is updated
 * only if the hw programming succeeded. Returns 0 or a negative errno.
 */
static int be_mac_addr_set(struct net_device *netdev, void *p)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct sockaddr *addr = p;
	int status = 0;

	if (!is_valid_ether_addr(addr->sa_data))
		return -EADDRNOTAVAIL;

	/* MAC addr configuration will be done in hardware for VFs
	 * by their corresponding PFs. Just copy to netdev addr here
	 */
	if (!be_physfn(adapter))
		goto netdev_addr;

	/* Remove the old pmac entry before adding the replacement */
	status = be_cmd_pmac_del(adapter, adapter->if_handle, adapter->pmac_id);
	if (status)
		return status;

	status = be_cmd_pmac_add(adapter, (u8 *)addr->sa_data,
			adapter->if_handle, &adapter->pmac_id);
netdev_addr:
	/* status == 0 here for the VF path (no hw call was made) */
	if (!status)
		memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);

	return status;
}
251
/* Refresh adapter->netdev->stats from the hw stats command buffer.
 * Rx counters are summed across all rx queues; tx counters come from the
 * single tx object; error counters are decoded from the per-port rxf stats.
 */
void netdev_stats_update(struct be_adapter *adapter)
{
	struct be_hw_stats *hw_stats = hw_stats_from_cmd(adapter->stats_cmd.va);
	struct be_rxf_stats *rxf_stats = &hw_stats->rxf;
	struct be_port_rxf_stats *port_stats =
			&rxf_stats->port[adapter->port_num];
	struct net_device_stats *dev_stats = &adapter->netdev->stats;
	struct be_erx_stats *erx_stats = &hw_stats->erx;
	struct be_rx_obj *rxo;
	int i;

	/* Recomputed from scratch on every call */
	memset(dev_stats, 0, sizeof(*dev_stats));
	for_all_rx_queues(adapter, rxo, i) {
		dev_stats->rx_packets += rx_stats(rxo)->rx_pkts;
		dev_stats->rx_bytes += rx_stats(rxo)->rx_bytes;
		dev_stats->multicast += rx_stats(rxo)->rx_mcast_pkts;
		/* no space in linux buffers: best possible approximation */
		dev_stats->rx_dropped +=
			erx_stats->rx_drops_no_fragments[rxo->q.id];
	}

	dev_stats->tx_packets = tx_stats(adapter)->be_tx_pkts;
	dev_stats->tx_bytes = tx_stats(adapter)->be_tx_bytes;

	/* bad pkts received */
	dev_stats->rx_errors = port_stats->rx_crc_errors +
		port_stats->rx_alignment_symbol_errors +
		port_stats->rx_in_range_errors +
		port_stats->rx_out_range_errors +
		port_stats->rx_frame_too_long +
		port_stats->rx_dropped_too_small +
		port_stats->rx_dropped_too_short +
		port_stats->rx_dropped_header_too_small +
		port_stats->rx_dropped_tcp_length +
		port_stats->rx_dropped_runt +
		port_stats->rx_tcp_checksum_errs +
		port_stats->rx_ip_checksum_errs +
		port_stats->rx_udp_checksum_errs;

	/* detailed rx errors */
	dev_stats->rx_length_errors = port_stats->rx_in_range_errors +
		port_stats->rx_out_range_errors +
		port_stats->rx_frame_too_long;

	dev_stats->rx_crc_errors = port_stats->rx_crc_errors;

	/* frame alignment errors */
	dev_stats->rx_frame_errors = port_stats->rx_alignment_symbol_errors;

	/* receiver fifo overrun */
	/* drops_no_pbuf is not per i/f, it's per BE card */
	dev_stats->rx_fifo_errors = port_stats->rx_fifo_overflow +
				port_stats->rx_input_fifo_overflow +
				rxf_stats->rx_drops_no_pbuf;
}
307
Sathya Perla8788fdc2009-07-27 22:52:03 +0000308void be_link_status_update(struct be_adapter *adapter, bool link_up)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700309{
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700310 struct net_device *netdev = adapter->netdev;
311
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700312 /* If link came up or went down */
Sathya Perlaa8f447bd2009-06-18 00:10:27 +0000313 if (adapter->link_up != link_up) {
Ajit Khaparde0dffc832009-11-29 17:57:46 +0000314 adapter->link_speed = -1;
Sathya Perlaa8f447bd2009-06-18 00:10:27 +0000315 if (link_up) {
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700316 netif_start_queue(netdev);
317 netif_carrier_on(netdev);
318 printk(KERN_INFO "%s: Link up\n", netdev->name);
Sathya Perlaa8f447bd2009-06-18 00:10:27 +0000319 } else {
320 netif_stop_queue(netdev);
321 netif_carrier_off(netdev);
322 printk(KERN_INFO "%s: Link down\n", netdev->name);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700323 }
Sathya Perlaa8f447bd2009-06-18 00:10:27 +0000324 adapter->link_up = link_up;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700325 }
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700326}
327
/* Update the EQ delay in BE based on the RX frags consumed / sec.
 * Adaptive interrupt coalescing: once a second, derive the rx frags/sec rate
 * and map it to an eq delay, clamped to [min_eqd, max_eqd]; the hw is only
 * told about the new value when it actually changed.
 */
static void be_rx_eqd_update(struct be_adapter *adapter, struct be_rx_obj *rxo)
{
	struct be_eq_obj *rx_eq = &rxo->rx_eq;
	struct be_rx_stats *stats = &rxo->stats;
	ulong now = jiffies;
	u32 eqd;

	if (!rx_eq->enable_aic)
		return;

	/* Wrapped around */
	if (time_before(now, stats->rx_fps_jiffies)) {
		stats->rx_fps_jiffies = now;
		return;
	}

	/* Update once a second */
	if ((now - stats->rx_fps_jiffies) < HZ)
		return;

	stats->rx_fps = (stats->rx_frags - stats->prev_rx_frags) /
			((now - stats->rx_fps_jiffies) / HZ);

	stats->rx_fps_jiffies = now;
	stats->prev_rx_frags = stats->rx_frags;
	/* NOTE(review): 110000 appears to be an empirically chosen scaling
	 * divisor (frags/sec per eqd step, before the <<3) -- confirm against
	 * hw tuning docs. Values mapping below 10 disable delay entirely. */
	eqd = stats->rx_fps / 110000;
	eqd = eqd << 3;
	if (eqd > rx_eq->max_eqd)
		eqd = rx_eq->max_eqd;
	if (eqd < rx_eq->min_eqd)
		eqd = rx_eq->min_eqd;
	if (eqd < 10)
		eqd = 0;
	if (eqd != rx_eq->cur_eqd)
		be_cmd_modify_eqd(adapter, rx_eq->q.id, eqd);

	rx_eq->cur_eqd = eqd;
}
367
/* Convert 'bytes' transferred over 'ticks' jiffies into Mbits/sec.
 * NOTE(review): assumes ticks >= HZ (callers invoke this at >= 1 second
 * intervals); ticks / HZ == 0 would divide by zero inside do_div -- confirm
 * all callers preserve that invariant.
 */
static u32 be_calc_rate(u64 bytes, unsigned long ticks)
{
	u64 rate = bytes;

	do_div(rate, ticks / HZ);
	rate <<= 3; /* bytes/sec -> bits/sec */
	do_div(rate, 1000000ul); /* MB/Sec */

	return rate;
}
378
/* Recompute the cached tx rate (Mbits/sec) at most once every two seconds,
 * based on bytes transmitted since the previous sample. Handles jiffies
 * wrap-around by resetting the sample timestamp.
 */
static void be_tx_rate_update(struct be_adapter *adapter)
{
	struct be_tx_stats *stats = tx_stats(adapter);
	ulong now = jiffies;

	/* Wrapped around? */
	if (time_before(now, stats->be_tx_jiffies)) {
		stats->be_tx_jiffies = now;
		return;
	}

	/* Update tx rate once in two seconds */
	if ((now - stats->be_tx_jiffies) > 2 * HZ) {
		stats->be_tx_rate = be_calc_rate(stats->be_tx_bytes
						  - stats->be_tx_bytes_prev,
						 now - stats->be_tx_jiffies);
		stats->be_tx_jiffies = now;
		stats->be_tx_bytes_prev = stats->be_tx_bytes;
	}
}
399
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700400static void be_tx_stats_update(struct be_adapter *adapter,
Ajit Khaparde91992e42010-02-19 13:57:12 +0000401 u32 wrb_cnt, u32 copied, u32 gso_segs, bool stopped)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700402{
Sathya Perla3abcded2010-10-03 22:12:27 -0700403 struct be_tx_stats *stats = tx_stats(adapter);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700404 stats->be_tx_reqs++;
405 stats->be_tx_wrbs += wrb_cnt;
406 stats->be_tx_bytes += copied;
Ajit Khaparde91992e42010-02-19 13:57:12 +0000407 stats->be_tx_pkts += (gso_segs ? gso_segs : 1);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700408 if (stopped)
409 stats->be_tx_stops++;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700410}
411
412/* Determine number of WRB entries needed to xmit data in an skb */
Sathya Perlafe6d2a32010-11-21 23:25:50 +0000413static u32 wrb_cnt_for_skb(struct be_adapter *adapter, struct sk_buff *skb,
414 bool *dummy)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700415{
David S. Millerebc8d2a2009-06-09 01:01:31 -0700416 int cnt = (skb->len > skb->data_len);
417
418 cnt += skb_shinfo(skb)->nr_frags;
419
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700420 /* to account for hdr wrb */
421 cnt++;
Sathya Perlafe6d2a32010-11-21 23:25:50 +0000422 if (lancer_chip(adapter) || !(cnt & 1)) {
423 *dummy = false;
424 } else {
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700425 /* add a dummy to make it an even num */
426 cnt++;
427 *dummy = true;
Sathya Perlafe6d2a32010-11-21 23:25:50 +0000428 }
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700429 BUG_ON(cnt > BE_MAX_TX_FRAG_COUNT);
430 return cnt;
431}
432
/* Fill one tx wrb with the DMA address (split into hi/lo 32-bit halves) and
 * the byte length of a single fragment.
 */
static inline void wrb_fill(struct be_eth_wrb *wrb, u64 addr, int len)
{
	wrb->frag_pa_hi = upper_32_bits(addr);
	wrb->frag_pa_lo = addr & 0xFFFFFFFF;
	wrb->frag_len = len & ETH_WRB_FRAG_LEN_MASK;
}
439
/* Populate the tx header wrb for an skb: LSO/checksum offload flags, vlan
 * insertion, total wrb count and payload length. 'wrb_cnt' and 'len' must
 * match the data wrbs already queued for this skb.
 */
static void wrb_fill_hdr(struct be_adapter *adapter, struct be_eth_hdr_wrb *hdr,
		struct sk_buff *skb, u32 wrb_cnt, u32 len)
{
	u8 vlan_prio = 0;
	u16 vlan_tag = 0;

	memset(hdr, 0, sizeof(*hdr));

	AMAP_SET_BITS(struct amap_eth_hdr_wrb, crc, hdr, 1);

	if (skb_is_gso(skb)) {
		AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso, hdr, 1);
		AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso_mss,
			hdr, skb_shinfo(skb)->gso_size);
		/* Lancer does not need the explicit lso6 flag */
		if (skb_is_gso_v6(skb) && !lancer_chip(adapter))
			AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso6, hdr, 1);
		/* Lancer A0 requires checksum flags alongside lso */
		if (lancer_chip(adapter) && adapter->sli_family ==
							LANCER_A0_SLI_FAMILY) {
			AMAP_SET_BITS(struct amap_eth_hdr_wrb, ipcs, hdr, 1);
			if (is_tcp_pkt(skb))
				AMAP_SET_BITS(struct amap_eth_hdr_wrb,
								tcpcs, hdr, 1);
			else if (is_udp_pkt(skb))
				AMAP_SET_BITS(struct amap_eth_hdr_wrb,
								udpcs, hdr, 1);
		}
	} else if (skb->ip_summed == CHECKSUM_PARTIAL) {
		if (is_tcp_pkt(skb))
			AMAP_SET_BITS(struct amap_eth_hdr_wrb, tcpcs, hdr, 1);
		else if (is_udp_pkt(skb))
			AMAP_SET_BITS(struct amap_eth_hdr_wrb, udpcs, hdr, 1);
	}

	if (adapter->vlan_grp && vlan_tx_tag_present(skb)) {
		AMAP_SET_BITS(struct amap_eth_hdr_wrb, vlan, hdr, 1);
		vlan_tag = vlan_tx_tag_get(skb);
		vlan_prio = (vlan_tag & VLAN_PRIO_MASK) >> VLAN_PRIO_SHIFT;
		/* If vlan priority provided by OS is NOT in available bmap */
		if (!(adapter->vlan_prio_bmap & (1 << vlan_prio)))
			vlan_tag = (vlan_tag & ~VLAN_PRIO_MASK) |
					adapter->recommended_prio;
		AMAP_SET_BITS(struct amap_eth_hdr_wrb, vlan_tag, hdr, vlan_tag);
	}

	AMAP_SET_BITS(struct amap_eth_hdr_wrb, event, hdr, 1);
	AMAP_SET_BITS(struct amap_eth_hdr_wrb, complete, hdr, 1);
	AMAP_SET_BITS(struct amap_eth_hdr_wrb, num_wrb, hdr, wrb_cnt);
	AMAP_SET_BITS(struct amap_eth_hdr_wrb, len, hdr, len);
}
489
Ivan Vecera2b7bceb2011-02-02 08:05:12 +0000490static void unmap_tx_frag(struct device *dev, struct be_eth_wrb *wrb,
Sathya Perla7101e112010-03-22 20:41:12 +0000491 bool unmap_single)
492{
493 dma_addr_t dma;
494
495 be_dws_le_to_cpu(wrb, sizeof(*wrb));
496
497 dma = (u64)wrb->frag_pa_hi << 32 | (u64)wrb->frag_pa_lo;
FUJITA Tomonorib681ee72010-04-04 21:40:18 +0000498 if (wrb->frag_len) {
Sathya Perla7101e112010-03-22 20:41:12 +0000499 if (unmap_single)
Ivan Vecera2b7bceb2011-02-02 08:05:12 +0000500 dma_unmap_single(dev, dma, wrb->frag_len,
501 DMA_TO_DEVICE);
Sathya Perla7101e112010-03-22 20:41:12 +0000502 else
Ivan Vecera2b7bceb2011-02-02 08:05:12 +0000503 dma_unmap_page(dev, dma, wrb->frag_len, DMA_TO_DEVICE);
Sathya Perla7101e112010-03-22 20:41:12 +0000504 }
505}
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700506
/* DMA-map an skb and queue its wrbs on the tx ring: header wrb first, then
 * the linear head (mapped with dma_map_single), then each page frag (mapped
 * with dma_map_page), then an optional dummy pad wrb. Returns the number of
 * data bytes queued, or 0 after unwinding all mappings if any map failed.
 */
static int make_tx_wrbs(struct be_adapter *adapter,
		struct sk_buff *skb, u32 wrb_cnt, bool dummy_wrb)
{
	dma_addr_t busaddr;
	int i, copied = 0;
	struct device *dev = &adapter->pdev->dev;
	struct sk_buff *first_skb = skb;
	struct be_queue_info *txq = &adapter->tx_obj.q;
	struct be_eth_wrb *wrb;
	struct be_eth_hdr_wrb *hdr;
	bool map_single = false;
	u16 map_head;

	/* Reserve the header wrb now; it is filled in last, once the total
	 * copied length is known. map_head marks the first data wrb for the
	 * error unwind below. */
	hdr = queue_head_node(txq);
	queue_head_inc(txq);
	map_head = txq->head;

	if (skb->len > skb->data_len) {
		int len = skb_headlen(skb);
		busaddr = dma_map_single(dev, skb->data, len, DMA_TO_DEVICE);
		if (dma_mapping_error(dev, busaddr))
			goto dma_err;
		map_single = true;
		wrb = queue_head_node(txq);
		wrb_fill(wrb, busaddr, len);
		be_dws_cpu_to_le(wrb, sizeof(*wrb));
		queue_head_inc(txq);
		copied += len;
	}

	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		struct skb_frag_struct *frag =
			&skb_shinfo(skb)->frags[i];
		busaddr = dma_map_page(dev, frag->page, frag->page_offset,
				       frag->size, DMA_TO_DEVICE);
		if (dma_mapping_error(dev, busaddr))
			goto dma_err;
		wrb = queue_head_node(txq);
		wrb_fill(wrb, busaddr, frag->size);
		be_dws_cpu_to_le(wrb, sizeof(*wrb));
		queue_head_inc(txq);
		copied += frag->size;
	}

	if (dummy_wrb) {
		/* zero-length pad wrb to keep the wrb count even */
		wrb = queue_head_node(txq);
		wrb_fill(wrb, 0, 0);
		be_dws_cpu_to_le(wrb, sizeof(*wrb));
		queue_head_inc(txq);
	}

	wrb_fill_hdr(adapter, hdr, first_skb, wrb_cnt, copied);
	be_dws_cpu_to_le(hdr, sizeof(*hdr));

	return copied;
dma_err:
	/* Walk the data wrbs from map_head and unmap them in order: only the
	 * first can be a single mapping, the rest are page mappings. */
	txq->head = map_head;
	while (copied) {
		wrb = queue_head_node(txq);
		unmap_tx_frag(dev, wrb, map_single);
		map_single = false;
		copied -= wrb->frag_len;
		queue_head_inc(txq);
	}
	return 0;
}
573
/* net_device_ops .ndo_start_xmit handler: map the skb onto the tx ring and
 * ring the doorbell. On mapping failure the ring head is already restored by
 * make_tx_wrbs, so the skb is simply freed. Always returns NETDEV_TX_OK.
 */
static netdev_tx_t be_xmit(struct sk_buff *skb,
			struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_tx_obj *tx_obj = &adapter->tx_obj;
	struct be_queue_info *txq = &tx_obj->q;
	u32 wrb_cnt = 0, copied = 0;
	u32 start = txq->head;
	bool dummy_wrb, stopped = false;

	wrb_cnt = wrb_cnt_for_skb(adapter, skb, &dummy_wrb);

	copied = make_tx_wrbs(adapter, skb, wrb_cnt, dummy_wrb);
	if (copied) {
		/* record the sent skb in the sent_skb table */
		BUG_ON(tx_obj->sent_skb_list[start]);
		tx_obj->sent_skb_list[start] = skb;

		/* Ensure txq has space for the next skb; Else stop the queue
		 * *BEFORE* ringing the tx doorbell, so that we serialize the
		 * tx compls of the current transmit which'll wake up the queue
		 */
		atomic_add(wrb_cnt, &txq->used);
		if ((BE_MAX_TX_FRAG_COUNT + atomic_read(&txq->used)) >=
								txq->len) {
			netif_stop_queue(netdev);
			stopped = true;
		}

		be_txq_notify(adapter, txq->id, wrb_cnt);

		be_tx_stats_update(adapter, wrb_cnt, copied,
				skb_shinfo(skb)->gso_segs, stopped);
	} else {
		/* mapping failed: rewind the ring head and drop the skb */
		txq->head = start;
		dev_kfree_skb_any(skb);
	}
	return NETDEV_TX_OK;
}
613
614static int be_change_mtu(struct net_device *netdev, int new_mtu)
615{
616 struct be_adapter *adapter = netdev_priv(netdev);
617 if (new_mtu < BE_MIN_MTU ||
Ajit Khaparde34a89b82010-02-09 01:32:43 +0000618 new_mtu > (BE_MAX_JUMBO_FRAME_SIZE -
619 (ETH_HLEN + ETH_FCS_LEN))) {
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700620 dev_info(&adapter->pdev->dev,
621 "MTU must be between %d and %d bytes\n",
Ajit Khaparde34a89b82010-02-09 01:32:43 +0000622 BE_MIN_MTU,
623 (BE_MAX_JUMBO_FRAME_SIZE - (ETH_HLEN + ETH_FCS_LEN)));
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700624 return -EINVAL;
625 }
626 dev_info(&adapter->pdev->dev, "MTU changed from %d to %d bytes\n",
627 netdev->mtu, new_mtu);
628 netdev->mtu = new_mtu;
629 return 0;
630}
631
/*
 * A max of 64 (BE_NUM_VLANS_SUPPORTED) vlans can be configured in BE.
 * If the user configures more, place BE in vlan promiscuous mode.
 */
static int be_vid_config(struct be_adapter *adapter, bool vf, u32 vf_num)
{
	u16 vtag[BE_NUM_VLANS_SUPPORTED];
	u16 ntags = 0, i;
	int status = 0;
	u32 if_handle;

	/* VF path: program the single vlan tag assigned to that VF's
	 * interface handle.
	 * NOTE(review): execution falls through into the PF configuration
	 * below, so the VF's be_cmd_vlan_config status is overwritten by the
	 * later call -- confirm this fall-through is intended. */
	if (vf) {
		if_handle = adapter->vf_cfg[vf_num].vf_if_handle;
		vtag[0] = cpu_to_le16(adapter->vf_cfg[vf_num].vf_vlan_tag);
		status = be_cmd_vlan_config(adapter, if_handle, vtag, 1, 1, 0);
	}

	if (adapter->vlans_added <= adapter->max_vlans) {
		/* Construct VLAN Table to give to HW */
		for (i = 0; i < VLAN_N_VID; i++) {
			if (adapter->vlan_tag[i]) {
				vtag[ntags] = cpu_to_le16(i);
				ntags++;
			}
		}
		status = be_cmd_vlan_config(adapter, adapter->if_handle,
					vtag, ntags, 1, 0);
	} else {
		/* Too many vlans for the hw table: vlan promiscuous mode */
		status = be_cmd_vlan_config(adapter, adapter->if_handle,
					NULL, 0, 1, 1);
	}

	return status;
}
666
/* net_device_ops .ndo_vlan_rx_register handler: cache the vlan group pointer
 * for use when delivering vlan-tagged rx packets.
 */
static void be_vlan_register(struct net_device *netdev, struct vlan_group *grp)
{
	struct be_adapter *adapter = netdev_priv(netdev);

	adapter->vlan_grp = grp;
}
673
/* net_device_ops .ndo_vlan_rx_add_vid handler: track the new vid and, on the
 * PF only, reprogram the hw vlan table (VF vlan filtering is handled by the
 * PF on the VF's behalf).
 */
static void be_vlan_add_vid(struct net_device *netdev, u16 vid)
{
	struct be_adapter *adapter = netdev_priv(netdev);

	adapter->vlans_added++;
	if (!be_physfn(adapter))
		return;

	adapter->vlan_tag[vid] = 1;
	/* skip reprogramming once over the limit: be_vid_config already put
	 * the interface in vlan promiscuous mode */
	if (adapter->vlans_added <= (adapter->max_vlans + 1))
		be_vid_config(adapter, false, 0);
}
686
/* net_device_ops .ndo_vlan_rx_kill_vid handler: forget the vid and, on the
 * PF only, reprogram the hw vlan table (possibly leaving vlan promiscuous
 * mode if the count dropped back within the hw limit).
 */
static void be_vlan_rem_vid(struct net_device *netdev, u16 vid)
{
	struct be_adapter *adapter = netdev_priv(netdev);

	adapter->vlans_added--;
	vlan_group_set_device(adapter->vlan_grp, vid, NULL);

	if (!be_physfn(adapter))
		return;

	adapter->vlan_tag[vid] = 0;
	if (adapter->vlans_added <= adapter->max_vlans)
		be_vid_config(adapter, false, 0);
}
701
/* net_device_ops .ndo_set_multicast_list handler: program promiscuous,
 * all-multicast, or the exact multicast filter list into the hw.
 */
static void be_set_multicast_list(struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);

	if (netdev->flags & IFF_PROMISC) {
		be_cmd_promiscuous_config(adapter, adapter->port_num, 1);
		adapter->promiscuous = true;
		goto done;
	}

	/* BE was previously in promiscuous mode; disable it */
	if (adapter->promiscuous) {
		adapter->promiscuous = false;
		be_cmd_promiscuous_config(adapter, adapter->port_num, 0);
	}

	/* Enable multicast promisc if num configured exceeds what we support */
	if (netdev->flags & IFF_ALLMULTI ||
	    netdev_mc_count(netdev) > BE_MAX_MC) {
		/* NULL netdev => hw accepts all multicast */
		be_cmd_multicast_set(adapter, adapter->if_handle, NULL,
				&adapter->mc_cmd_mem);
		goto done;
	}

	be_cmd_multicast_set(adapter, adapter->if_handle, netdev,
		&adapter->mc_cmd_mem);
done:
	return;
}
731
Sarveshwar Bandiba343c72010-03-31 02:56:12 +0000732static int be_set_vf_mac(struct net_device *netdev, int vf, u8 *mac)
733{
734 struct be_adapter *adapter = netdev_priv(netdev);
735 int status;
736
737 if (!adapter->sriov_enabled)
738 return -EPERM;
739
740 if (!is_valid_ether_addr(mac) || (vf >= num_vfs))
741 return -EINVAL;
742
Ajit Khaparde64600ea2010-07-23 01:50:34 +0000743 if (adapter->vf_cfg[vf].vf_pmac_id != BE_INVALID_PMAC_ID)
744 status = be_cmd_pmac_del(adapter,
745 adapter->vf_cfg[vf].vf_if_handle,
746 adapter->vf_cfg[vf].vf_pmac_id);
Sarveshwar Bandiba343c72010-03-31 02:56:12 +0000747
Ajit Khaparde64600ea2010-07-23 01:50:34 +0000748 status = be_cmd_pmac_add(adapter, mac,
749 adapter->vf_cfg[vf].vf_if_handle,
750 &adapter->vf_cfg[vf].vf_pmac_id);
751
752 if (status)
Sarveshwar Bandiba343c72010-03-31 02:56:12 +0000753 dev_err(&adapter->pdev->dev, "MAC %pM set on VF %d Failed\n",
754 mac, vf);
Ajit Khaparde64600ea2010-07-23 01:50:34 +0000755 else
756 memcpy(adapter->vf_cfg[vf].vf_mac_addr, mac, ETH_ALEN);
757
Sarveshwar Bandiba343c72010-03-31 02:56:12 +0000758 return status;
759}
760
Ajit Khaparde64600ea2010-07-23 01:50:34 +0000761static int be_get_vf_config(struct net_device *netdev, int vf,
762 struct ifla_vf_info *vi)
763{
764 struct be_adapter *adapter = netdev_priv(netdev);
765
766 if (!adapter->sriov_enabled)
767 return -EPERM;
768
769 if (vf >= num_vfs)
770 return -EINVAL;
771
772 vi->vf = vf;
Ajit Khapardee1d18732010-07-23 01:52:13 +0000773 vi->tx_rate = adapter->vf_cfg[vf].vf_tx_rate;
Ajit Khaparde1da87b72010-07-23 01:51:22 +0000774 vi->vlan = adapter->vf_cfg[vf].vf_vlan_tag;
Ajit Khaparde64600ea2010-07-23 01:50:34 +0000775 vi->qos = 0;
776 memcpy(&vi->mac, adapter->vf_cfg[vf].vf_mac_addr, ETH_ALEN);
777
778 return 0;
779}
780
Ajit Khaparde1da87b72010-07-23 01:51:22 +0000781static int be_set_vf_vlan(struct net_device *netdev,
782 int vf, u16 vlan, u8 qos)
783{
784 struct be_adapter *adapter = netdev_priv(netdev);
785 int status = 0;
786
787 if (!adapter->sriov_enabled)
788 return -EPERM;
789
790 if ((vf >= num_vfs) || (vlan > 4095))
791 return -EINVAL;
792
793 if (vlan) {
794 adapter->vf_cfg[vf].vf_vlan_tag = vlan;
795 adapter->vlans_added++;
796 } else {
797 adapter->vf_cfg[vf].vf_vlan_tag = 0;
798 adapter->vlans_added--;
799 }
800
801 status = be_vid_config(adapter, true, vf);
802
803 if (status)
804 dev_info(&adapter->pdev->dev,
805 "VLAN %d config on VF %d failed\n", vlan, vf);
806 return status;
807}
808
Ajit Khapardee1d18732010-07-23 01:52:13 +0000809static int be_set_vf_tx_rate(struct net_device *netdev,
810 int vf, int rate)
811{
812 struct be_adapter *adapter = netdev_priv(netdev);
813 int status = 0;
814
815 if (!adapter->sriov_enabled)
816 return -EPERM;
817
818 if ((vf >= num_vfs) || (rate < 0))
819 return -EINVAL;
820
821 if (rate > 10000)
822 rate = 10000;
823
824 adapter->vf_cfg[vf].vf_tx_rate = rate;
825 status = be_cmd_set_qos(adapter, rate / 10, vf);
826
827 if (status)
828 dev_info(&adapter->pdev->dev,
829 "tx rate %d on VF %d failed\n", rate, vf);
830 return status;
831}
832
/* Recompute the RX data rate for this RX object, averaged over at least
 * two seconds, from the byte counters in rxo->stats.
 */
static void be_rx_rate_update(struct be_rx_obj *rxo)
{
	struct be_rx_stats *stats = &rxo->stats;
	ulong now = jiffies;

	/* Wrapped around: just resync the timestamp and wait for next call */
	if (time_before(now, stats->rx_jiffies)) {
		stats->rx_jiffies = now;
		return;
	}

	/* Update the rate once in two seconds */
	if ((now - stats->rx_jiffies) < 2 * HZ)
		return;

	stats->rx_rate = be_calc_rate(stats->rx_bytes - stats->rx_bytes_prev,
				now - stats->rx_jiffies);
	stats->rx_jiffies = now;
	stats->rx_bytes_prev = stats->rx_bytes;
}
853
Sathya Perla3abcded2010-10-03 22:12:27 -0700854static void be_rx_stats_update(struct be_rx_obj *rxo,
Ajit Khaparde1ef78ab2010-09-03 06:17:10 +0000855 u32 pktsize, u16 numfrags, u8 pkt_type)
Sathya Perla4097f662009-03-24 16:40:13 -0700856{
Sathya Perla3abcded2010-10-03 22:12:27 -0700857 struct be_rx_stats *stats = &rxo->stats;
Sathya Perla4097f662009-03-24 16:40:13 -0700858
Sathya Perla3abcded2010-10-03 22:12:27 -0700859 stats->rx_compl++;
860 stats->rx_frags += numfrags;
861 stats->rx_bytes += pktsize;
862 stats->rx_pkts++;
Ajit Khaparde1ef78ab2010-09-03 06:17:10 +0000863 if (pkt_type == BE_MULTICAST_PACKET)
Sathya Perla3abcded2010-10-03 22:12:27 -0700864 stats->rx_mcast_pkts++;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700865}
866
Somnath Koturc6ce2f42010-10-25 01:11:58 +0000867static inline bool csum_passed(struct be_eth_rx_compl *rxcp)
Ajit Khaparde728a9972009-04-13 15:41:22 -0700868{
Somnath Koturc6ce2f42010-10-25 01:11:58 +0000869 u8 l4_cksm, ipv6, ipcksm;
Ajit Khaparde728a9972009-04-13 15:41:22 -0700870
871 l4_cksm = AMAP_GET_BITS(struct amap_eth_rx_compl, l4_cksm, rxcp);
872 ipcksm = AMAP_GET_BITS(struct amap_eth_rx_compl, ipcksm, rxcp);
Somnath Koturc6ce2f42010-10-25 01:11:58 +0000873 ipv6 = AMAP_GET_BITS(struct amap_eth_rx_compl, ip_version, rxcp);
Ajit Khaparde728a9972009-04-13 15:41:22 -0700874
Somnath Koturc6ce2f42010-10-25 01:11:58 +0000875 /* Ignore ipcksm for ipv6 pkts */
876 return l4_cksm && (ipcksm || ipv6);
Ajit Khaparde728a9972009-04-13 15:41:22 -0700877}
878
/* Fetch the page_info for the given RX fragment index and decrement the
 * queue's used count. When this fragment is the last user of its (shared)
 * backing page, the page is DMA-unmapped from the device.
 */
static struct be_rx_page_info *
get_rx_page_info(struct be_adapter *adapter,
		struct be_rx_obj *rxo,
		u16 frag_idx)
{
	struct be_rx_page_info *rx_page_info;
	struct be_queue_info *rxq = &rxo->q;

	rx_page_info = &rxo->page_info_tbl[frag_idx];
	BUG_ON(!rx_page_info->page);

	/* Unmap only once per big page: on its last posted fragment */
	if (rx_page_info->last_page_user) {
		dma_unmap_page(&adapter->pdev->dev,
			dma_unmap_addr(rx_page_info, bus),
			adapter->big_page_size, DMA_FROM_DEVICE);
		rx_page_info->last_page_user = false;
	}

	atomic_dec(&rxq->used);
	return rx_page_info;
}
900
/* Throwaway the data in the Rx completion: release every posted fragment
 * belonging to the completion back to the page allocator.
 */
static void be_rx_compl_discard(struct be_adapter *adapter,
		struct be_rx_obj *rxo,
		struct be_eth_rx_compl *rxcp)
{
	struct be_queue_info *rxq = &rxo->q;
	struct be_rx_page_info *page_info;
	u16 rxq_idx, i, num_rcvd;

	rxq_idx = AMAP_GET_BITS(struct amap_eth_rx_compl, fragndx, rxcp);
	num_rcvd = AMAP_GET_BITS(struct amap_eth_rx_compl, numfrags, rxcp);

	/* Skip out-of-buffer compl(lancer) or flush compl(BE) */
	if (likely(rxq_idx != rxo->last_frag_index && num_rcvd != 0)) {

		rxo->last_frag_index = rxq_idx;

		/* Drop each fragment's page and clear its bookkeeping */
		for (i = 0; i < num_rcvd; i++) {
			page_info = get_rx_page_info(adapter, rxo, rxq_idx);
			put_page(page_info->page);
			memset(page_info, 0, sizeof(*page_info));
			index_inc(&rxq_idx, rxq->len);
		}
	}
}
926
/*
 * skb_fill_rx_data forms a complete skb for an ether frame
 * indicated by rxcp.
 *
 * The first fragment's header bytes (up to BE_HDR_LEN) are copied into the
 * skb's linear area; the remainder of each fragment is attached as paged
 * data. Consecutive fragments carved from the same physical page are
 * coalesced into a single skb frag slot.
 */
static void skb_fill_rx_data(struct be_adapter *adapter, struct be_rx_obj *rxo,
		struct sk_buff *skb, struct be_eth_rx_compl *rxcp,
		u16 num_rcvd)
{
	struct be_queue_info *rxq = &rxo->q;
	struct be_rx_page_info *page_info;
	u16 rxq_idx, i, j;
	u32 pktsize, hdr_len, curr_frag_len, size;
	u8 *start;
	u8 pkt_type;

	rxq_idx = AMAP_GET_BITS(struct amap_eth_rx_compl, fragndx, rxcp);
	pktsize = AMAP_GET_BITS(struct amap_eth_rx_compl, pktsize, rxcp);
	pkt_type = AMAP_GET_BITS(struct amap_eth_rx_compl, cast_enc, rxcp);

	page_info = get_rx_page_info(adapter, rxo, rxq_idx);

	start = page_address(page_info->page) + page_info->page_offset;
	prefetch(start);

	/* Copy data in the first descriptor of this completion */
	curr_frag_len = min(pktsize, rx_frag_size);

	/* Copy the header portion into skb_data */
	hdr_len = min((u32)BE_HDR_LEN, curr_frag_len);
	memcpy(skb->data, start, hdr_len);
	skb->len = curr_frag_len;
	if (curr_frag_len <= BE_HDR_LEN) { /* tiny packet */
		/* Complete packet has now been moved to data */
		put_page(page_info->page);
		skb->data_len = 0;
		skb->tail += curr_frag_len;
	} else {
		/* Header in linear area; rest of first frag as paged data */
		skb_shinfo(skb)->nr_frags = 1;
		skb_shinfo(skb)->frags[0].page = page_info->page;
		skb_shinfo(skb)->frags[0].page_offset =
					page_info->page_offset + hdr_len;
		skb_shinfo(skb)->frags[0].size = curr_frag_len - hdr_len;
		skb->data_len = curr_frag_len - hdr_len;
		skb->tail += hdr_len;
	}
	page_info->page = NULL;

	if (pktsize <= rx_frag_size) {
		BUG_ON(num_rcvd != 1);
		goto done;
	}

	/* More frags present for this completion */
	size = pktsize;
	for (i = 1, j = 0; i < num_rcvd; i++) {
		size -= curr_frag_len;
		index_inc(&rxq_idx, rxq->len);
		page_info = get_rx_page_info(adapter, rxo, rxq_idx);

		curr_frag_len = min(size, rx_frag_size);

		/* Coalesce all frags from the same physical page in one slot */
		if (page_info->page_offset == 0) {
			/* Fresh page */
			j++;
			skb_shinfo(skb)->frags[j].page = page_info->page;
			skb_shinfo(skb)->frags[j].page_offset =
							page_info->page_offset;
			skb_shinfo(skb)->frags[j].size = 0;
			skb_shinfo(skb)->nr_frags++;
		} else {
			/* Same page as previous frag: drop the extra ref */
			put_page(page_info->page);
		}

		skb_shinfo(skb)->frags[j].size += curr_frag_len;
		skb->len += curr_frag_len;
		skb->data_len += curr_frag_len;

		page_info->page = NULL;
	}
	BUG_ON(j > MAX_SKB_FRAGS);

done:
	be_rx_stats_update(rxo, pktsize, num_rcvd, pkt_type);
}
1012
/* Process the RX completion indicated by rxcp when GRO is disabled.
 * Builds an skb from the posted fragments and hands it to the stack
 * (via the vlan path when a valid vlan tag is present).
 */
static void be_rx_compl_process(struct be_adapter *adapter,
			struct be_rx_obj *rxo,
			struct be_eth_rx_compl *rxcp)
{
	struct sk_buff *skb;
	u32 vlanf, vid;
	u16 num_rcvd;
	u8 vtm;

	num_rcvd = AMAP_GET_BITS(struct amap_eth_rx_compl, numfrags, rxcp);

	skb = netdev_alloc_skb_ip_align(adapter->netdev, BE_HDR_LEN);
	if (unlikely(!skb)) {
		if (net_ratelimit())
			dev_warn(&adapter->pdev->dev, "skb alloc failed\n");
		/* No skb: release the posted buffers of this completion */
		be_rx_compl_discard(adapter, rxo, rxcp);
		return;
	}

	skb_fill_rx_data(adapter, rxo, skb, rxcp, num_rcvd);

	if (likely(adapter->rx_csum && csum_passed(rxcp)))
		skb->ip_summed = CHECKSUM_UNNECESSARY;
	else
		skb_checksum_none_assert(skb);

	skb->truesize = skb->len + sizeof(struct sk_buff);
	skb->protocol = eth_type_trans(skb, adapter->netdev);

	vlanf = AMAP_GET_BITS(struct amap_eth_rx_compl, vtp, rxcp);
	vtm = AMAP_GET_BITS(struct amap_eth_rx_compl, vtm, rxcp);

	/* vlanf could be wrongly set in some cards.
	 * ignore if vtm is not set */
	if ((adapter->function_mode & 0x400) && !vtm)
		vlanf = 0;

	if (unlikely(vlanf)) {
		/* vlan frame but no vlan group registered: drop it */
		if (!adapter->vlan_grp || adapter->vlans_added == 0) {
			kfree_skb(skb);
			return;
		}
		vid = AMAP_GET_BITS(struct amap_eth_rx_compl, vlan_tag, rxcp);
		if (!lancer_chip(adapter))
			vid = swab16(vid);
		vlan_hwaccel_receive_skb(skb, adapter->vlan_grp, vid);
	} else {
		netif_receive_skb(skb);
	}
}
1064
/* Process the RX completion indicated by rxcp when GRO is enabled.
 * Attaches the posted fragments to napi's frag skb and pushes it through
 * the GRO path (vlan-aware when a valid tag is present).
 */
static void be_rx_compl_process_gro(struct be_adapter *adapter,
		struct be_rx_obj *rxo,
		struct be_eth_rx_compl *rxcp)
{
	struct be_rx_page_info *page_info;
	struct sk_buff *skb = NULL;
	struct be_queue_info *rxq = &rxo->q;
	struct be_eq_obj *eq_obj = &rxo->rx_eq;
	u32 num_rcvd, pkt_size, remaining, vlanf, curr_frag_len;
	u16 i, rxq_idx = 0, vid, j;
	u8 vtm;
	u8 pkt_type;

	num_rcvd = AMAP_GET_BITS(struct amap_eth_rx_compl, numfrags, rxcp);
	pkt_size = AMAP_GET_BITS(struct amap_eth_rx_compl, pktsize, rxcp);
	vlanf = AMAP_GET_BITS(struct amap_eth_rx_compl, vtp, rxcp);
	rxq_idx = AMAP_GET_BITS(struct amap_eth_rx_compl, fragndx, rxcp);
	vtm = AMAP_GET_BITS(struct amap_eth_rx_compl, vtm, rxcp);
	pkt_type = AMAP_GET_BITS(struct amap_eth_rx_compl, cast_enc, rxcp);

	/* vlanf could be wrongly set in some cards.
	 * ignore if vtm is not set */
	if ((adapter->function_mode & 0x400) && !vtm)
		vlanf = 0;

	skb = napi_get_frags(&eq_obj->napi);
	if (!skb) {
		be_rx_compl_discard(adapter, rxo, rxcp);
		return;
	}

	remaining = pkt_size;
	for (i = 0, j = -1; i < num_rcvd; i++) {
		page_info = get_rx_page_info(adapter, rxo, rxq_idx);

		curr_frag_len = min(remaining, rx_frag_size);

		/* Coalesce all frags from the same physical page in one slot */
		if (i == 0 || page_info->page_offset == 0) {
			/* First frag or Fresh page */
			j++;
			skb_shinfo(skb)->frags[j].page = page_info->page;
			skb_shinfo(skb)->frags[j].page_offset =
							page_info->page_offset;
			skb_shinfo(skb)->frags[j].size = 0;
		} else {
			put_page(page_info->page);
		}
		skb_shinfo(skb)->frags[j].size += curr_frag_len;

		remaining -= curr_frag_len;
		index_inc(&rxq_idx, rxq->len);
		memset(page_info, 0, sizeof(*page_info));
	}
	BUG_ON(j > MAX_SKB_FRAGS);

	skb_shinfo(skb)->nr_frags = j + 1;
	skb->len = pkt_size;
	skb->data_len = pkt_size;
	skb->truesize += pkt_size;
	skb->ip_summed = CHECKSUM_UNNECESSARY;

	if (likely(!vlanf)) {
		napi_gro_frags(&eq_obj->napi);
	} else {
		vid = AMAP_GET_BITS(struct amap_eth_rx_compl, vlan_tag, rxcp);
		if (!lancer_chip(adapter))
			vid = swab16(vid);

		/* NOTE(review): this early return leaves the frags attached
		 * to napi's skb without flushing them and also skips the
		 * stats update below -- confirm whether vlan frames without
		 * a vlan_grp should be discarded explicitly instead.
		 */
		if (!adapter->vlan_grp || adapter->vlans_added == 0)
			return;

		vlan_gro_frags(&eq_obj->napi, adapter->vlan_grp, vid);
	}

	be_rx_stats_update(rxo, pkt_size, num_rcvd, pkt_type);
}
1143
/* Return the next valid RX completion from this RX object's CQ, converted
 * to CPU endianness in place, or NULL when none is pending.
 */
static struct be_eth_rx_compl *be_rx_compl_get(struct be_rx_obj *rxo)
{
	struct be_eth_rx_compl *rxcp = queue_tail_node(&rxo->cq);

	if (rxcp->dw[offsetof(struct amap_eth_rx_compl, valid) / 32] == 0)
		return NULL;

	/* Read the valid bit before the rest of the entry */
	rmb();
	be_dws_le_to_cpu(rxcp, sizeof(*rxcp));

	queue_tail_inc(&rxo->cq);
	return rxcp;
}
1157
/* To reset the valid bit, we need to reset the whole word as
 * when walking the queue the valid entries are little-endian
 * and invalid entries are host endian
 */
static inline void be_rx_compl_reset(struct be_eth_rx_compl *rxcp)
{
	rxcp->dw[offsetof(struct amap_eth_rx_compl, valid) / 32] = 0;
}
1166
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001167static inline struct page *be_alloc_pages(u32 size)
1168{
1169 gfp_t alloc_flags = GFP_ATOMIC;
1170 u32 order = get_order(size);
1171 if (order > 0)
1172 alloc_flags |= __GFP_COMP;
1173 return alloc_pages(alloc_flags, order);
1174}
1175
/*
 * Allocate a page, split it to fragments of size rx_frag_size and post as
 * receive buffers to BE
 */
static void be_post_rx_frags(struct be_rx_obj *rxo)
{
	struct be_adapter *adapter = rxo->adapter;
	struct be_rx_page_info *page_info_tbl = rxo->page_info_tbl;
	struct be_rx_page_info *page_info = NULL, *prev_page_info = NULL;
	struct be_queue_info *rxq = &rxo->q;
	struct page *pagep = NULL;
	struct be_eth_rx_d *rxd;
	u64 page_dmaaddr = 0, frag_dmaaddr;
	u32 posted, page_offset = 0;

	page_info = &rxo->page_info_tbl[rxq->head];
	/* Post until the ring is full (a slot with a page is still in use) */
	for (posted = 0; posted < MAX_RX_POST && !page_info->page; posted++) {
		if (!pagep) {
			/* Fresh big page: allocate and DMA-map it */
			pagep = be_alloc_pages(adapter->big_page_size);
			if (unlikely(!pagep)) {
				rxo->stats.rx_post_fail++;
				break;
			}
			page_dmaaddr = dma_map_page(&adapter->pdev->dev, pagep,
						    0, adapter->big_page_size,
						    DMA_FROM_DEVICE);
			page_info->page_offset = 0;
		} else {
			/* Carve the next rx_frag_size slice off the same page */
			get_page(pagep);
			page_info->page_offset = page_offset + rx_frag_size;
		}
		page_offset = page_info->page_offset;
		page_info->page = pagep;
		dma_unmap_addr_set(page_info, bus, page_dmaaddr);
		frag_dmaaddr = page_dmaaddr + page_info->page_offset;

		rxd = queue_head_node(rxq);
		rxd->fragpa_lo = cpu_to_le32(frag_dmaaddr & 0xFFFFFFFF);
		rxd->fragpa_hi = cpu_to_le32(upper_32_bits(frag_dmaaddr));

		/* Any space left in the current big page for another frag? */
		if ((page_offset + rx_frag_size + rx_frag_size) >
					adapter->big_page_size) {
			pagep = NULL;
			page_info->last_page_user = true;
		}

		prev_page_info = page_info;
		queue_head_inc(rxq);
		page_info = &page_info_tbl[rxq->head];
	}
	/* The last posted frag of a partially-used page owns the unmap */
	if (pagep)
		prev_page_info->last_page_user = true;

	if (posted) {
		atomic_add(posted, &rxq->used);
		be_rxq_notify(adapter, rxq->id, posted);
	} else if (atomic_read(&rxq->used) == 0) {
		/* Let be_worker replenish when memory is available */
		rxo->rx_post_starved = true;
	}
}
1238
/* Return the next valid TX completion from tx_cq (converted to CPU
 * endianness, valid bit cleared for ring reuse), or NULL when none.
 */
static struct be_eth_tx_compl *be_tx_compl_get(struct be_queue_info *tx_cq)
{
	struct be_eth_tx_compl *txcp = queue_tail_node(tx_cq);

	if (txcp->dw[offsetof(struct amap_eth_tx_compl, valid) / 32] == 0)
		return NULL;

	/* Read the valid bit before the rest of the entry */
	rmb();
	be_dws_le_to_cpu(txcp, sizeof(*txcp));

	txcp->dw[offsetof(struct amap_eth_tx_compl, valid) / 32] = 0;

	queue_tail_inc(tx_cq);
	return txcp;
}
1254
/* Reclaim one transmitted skb: walk the TX ring from its tail through
 * last_index, unmapping every wrb of the packet, then free the skb.
 */
static void be_tx_compl_process(struct be_adapter *adapter, u16 last_index)
{
	struct be_queue_info *txq = &adapter->tx_obj.q;
	struct be_eth_wrb *wrb;
	struct sk_buff **sent_skbs = adapter->tx_obj.sent_skb_list;
	struct sk_buff *sent_skb;
	u16 cur_index, num_wrbs = 1; /* account for hdr wrb */
	bool unmap_skb_hdr = true;

	sent_skb = sent_skbs[txq->tail];
	BUG_ON(!sent_skb);
	sent_skbs[txq->tail] = NULL;

	/* skip header wrb */
	queue_tail_inc(txq);

	do {
		cur_index = txq->tail;
		wrb = queue_tail_node(txq);
		/* The first data wrb also unmaps the skb's linear header */
		unmap_tx_frag(&adapter->pdev->dev, wrb,
			      (unmap_skb_hdr && skb_headlen(sent_skb)));
		unmap_skb_hdr = false;

		num_wrbs++;
		queue_tail_inc(txq);
	} while (cur_index != last_index);

	atomic_sub(num_wrbs, &txq->used);

	kfree_skb(sent_skb);
}
1286
/* Return the next pending event-queue entry (converted to CPU endianness),
 * or NULL when the queue has no new events.
 */
static inline struct be_eq_entry *event_get(struct be_eq_obj *eq_obj)
{
	struct be_eq_entry *eqe = queue_tail_node(&eq_obj->q);

	if (!eqe->evt)
		return NULL;

	/* Read the event word before acting on the entry */
	rmb();
	eqe->evt = le32_to_cpu(eqe->evt);
	queue_tail_inc(&eq_obj->q);
	return eqe;
}
1299
1300static int event_handle(struct be_adapter *adapter,
1301 struct be_eq_obj *eq_obj)
1302{
1303 struct be_eq_entry *eqe;
1304 u16 num = 0;
1305
1306 while ((eqe = event_get(eq_obj)) != NULL) {
1307 eqe->evt = 0;
1308 num++;
1309 }
1310
1311 /* Deal with any spurious interrupts that come
1312 * without events
1313 */
1314 be_eq_notify(adapter, eq_obj->q.id, true, true, num);
1315 if (num)
1316 napi_schedule(&eq_obj->napi);
1317
1318 return num;
1319}
1320
1321/* Just read and notify events without processing them.
1322 * Used at the time of destroying event queues */
1323static void be_eq_clean(struct be_adapter *adapter,
1324 struct be_eq_obj *eq_obj)
1325{
1326 struct be_eq_entry *eqe;
1327 u16 num = 0;
1328
1329 while ((eqe = event_get(eq_obj)) != NULL) {
1330 eqe->evt = 0;
1331 num++;
1332 }
1333
1334 if (num)
1335 be_eq_notify(adapter, eq_obj->q.id, false, true, num);
1336}
1337
/* Flush the RX object's queues at teardown: discard any pending RX
 * completions, then release every posted-but-unused RX buffer page.
 */
static void be_rx_q_clean(struct be_adapter *adapter, struct be_rx_obj *rxo)
{
	struct be_rx_page_info *page_info;
	struct be_queue_info *rxq = &rxo->q;
	struct be_queue_info *rx_cq = &rxo->cq;
	struct be_eth_rx_compl *rxcp;
	u16 tail;

	/* First cleanup pending rx completions */
	while ((rxcp = be_rx_compl_get(rxo)) != NULL) {
		be_rx_compl_discard(adapter, rxo, rxcp);
		be_rx_compl_reset(rxcp);
		be_cq_notify(adapter, rx_cq->id, false, 1);
	}

	/* Then free posted rx buffer that were not used */
	tail = (rxq->head + rxq->len - atomic_read(&rxq->used)) % rxq->len;
	for (; atomic_read(&rxq->used) > 0; index_inc(&tail, rxq->len)) {
		page_info = get_rx_page_info(adapter, rxo, tail);
		put_page(page_info->page);
		memset(page_info, 0, sizeof(*page_info));
	}
	BUG_ON(atomic_read(&rxq->used));
}
1362
/* Drain the TX queue at teardown: reap completions for up to 200ms, then
 * forcibly reclaim any posted packets whose completions never arrived.
 */
static void be_tx_compl_clean(struct be_adapter *adapter)
{
	struct be_queue_info *tx_cq = &adapter->tx_obj.cq;
	struct be_queue_info *txq = &adapter->tx_obj.q;
	struct be_eth_tx_compl *txcp;
	u16 end_idx, cmpl = 0, timeo = 0;
	struct sk_buff **sent_skbs = adapter->tx_obj.sent_skb_list;
	struct sk_buff *sent_skb;
	bool dummy_wrb;

	/* Wait for a max of 200ms for all the tx-completions to arrive. */
	do {
		while ((txcp = be_tx_compl_get(tx_cq))) {
			end_idx = AMAP_GET_BITS(struct amap_eth_tx_compl,
					wrb_index, txcp);
			be_tx_compl_process(adapter, end_idx);
			cmpl++;
		}
		if (cmpl) {
			be_cq_notify(adapter, tx_cq->id, false, cmpl);
			cmpl = 0;
		}

		if (atomic_read(&txq->used) == 0 || ++timeo > 200)
			break;

		mdelay(1);
	} while (true);

	if (atomic_read(&txq->used))
		dev_err(&adapter->pdev->dev, "%d pending tx-completions\n",
			atomic_read(&txq->used));

	/* free posted tx for which compls will never arrive */
	while (atomic_read(&txq->used)) {
		sent_skb = sent_skbs[txq->tail];
		end_idx = txq->tail;
		index_adv(&end_idx,
			wrb_cnt_for_skb(adapter, sent_skb, &dummy_wrb) - 1,
			txq->len);
		be_tx_compl_process(adapter, end_idx);
	}
}
1406
/* Tear down the MCC (mailbox command) queue pair: the WRB queue first,
 * then its completion queue - the reverse of the creation order in
 * be_mcc_queues_create(). The hardware destroy is skipped for a queue
 * that was never created on the adapter; be_queue_free() is called
 * unconditionally (presumably it releases only host-side ring memory -
 * confirm against its definition).
 */
static void be_mcc_queues_destroy(struct be_adapter *adapter)
{
	struct be_queue_info *q;

	/* MCC WRB queue */
	q = &adapter->mcc_obj.q;
	if (q->created)
		be_cmd_q_destroy(adapter, q, QTYPE_MCCQ);
	be_queue_free(adapter, q);

	/* MCC completion queue */
	q = &adapter->mcc_obj.cq;
	if (q->created)
		be_cmd_q_destroy(adapter, q, QTYPE_CQ);
	be_queue_free(adapter, q);
}
1421
/* Must be called only after TX qs are created as MCC shares TX EQ */
/* Create the MCC completion queue and the MCC WRB queue, in that order
 * (the CQ must exist before the MCCQ can reference it). On any failure
 * the goto ladder unwinds exactly what was set up so far and -1 is
 * returned; 0 on success.
 */
static int be_mcc_queues_create(struct be_adapter *adapter)
{
	struct be_queue_info *q, *cq;

	/* Alloc MCC compl queue */
	cq = &adapter->mcc_obj.cq;
	if (be_queue_alloc(adapter, cq, MCC_CQ_LEN,
			sizeof(struct be_mcc_compl)))
		goto err;

	/* Ask BE to create MCC compl queue; share TX's eq */
	if (be_cmd_cq_create(adapter, cq, &adapter->tx_eq.q, false, true, 0))
		goto mcc_cq_free;

	/* Alloc MCC queue */
	q = &adapter->mcc_obj.q;
	if (be_queue_alloc(adapter, q, MCC_Q_LEN, sizeof(struct be_mcc_wrb)))
		goto mcc_cq_destroy;

	/* Ask BE to create MCC queue */
	if (be_cmd_mccq_create(adapter, q, cq))
		goto mcc_q_free;

	return 0;

	/* Error unwind: reverse order of the steps above */
mcc_q_free:
	be_queue_free(adapter, q);
mcc_cq_destroy:
	be_cmd_q_destroy(adapter, cq, QTYPE_CQ);
mcc_cq_free:
	be_queue_free(adapter, cq);
err:
	return -1;
}
1457
/* Tear down all TX resources in reverse creation order: the TX WRB
 * queue, the TX completion queue, and finally the TX event queue.
 * Residual events are drained from the EQ (be_eq_clean) before the EQ
 * itself is destroyed.
 */
static void be_tx_queues_destroy(struct be_adapter *adapter)
{
	struct be_queue_info *q;

	q = &adapter->tx_obj.q;
	if (q->created)
		be_cmd_q_destroy(adapter, q, QTYPE_TXQ);
	be_queue_free(adapter, q);

	q = &adapter->tx_obj.cq;
	if (q->created)
		be_cmd_q_destroy(adapter, q, QTYPE_CQ);
	be_queue_free(adapter, q);

	/* Clear any residual events */
	be_eq_clean(adapter, &adapter->tx_eq);

	q = &adapter->tx_eq.q;
	if (q->created)
		be_cmd_q_destroy(adapter, q, QTYPE_EQ);
	be_queue_free(adapter, q);
}
1480
/* Create the TX event queue, TX completion queue and TX WRB queue, in
 * dependency order (EQ -> CQ -> TXQ). The TX EQ uses a fixed interrupt
 * delay (cur_eqd = 96) with adaptive coalescing disabled, and claims
 * the next free MSI-X slot. Returns 0 on success; on failure the goto
 * ladder unwinds whatever was created and -1 is returned.
 */
static int be_tx_queues_create(struct be_adapter *adapter)
{
	struct be_queue_info *eq, *q, *cq;

	/* Fixed EQ delay; no adaptive interrupt coalescing on TX */
	adapter->tx_eq.max_eqd = 0;
	adapter->tx_eq.min_eqd = 0;
	adapter->tx_eq.cur_eqd = 96;
	adapter->tx_eq.enable_aic = false;
	/* Alloc Tx Event queue */
	eq = &adapter->tx_eq.q;
	if (be_queue_alloc(adapter, eq, EVNT_Q_LEN, sizeof(struct be_eq_entry)))
		return -1;

	/* Ask BE to create Tx Event queue */
	if (be_cmd_eq_create(adapter, eq, adapter->tx_eq.cur_eqd))
		goto tx_eq_free;

	/* TX EQ takes the next MSI-X vector slot */
	adapter->tx_eq.msix_vec_idx = adapter->msix_vec_next_idx++;


	/* Alloc TX eth compl queue */
	cq = &adapter->tx_obj.cq;
	if (be_queue_alloc(adapter, cq, TX_CQ_LEN,
			sizeof(struct be_eth_tx_compl)))
		goto tx_eq_destroy;

	/* Ask BE to create Tx eth compl queue */
	if (be_cmd_cq_create(adapter, cq, eq, false, false, 3))
		goto tx_cq_free;

	/* Alloc TX eth queue */
	q = &adapter->tx_obj.q;
	if (be_queue_alloc(adapter, q, TX_Q_LEN, sizeof(struct be_eth_wrb)))
		goto tx_cq_destroy;

	/* Ask BE to create Tx eth queue */
	if (be_cmd_txq_create(adapter, q, cq))
		goto tx_q_free;
	return 0;

	/* Error unwind: reverse order of the steps above */
tx_q_free:
	be_queue_free(adapter, q);
tx_cq_destroy:
	be_cmd_q_destroy(adapter, cq, QTYPE_CQ);
tx_cq_free:
	be_queue_free(adapter, cq);
tx_eq_destroy:
	be_cmd_q_destroy(adapter, eq, QTYPE_EQ);
tx_eq_free:
	be_queue_free(adapter, eq);
	return -1;
}
1533
/* Tear down every RX queue set (RXQ, CQ, EQ per be_rx_obj) in reverse
 * creation order. After the RXQ is destroyed the code waits 1ms for
 * in-flight DMA and the flush completion before reclaiming posted
 * buffers with be_rx_q_clean().
 */
static void be_rx_queues_destroy(struct be_adapter *adapter)
{
	struct be_queue_info *q;
	struct be_rx_obj *rxo;
	int i;

	for_all_rx_queues(adapter, rxo, i) {
		q = &rxo->q;
		if (q->created) {
			be_cmd_q_destroy(adapter, q, QTYPE_RXQ);
			/* After the rxq is invalidated, wait for a grace time
			 * of 1ms for all dma to end and the flush compl to
			 * arrive
			 */
			mdelay(1);
			be_rx_q_clean(adapter, rxo);
		}
		be_queue_free(adapter, q);

		q = &rxo->cq;
		if (q->created)
			be_cmd_q_destroy(adapter, q, QTYPE_CQ);
		be_queue_free(adapter, q);

		/* Clear any residual events */
		q = &rxo->rx_eq.q;
		if (q->created) {
			be_eq_clean(adapter, &rxo->rx_eq);
			be_cmd_q_destroy(adapter, q, QTYPE_EQ);
		}
		be_queue_free(adapter, q);
	}
}
1567
/* Create EQ, CQ and RXQ for every be_rx_obj, then program the RSS
 * indirection table if more than one RX queue is in use. Queue 0 is
 * the default (non-RSS) queue; queues i > 0 are created with the RSS
 * enable flag set. Each RX EQ uses adaptive interrupt coalescing and
 * claims the next free MSI-X slot. On any failure everything created
 * so far is torn down via be_rx_queues_destroy() and -1 is returned.
 *
 * NOTE(review): rx_eq.cur_eqd is passed to be_cmd_eq_create() but is
 * never set here - presumably it is zero from queue allocation or set
 * elsewhere; confirm.
 */
static int be_rx_queues_create(struct be_adapter *adapter)
{
	struct be_queue_info *eq, *q, *cq;
	struct be_rx_obj *rxo;
	int rc, i;

	adapter->big_page_size = (1 << get_order(rx_frag_size)) * PAGE_SIZE;
	for_all_rx_queues(adapter, rxo, i) {
		rxo->adapter = adapter;
		/* Init last_frag_index so that the frag index in the first
		 * completion will never match */
		rxo->last_frag_index = 0xffff;
		rxo->rx_eq.max_eqd = BE_MAX_EQD;
		rxo->rx_eq.enable_aic = true;

		/* EQ */
		eq = &rxo->rx_eq.q;
		rc = be_queue_alloc(adapter, eq, EVNT_Q_LEN,
					sizeof(struct be_eq_entry));
		if (rc)
			goto err;

		rc = be_cmd_eq_create(adapter, eq, rxo->rx_eq.cur_eqd);
		if (rc)
			goto err;

		/* Each RX EQ takes the next MSI-X vector slot */
		rxo->rx_eq.msix_vec_idx = adapter->msix_vec_next_idx++;

		/* CQ */
		cq = &rxo->cq;
		rc = be_queue_alloc(adapter, cq, RX_CQ_LEN,
				sizeof(struct be_eth_rx_compl));
		if (rc)
			goto err;

		rc = be_cmd_cq_create(adapter, cq, eq, false, false, 3);
		if (rc)
			goto err;
		/* Rx Q */
		q = &rxo->q;
		rc = be_queue_alloc(adapter, q, RX_Q_LEN,
				sizeof(struct be_eth_rx_d));
		if (rc)
			goto err;

		rc = be_cmd_rxq_create(adapter, q, cq->id, rx_frag_size,
			BE_MAX_JUMBO_FRAME_SIZE, adapter->if_handle,
			(i > 0) ? 1 : 0/* rss enable */, &rxo->rss_id);
		if (rc)
			goto err;
	}

	if (be_multi_rxq(adapter)) {
		u8 rsstable[MAX_RSS_QS];

		/* Build the RSS indirection table from the RSS queues'
		 * hardware-assigned ids (excludes the default queue) */
		for_all_rss_queues(adapter, rxo, i)
			rsstable[i] = rxo->rss_id;

		rc = be_cmd_rss_config(adapter, rsstable,
			adapter->num_rx_qs - 1);
		if (rc)
			goto err;
	}

	return 0;
err:
	be_rx_queues_destroy(adapter);
	return -1;
}
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001637
Sathya Perlafe6d2a32010-11-21 23:25:50 +00001638static bool event_peek(struct be_eq_obj *eq_obj)
Sathya Perlab628bde2009-08-17 00:58:26 +00001639{
Sathya Perlafe6d2a32010-11-21 23:25:50 +00001640 struct be_eq_entry *eqe = queue_tail_node(&eq_obj->q);
1641 if (!eqe->evt)
1642 return false;
1643 else
1644 return true;
Sathya Perlab628bde2009-08-17 00:58:26 +00001645}
1646
/* Legacy INTx interrupt handler (shared line). Two chip variants:
 * - Lancer: there is no ISR register read here; each EQ is peeked
 *   directly and handled if it has a pending event.
 * - BE2/BE3: read the CEV ISR word for this function and dispatch to
 *   the TX and RX event queues whose msix_vec_idx bit is set in it.
 * Returns IRQ_NONE when the interrupt was not ours so the kernel can
 * offer it to other handlers sharing the line.
 */
static irqreturn_t be_intx(int irq, void *dev)
{
	struct be_adapter *adapter = dev;
	struct be_rx_obj *rxo;
	int isr, i, tx = 0 , rx = 0;

	if (lancer_chip(adapter)) {
		if (event_peek(&adapter->tx_eq))
			tx = event_handle(adapter, &adapter->tx_eq);
		for_all_rx_queues(adapter, rxo, i) {
			if (event_peek(&rxo->rx_eq))
				rx |= event_handle(adapter, &rxo->rx_eq);
		}

		/* Nothing pending on any EQ: not our interrupt */
		if (!(tx || rx))
			return IRQ_NONE;

	} else {
		/* Each function's ISR word is CEV_ISR_SIZE apart; the
		 * tx EQ id selects this function's word */
		isr = ioread32(adapter->csr + CEV_ISR0_OFFSET +
			(adapter->tx_eq.q.id / 8) * CEV_ISR_SIZE);
		if (!isr)
			return IRQ_NONE;

		if ((1 << adapter->tx_eq.msix_vec_idx & isr))
			event_handle(adapter, &adapter->tx_eq);

		for_all_rx_queues(adapter, rxo, i) {
			if ((1 << rxo->rx_eq.msix_vec_idx & isr))
				event_handle(adapter, &rxo->rx_eq);
		}
	}

	return IRQ_HANDLED;
}
1681
1682static irqreturn_t be_msix_rx(int irq, void *dev)
1683{
Sathya Perla3abcded2010-10-03 22:12:27 -07001684 struct be_rx_obj *rxo = dev;
1685 struct be_adapter *adapter = rxo->adapter;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001686
Sathya Perla3abcded2010-10-03 22:12:27 -07001687 event_handle(adapter, &rxo->rx_eq);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001688
1689 return IRQ_HANDLED;
1690}
1691
Sathya Perla5fb379e2009-06-18 00:02:59 +00001692static irqreturn_t be_msix_tx_mcc(int irq, void *dev)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001693{
1694 struct be_adapter *adapter = dev;
1695
Sathya Perla8788fdc2009-07-27 22:52:03 +00001696 event_handle(adapter, &adapter->tx_eq);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001697
1698 return IRQ_HANDLED;
1699}
1700
Sathya Perla64642812010-12-01 01:04:17 +00001701static inline bool do_gro(struct be_rx_obj *rxo,
1702 struct be_eth_rx_compl *rxcp, u8 err)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001703{
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001704 int tcp_frame = AMAP_GET_BITS(struct amap_eth_rx_compl, tcpf, rxcp);
1705
1706 if (err)
Sathya Perla3abcded2010-10-03 22:12:27 -07001707 rxo->stats.rxcp_err++;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001708
Ajit Khaparde5be93b92009-07-21 12:36:19 -07001709 return (tcp_frame && !err) ? true : false;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001710}
1711
/* NAPI poll handler for one RX queue. Consumes up to 'budget' RX
 * completions, dispatching each frame to the GRO or regular receive
 * path, refills the RX ring when it runs low, and re-arms the CQ
 * interrupt only when all pending work was consumed (work_done <
 * budget). Returns the number of completions processed.
 */
static int be_poll_rx(struct napi_struct *napi, int budget)
{
	struct be_eq_obj *rx_eq = container_of(napi, struct be_eq_obj, napi);
	struct be_rx_obj *rxo = container_of(rx_eq, struct be_rx_obj, rx_eq);
	struct be_adapter *adapter = rxo->adapter;
	struct be_queue_info *rx_cq = &rxo->cq;
	struct be_eth_rx_compl *rxcp;
	u32 work_done;
	u16 frag_index, num_rcvd;
	u8 err;

	rxo->stats.rx_polls++;
	for (work_done = 0; work_done < budget; work_done++) {
		rxcp = be_rx_compl_get(rxo);
		if (!rxcp)
			break;

		err = AMAP_GET_BITS(struct amap_eth_rx_compl, err, rxcp);
		frag_index = AMAP_GET_BITS(struct amap_eth_rx_compl, fragndx,
				rxcp);
		num_rcvd = AMAP_GET_BITS(struct amap_eth_rx_compl, numfrags,
				rxcp);

		/* Skip out-of-buffer compl(lancer) or flush compl(BE) */
		/* last_frag_index was seeded to 0xffff at queue create so
		 * the very first completion can never be misdetected */
		if (likely(frag_index != rxo->last_frag_index &&
				num_rcvd != 0)) {
			rxo->last_frag_index = frag_index;

			if (do_gro(rxo, rxcp, err))
				be_rx_compl_process_gro(adapter, rxo, rxcp);
			else
				be_rx_compl_process(adapter, rxo, rxcp);
		}

		/* Zero the compl entry so it is not re-processed */
		be_rx_compl_reset(rxcp);
	}

	/* Refill the queue */
	if (atomic_read(&rxo->q.used) < RX_FRAGS_REFILL_WM)
		be_post_rx_frags(rxo);

	/* All consumed */
	if (work_done < budget) {
		napi_complete(napi);
		be_cq_notify(adapter, rx_cq->id, true, work_done);
	} else {
		/* More to be consumed; continue with interrupts disabled */
		be_cq_notify(adapter, rx_cq->id, false, work_done);
	}
	return work_done;
}
1763
/* As TX and MCC share the same EQ check for both TX and MCC completions.
 * For TX/MCC we don't honour budget; consume everything
 */
/* NAPI poll handler for the shared TX/MCC completion path. Drains all
 * TX completions, reaps MCC completions, then re-arms both CQs. The
 * netdev queue is woken once at least half the TX ring is free again.
 * Always returns 1 (budget is deliberately not honoured here).
 */
static int be_poll_tx_mcc(struct napi_struct *napi, int budget)
{
	struct be_eq_obj *tx_eq = container_of(napi, struct be_eq_obj, napi);
	struct be_adapter *adapter =
		container_of(tx_eq, struct be_adapter, tx_eq);
	struct be_queue_info *txq = &adapter->tx_obj.q;
	struct be_queue_info *tx_cq = &adapter->tx_obj.cq;
	struct be_eth_tx_compl *txcp;
	int tx_compl = 0, mcc_compl, status = 0;
	u16 end_idx;

	/* Reclaim all completed TX wrbs/skbs */
	while ((txcp = be_tx_compl_get(tx_cq))) {
		end_idx = AMAP_GET_BITS(struct amap_eth_tx_compl,
				wrb_index, txcp);
		be_tx_compl_process(adapter, end_idx);
		tx_compl++;
	}

	mcc_compl = be_process_mcc(adapter, &status);

	napi_complete(napi);

	/* Re-arm the MCC CQ, acking the completions just reaped */
	if (mcc_compl) {
		struct be_mcc_obj *mcc_obj = &adapter->mcc_obj;
		be_cq_notify(adapter, mcc_obj->cq.id, true, mcc_compl);
	}

	if (tx_compl) {
		be_cq_notify(adapter, adapter->tx_obj.cq.id, true, tx_compl);

		/* As Tx wrbs have been freed up, wake up netdev queue if
		 * it was stopped due to lack of tx wrbs.
		 */
		if (netif_queue_stopped(adapter->netdev) &&
			atomic_read(&txq->used) < txq->len / 2) {
			netif_wake_queue(adapter->netdev);
		}

		tx_stats(adapter)->be_tx_events++;
		tx_stats(adapter)->be_tx_compl += tx_compl;
	}

	return 1;
}
1811
Ajit Khaparded053de92010-09-03 06:23:30 +00001812void be_detect_dump_ue(struct be_adapter *adapter)
Ajit Khaparde7c185272010-07-29 06:16:33 +00001813{
1814 u32 ue_status_lo, ue_status_hi, ue_status_lo_mask, ue_status_hi_mask;
1815 u32 i;
1816
1817 pci_read_config_dword(adapter->pdev,
1818 PCICFG_UE_STATUS_LOW, &ue_status_lo);
1819 pci_read_config_dword(adapter->pdev,
1820 PCICFG_UE_STATUS_HIGH, &ue_status_hi);
1821 pci_read_config_dword(adapter->pdev,
1822 PCICFG_UE_STATUS_LOW_MASK, &ue_status_lo_mask);
1823 pci_read_config_dword(adapter->pdev,
1824 PCICFG_UE_STATUS_HI_MASK, &ue_status_hi_mask);
1825
1826 ue_status_lo = (ue_status_lo & (~ue_status_lo_mask));
1827 ue_status_hi = (ue_status_hi & (~ue_status_hi_mask));
1828
Ajit Khaparded053de92010-09-03 06:23:30 +00001829 if (ue_status_lo || ue_status_hi) {
1830 adapter->ue_detected = true;
1831 dev_err(&adapter->pdev->dev, "UE Detected!!\n");
1832 }
1833
Ajit Khaparde7c185272010-07-29 06:16:33 +00001834 if (ue_status_lo) {
1835 for (i = 0; ue_status_lo; ue_status_lo >>= 1, i++) {
1836 if (ue_status_lo & 1)
1837 dev_err(&adapter->pdev->dev,
1838 "UE: %s bit set\n", ue_status_low_desc[i]);
1839 }
1840 }
1841 if (ue_status_hi) {
1842 for (i = 0; ue_status_hi; ue_status_hi >>= 1, i++) {
1843 if (ue_status_hi & 1)
1844 dev_err(&adapter->pdev->dev,
1845 "UE: %s bit set\n", ue_status_hi_desc[i]);
1846 }
1847 }
1848
1849}
1850
/* Periodic (1 second) housekeeping work item: fetches HW stats,
 * updates TX/RX rate and adaptive EQ-delay state, replenishes starved
 * RX rings, and checks for unrecoverable errors. While the interface
 * is down it only reaps pending MCC completions. Always reschedules
 * itself.
 */
static void be_worker(struct work_struct *work)
{
	struct be_adapter *adapter =
		container_of(work, struct be_adapter, work.work);
	struct be_rx_obj *rxo;
	int i;

	/* when interrupts are not yet enabled, just reap any pending
	 * mcc completions */
	if (!netif_running(adapter->netdev)) {
		int mcc_compl, status = 0;

		mcc_compl = be_process_mcc(adapter, &status);

		if (mcc_compl) {
			struct be_mcc_obj *mcc_obj = &adapter->mcc_obj;
			be_cq_notify(adapter, mcc_obj->cq.id, false, mcc_compl);
		}
		goto reschedule;
	}

	/* Don't issue a new stats request while one is outstanding */
	if (!adapter->stats_ioctl_sent)
		be_cmd_get_stats(adapter, &adapter->stats_cmd);

	be_tx_rate_update(adapter);

	for_all_rx_queues(adapter, rxo, i) {
		be_rx_rate_update(rxo);
		be_rx_eqd_update(adapter, rxo);

		/* Re-post buffers if an earlier refill ran out of memory */
		if (rxo->rx_post_starved) {
			rxo->rx_post_starved = false;
			be_post_rx_frags(rxo);
		}
	}
	/* UE registers do not exist on lancer; check once until a UE
	 * is latched */
	if (!adapter->ue_detected && !lancer_chip(adapter))
		be_detect_dump_ue(adapter);

reschedule:
	schedule_delayed_work(&adapter->work, msecs_to_jiffies(1000));
}
1892
Sathya Perla8d56ff12009-11-22 22:02:26 +00001893static void be_msix_disable(struct be_adapter *adapter)
1894{
1895 if (adapter->msix_enabled) {
1896 pci_disable_msix(adapter->pdev);
1897 adapter->msix_enabled = false;
1898 }
1899}
1900
Sathya Perla3abcded2010-10-03 22:12:27 -07001901static int be_num_rxqs_get(struct be_adapter *adapter)
1902{
1903 if (multi_rxq && (adapter->function_caps & BE_FUNCTION_CAPS_RSS) &&
1904 !adapter->sriov_enabled && !(adapter->function_mode & 0x400)) {
1905 return 1 + MAX_RSS_QS; /* one default non-RSS queue */
1906 } else {
1907 dev_warn(&adapter->pdev->dev,
1908 "No support for multiple RX queues\n");
1909 return 1;
1910 }
1911}
1912
/* Try to enable one MSI-X vector per RX queue plus one for TX/MCC.
 * With the old pci_enable_msix() API a positive return value is the
 * number of vectors actually available; in that case retry with that
 * count and shrink the number of RX queues to fit. On total failure
 * msix_enabled stays false and the driver falls back to INTx.
 */
static void be_msix_enable(struct be_adapter *adapter)
{
#define BE_MIN_MSIX_VECTORS (1 + 1) /* Rx + Tx */
	int i, status;

	adapter->num_rx_qs = be_num_rxqs_get(adapter);

	for (i = 0; i < (adapter->num_rx_qs + 1); i++)
		adapter->msix_entries[i].entry = i;

	status = pci_enable_msix(adapter->pdev, adapter->msix_entries,
			adapter->num_rx_qs + 1);
	if (status == 0) {
		goto done;
	} else if (status >= BE_MIN_MSIX_VECTORS) {
		/* Retry with however many vectors the platform offered */
		if (pci_enable_msix(adapter->pdev, adapter->msix_entries,
				status) == 0) {
			adapter->num_rx_qs = status - 1;
			dev_warn(&adapter->pdev->dev,
				"Could alloc only %d MSIx vectors. "
				"Using %d RX Qs\n", status, adapter->num_rx_qs);
			goto done;
		}
	}
	return;
done:
	adapter->msix_enabled = true;
}
1941
/* Enable SR-IOV virtual functions when requested via the num_vfs
 * module parameter. Only the physical function may enable VFs, and
 * only on kernels built with CONFIG_PCI_IOV; sriov_enabled records
 * whether pci_enable_sriov() succeeded.
 */
static void be_sriov_enable(struct be_adapter *adapter)
{
	be_check_sriov_fn_type(adapter);
#ifdef CONFIG_PCI_IOV
	if (be_physfn(adapter) && num_vfs) {
		int status;

		status = pci_enable_sriov(adapter->pdev, num_vfs);
		adapter->sriov_enabled = status ? false : true;
	}
#endif
}
1954
/* Disable SR-IOV if this driver previously enabled it. Compiled to a
 * no-op on kernels without CONFIG_PCI_IOV.
 */
static void be_sriov_disable(struct be_adapter *adapter)
{
#ifdef CONFIG_PCI_IOV
	if (!adapter->sriov_enabled)
		return;

	pci_disable_sriov(adapter->pdev);
	adapter->sriov_enabled = false;
#endif
}
1964
Sathya Perlafe6d2a32010-11-21 23:25:50 +00001965static inline int be_msix_vec_get(struct be_adapter *adapter,
1966 struct be_eq_obj *eq_obj)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001967{
Sathya Perlafe6d2a32010-11-21 23:25:50 +00001968 return adapter->msix_entries[eq_obj->msix_vec_idx].vector;
Sathya Perlab628bde2009-08-17 00:58:26 +00001969}
1970
1971static int be_request_irq(struct be_adapter *adapter,
1972 struct be_eq_obj *eq_obj,
Sathya Perla3abcded2010-10-03 22:12:27 -07001973 void *handler, char *desc, void *context)
Sathya Perlab628bde2009-08-17 00:58:26 +00001974{
1975 struct net_device *netdev = adapter->netdev;
1976 int vec;
1977
1978 sprintf(eq_obj->desc, "%s-%s", netdev->name, desc);
Sathya Perlafe6d2a32010-11-21 23:25:50 +00001979 vec = be_msix_vec_get(adapter, eq_obj);
Sathya Perla3abcded2010-10-03 22:12:27 -07001980 return request_irq(vec, handler, 0, eq_obj->desc, context);
Sathya Perlab628bde2009-08-17 00:58:26 +00001981}
1982
/* Free the MSI-X vector held by this EQ object. 'context' must match
 * the cookie that was passed to be_request_irq().
 */
static void be_free_irq(struct be_adapter *adapter, struct be_eq_obj *eq_obj,
		void *context)
{
	free_irq(be_msix_vec_get(adapter, eq_obj), context);
}
1989
/* Register the TX/MCC vector and one vector per RX queue. On failure
 * everything registered so far is released (the backward loop walks
 * the RX objects that already got a vector), MSI-X is disabled, and
 * the error is returned so the caller can fall back to INTx.
 */
static int be_msix_register(struct be_adapter *adapter)
{
	struct be_rx_obj *rxo;
	int status, i;
	char qname[10];

	status = be_request_irq(adapter, &adapter->tx_eq, be_msix_tx_mcc, "tx",
				adapter);
	if (status)
		goto err;

	for_all_rx_queues(adapter, rxo, i) {
		sprintf(qname, "rxq%d", i);
		status = be_request_irq(adapter, &rxo->rx_eq, be_msix_rx,
				qname, rxo);
		if (status)
			goto err_msix;
	}

	return 0;

err_msix:
	be_free_irq(adapter, &adapter->tx_eq, adapter);

	/* Free the rx vectors registered before the one that failed */
	for (i--, rxo = &adapter->rx_obj[i]; i >= 0; i--, rxo--)
		be_free_irq(adapter, &rxo->rx_eq, rxo);

err:
	dev_warn(&adapter->pdev->dev,
		"MSIX Request IRQ failed - err %d\n", status);
	pci_disable_msix(adapter->pdev);
	adapter->msix_enabled = false;
	return status;
}
2024
/* Register interrupt handlers: MSI-X when enabled, otherwise a shared
 * INTx line. A physical function that fails MSI-X registration falls
 * back to INTx; a VF cannot (INTx is not supported there) and the
 * MSI-X error is returned. Sets isr_registered on success.
 */
static int be_irq_register(struct be_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	int status;

	if (adapter->msix_enabled) {
		status = be_msix_register(adapter);
		if (status == 0)
			goto done;
		/* INTx is not supported for VF */
		if (!be_physfn(adapter))
			return status;
	}

	/* INTx */
	netdev->irq = adapter->pdev->irq;
	status = request_irq(netdev->irq, be_intx, IRQF_SHARED, netdev->name,
			adapter);
	if (status) {
		dev_err(&adapter->pdev->dev,
			"INTx request IRQ failed - err %d\n", status);
		return status;
	}
done:
	adapter->isr_registered = true;
	return 0;
}
2052
/* Undo be_irq_register(): free the INTx line, or the TX/MCC vector and
 * every per-RX-queue vector, matching the cookies used when they were
 * requested. No-op if nothing was registered.
 */
static void be_irq_unregister(struct be_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	struct be_rx_obj *rxo;
	int i;

	if (!adapter->isr_registered)
		return;

	/* INTx */
	if (!adapter->msix_enabled) {
		free_irq(netdev->irq, adapter);
		goto done;
	}

	/* MSIx */
	be_free_irq(adapter, &adapter->tx_eq, adapter);

	for_all_rx_queues(adapter, rxo, i)
		be_free_irq(adapter, &rxo->rx_eq, rxo);

done:
	adapter->isr_registered = false;
}
2077
/* ndo_stop: bring the interface down. The ordering matters - stop
 * async MCC processing and the netdev queue, mask device interrupts
 * (non-lancer), wait for in-flight handlers with synchronize_irq(),
 * unregister IRQs, disable NAPI, and finally drain pending TX
 * completions so every outstanding skb is freed. Always returns 0.
 */
static int be_close(struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_rx_obj *rxo;
	struct be_eq_obj *tx_eq = &adapter->tx_eq;
	int vec, i;

	be_async_mcc_disable(adapter);

	netif_stop_queue(netdev);
	netif_carrier_off(netdev);
	adapter->link_up = false;

	/* lancer has no host-visible interrupt-enable control here */
	if (!lancer_chip(adapter))
		be_intr_set(adapter, false);

	/* Wait until any handler running on another CPU has finished */
	if (adapter->msix_enabled) {
		vec = be_msix_vec_get(adapter, tx_eq);
		synchronize_irq(vec);

		for_all_rx_queues(adapter, rxo, i) {
			vec = be_msix_vec_get(adapter, &rxo->rx_eq);
			synchronize_irq(vec);
		}
	} else {
		synchronize_irq(netdev->irq);
	}
	be_irq_unregister(adapter);

	for_all_rx_queues(adapter, rxo, i)
		napi_disable(&rxo->rx_eq.napi);

	napi_disable(&tx_eq->napi);

	/* Wait for all pending tx completions to arrive so that
	 * all tx skbs are freed.
	 */
	be_tx_compl_clean(adapter);

	return 0;
}
2119
/* ndo_open: bring the interface up. Posts RX buffers, enables NAPI,
 * registers IRQs, unmasks device interrupts (non-lancer), arms all
 * event/completion queues, enables async MCC, then queries link state
 * and (for the physical function only) programs VLAN and flow-control
 * settings. Any failure tears everything back down via be_close() and
 * returns -EIO.
 */
static int be_open(struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_eq_obj *tx_eq = &adapter->tx_eq;
	struct be_rx_obj *rxo;
	bool link_up;
	int status, i;
	u8 mac_speed;
	u16 link_speed;

	for_all_rx_queues(adapter, rxo, i) {
		be_post_rx_frags(rxo);
		napi_enable(&rxo->rx_eq.napi);
	}
	napi_enable(&tx_eq->napi);

	be_irq_register(adapter);

	/* lancer has no host-visible interrupt-enable control here */
	if (!lancer_chip(adapter))
		be_intr_set(adapter, true);

	/* The evt queues are created in unarmed state; arm them */
	for_all_rx_queues(adapter, rxo, i) {
		be_eq_notify(adapter, rxo->rx_eq.q.id, true, false, 0);
		be_cq_notify(adapter, rxo->cq.id, true, 0);
	}
	be_eq_notify(adapter, tx_eq->q.id, true, false, 0);

	/* Now that interrupts are on we can process async mcc */
	be_async_mcc_enable(adapter);

	status = be_cmd_link_status_query(adapter, &link_up, &mac_speed,
			&link_speed);
	if (status)
		goto err;
	be_link_status_update(adapter, link_up);

	/* VLAN and flow-control config are PF-only operations */
	if (be_physfn(adapter)) {
		status = be_vid_config(adapter, false, 0);
		if (status)
			goto err;

		status = be_cmd_set_flow_control(adapter,
				adapter->tx_fc, adapter->rx_fc);
		if (status)
			goto err;
	}

	return 0;
err:
	be_close(adapter->netdev);
	return -EIO;
}
2173
/* Enable or disable Wake-on-LAN (magic packet). A DMA-coherent buffer
 * is allocated for the firmware command in both directions. Enabling
 * first sets the PM control bits in PCI config space, then programs
 * the device MAC as the magic-wol address; disabling programs an
 * all-zero MAC. pci_enable_wake() is updated for both D3hot and
 * D3cold accordingly. Returns 0 on success, -1 if the command buffer
 * cannot be allocated, or the failing step's status.
 */
static int be_setup_wol(struct be_adapter *adapter, bool enable)
{
	struct be_dma_mem cmd;
	int status = 0;
	u8 mac[ETH_ALEN];

	/* Zero MAC disables the magic-wol filter */
	memset(mac, 0, ETH_ALEN);

	cmd.size = sizeof(struct be_cmd_req_acpi_wol_magic_config);
	cmd.va = dma_alloc_coherent(&adapter->pdev->dev, cmd.size, &cmd.dma,
				    GFP_KERNEL);
	if (cmd.va == NULL)
		return -1;
	memset(cmd.va, 0, cmd.size);

	if (enable) {
		status = pci_write_config_dword(adapter->pdev,
			PCICFG_PM_CONTROL_OFFSET, PCICFG_PM_CONTROL_MASK);
		if (status) {
			dev_err(&adapter->pdev->dev,
				"Could not enable Wake-on-lan\n");
			dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va,
					  cmd.dma);
			return status;
		}
		status = be_cmd_enable_magic_wol(adapter,
				adapter->netdev->dev_addr, &cmd);
		pci_enable_wake(adapter->pdev, PCI_D3hot, 1);
		pci_enable_wake(adapter->pdev, PCI_D3cold, 1);
	} else {
		status = be_cmd_enable_magic_wol(adapter, mac, &cmd);
		pci_enable_wake(adapter->pdev, PCI_D3hot, 0);
		pci_enable_wake(adapter->pdev, PCI_D3cold, 0);
	}

	dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va, cmd.dma);
	return status;
}
2212
/*
 * Generate a seed MAC address from the PF MAC Address using jhash.
 * MAC Address for VFs are assigned incrementally starting from the seed.
 * These addresses are programmed in the ASIC by the PF and the VF driver
 * queries for the MAC address during its probe.
 */
static inline int be_vf_eth_addr_config(struct be_adapter *adapter)
{
	u32 vf = 0;
	int status = 0;
	u8 mac[ETH_ALEN];

	/* Derive the seed MAC from the PF's address */
	be_vf_eth_addr_generate(adapter, mac);

	for (vf = 0; vf < num_vfs; vf++) {
		/* Program this VF's MAC into the ASIC via the PF */
		status = be_cmd_pmac_add(adapter, mac,
					adapter->vf_cfg[vf].vf_if_handle,
					&adapter->vf_cfg[vf].vf_pmac_id);
		if (status)
			dev_err(&adapter->pdev->dev,
				"Mac address add failed for VF %d\n", vf);
		else
			/* cache it so it can be reported later (e.g. via
			 * ndo_get_vf_config) */
			memcpy(adapter->vf_cfg[vf].vf_mac_addr, mac, ETH_ALEN);

		/* Next VF gets seed+1, seed+2, ...
		 * NOTE(review): only the last octet is incremented, so it
		 * wraps after 256 VFs - confirm num_vfs stays well below that.
		 */
		mac[5] += 1;
	}
	/* Only the status of the last pmac_add is returned; earlier
	 * failures are logged but not propagated. */
	return status;
}
2241
2242static inline void be_vf_eth_addr_rem(struct be_adapter *adapter)
2243{
2244 u32 vf;
2245
2246 for (vf = 0; vf < num_vfs; vf++) {
2247 if (adapter->vf_cfg[vf].vf_pmac_id != BE_INVALID_PMAC_ID)
2248 be_cmd_pmac_del(adapter,
2249 adapter->vf_cfg[vf].vf_if_handle,
2250 adapter->vf_cfg[vf].vf_pmac_id);
2251 }
2252}
2253
/* One-time device setup: creates the HW interface(s) and the TX, RX and
 * MCC queues. On the PF this also creates one interface per enabled VF
 * and programs the VF MAC addresses; on a VF it queries the MAC the PF
 * assigned to it. Everything already created is unwound on failure.
 */
static int be_setup(struct be_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	u32 cap_flags, en_flags, vf = 0;
	int status;
	u8 mac[ETH_ALEN];

	cap_flags = en_flags = BE_IF_FLAGS_UNTAGGED | BE_IF_FLAGS_BROADCAST;

	/* Only the PF may use promiscuous/RSS capabilities */
	if (be_physfn(adapter)) {
		cap_flags |= BE_IF_FLAGS_MCAST_PROMISCUOUS |
				BE_IF_FLAGS_PROMISCUOUS |
				BE_IF_FLAGS_PASS_L3L4_ERRORS;
		en_flags |= BE_IF_FLAGS_PASS_L3L4_ERRORS;

		if (be_multi_rxq(adapter)) {
			cap_flags |= BE_IF_FLAGS_RSS;
			en_flags |= BE_IF_FLAGS_RSS;
		}
	}

	/* Create this function's own interface with the netdev's MAC */
	status = be_cmd_if_create(adapter, cap_flags, en_flags,
			netdev->dev_addr, false/* pmac_invalid */,
			&adapter->if_handle, &adapter->pmac_id, 0);
	if (status != 0)
		goto do_none;

	if (be_physfn(adapter)) {
		/* Create one interface per VF; pmac_invalid == true, so the
		 * (uninitialized) mac argument is not programmed here - VF
		 * MACs are set later by be_vf_eth_addr_config(). */
		while (vf < num_vfs) {
			cap_flags = en_flags = BE_IF_FLAGS_UNTAGGED
					| BE_IF_FLAGS_BROADCAST;
			status = be_cmd_if_create(adapter, cap_flags, en_flags,
					mac, true,
					&adapter->vf_cfg[vf].vf_if_handle,
					NULL, vf+1);
			if (status) {
				dev_err(&adapter->pdev->dev,
				"Interface Create failed for VF %d\n", vf);
				goto if_destroy;
			}
			adapter->vf_cfg[vf].vf_pmac_id = BE_INVALID_PMAC_ID;
			vf++;
		}
	} else if (!be_physfn(adapter)) {
		/* VF: adopt the MAC the PF programmed for this interface */
		status = be_cmd_mac_addr_query(adapter, mac,
			MAC_ADDRESS_TYPE_NETWORK, false, adapter->if_handle);
		if (!status) {
			memcpy(adapter->netdev->dev_addr, mac, ETH_ALEN);
			memcpy(adapter->netdev->perm_addr, mac, ETH_ALEN);
		}
	}

	/* Queue creation order: TX, then RX, then MCC; unwound in reverse */
	status = be_tx_queues_create(adapter);
	if (status != 0)
		goto if_destroy;

	status = be_rx_queues_create(adapter);
	if (status != 0)
		goto tx_qs_destroy;

	status = be_mcc_queues_create(adapter);
	if (status != 0)
		goto rx_qs_destroy;

	/* MCC queue must exist before programming the VF MACs */
	if (be_physfn(adapter)) {
		status = be_vf_eth_addr_config(adapter);
		if (status)
			goto mcc_q_destroy;
	}

	/* Force the first link-status report to be treated as a change */
	adapter->link_speed = -1;

	return 0;

	/* Error unwinding - strict reverse order of creation above */
mcc_q_destroy:
	if (be_physfn(adapter))
		be_vf_eth_addr_rem(adapter);
	be_mcc_queues_destroy(adapter);
rx_qs_destroy:
	be_rx_queues_destroy(adapter);
tx_qs_destroy:
	be_tx_queues_destroy(adapter);
if_destroy:
	/* Destroy only the VF interfaces that were actually created */
	for (vf = 0; vf < num_vfs; vf++)
		if (adapter->vf_cfg[vf].vf_if_handle)
			be_cmd_if_destroy(adapter,
					adapter->vf_cfg[vf].vf_if_handle);
	be_cmd_if_destroy(adapter, adapter->if_handle);
do_none:
	return status;
}
2345
/* Inverse of be_setup(): removes VF MACs, destroys the queues and the
 * interface, then tells the firmware no further commands will be issued.
 * Always returns 0.
 */
static int be_clear(struct be_adapter *adapter)
{
	/* Remove the MACs programmed for the VFs first */
	if (be_physfn(adapter))
		be_vf_eth_addr_rem(adapter);

	/* Destroy queues in reverse order of creation in be_setup() */
	be_mcc_queues_destroy(adapter);
	be_rx_queues_destroy(adapter);
	be_tx_queues_destroy(adapter);

	be_cmd_if_destroy(adapter, adapter->if_handle);

	/* tell fw we're done with firing cmds */
	be_cmd_fw_clean(adapter);
	return 0;
}
2361
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002362
Ajit Khaparde84517482009-09-04 03:12:16 +00002363#define FW_FILE_HDR_SIGN "ServerEngines Corp. "
Sarveshwar Bandifa9a6fe2009-11-20 14:23:47 -08002364static bool be_flash_redboot(struct be_adapter *adapter,
Ajit Khaparde3f0d4562010-02-09 01:30:35 +00002365 const u8 *p, u32 img_start, int image_size,
2366 int hdr_size)
Sarveshwar Bandifa9a6fe2009-11-20 14:23:47 -08002367{
2368 u32 crc_offset;
2369 u8 flashed_crc[4];
2370 int status;
Ajit Khaparde3f0d4562010-02-09 01:30:35 +00002371
2372 crc_offset = hdr_size + img_start + image_size - 4;
2373
Sarveshwar Bandifa9a6fe2009-11-20 14:23:47 -08002374 p += crc_offset;
Ajit Khaparde3f0d4562010-02-09 01:30:35 +00002375
2376 status = be_cmd_get_flash_crc(adapter, flashed_crc,
Ajit Khapardef510fc62010-03-31 01:47:45 +00002377 (image_size - 4));
Sarveshwar Bandifa9a6fe2009-11-20 14:23:47 -08002378 if (status) {
2379 dev_err(&adapter->pdev->dev,
2380 "could not get crc from flash, not flashing redboot\n");
2381 return false;
2382 }
2383
2384 /*update redboot only if crc does not match*/
2385 if (!memcmp(flashed_crc, p, 4))
2386 return false;
2387 else
2388 return true;
Sarveshwar Bandifa9a6fe2009-11-20 14:23:47 -08002389}
2390
/* Write every firmware component from the UFI file to the adapter's
 * flash. Components are written in chunks of at most 32KB: intermediate
 * chunks use FLASHROM_OPER_SAVE and the final chunk uses
 * FLASHROM_OPER_FLASH to commit the section. The NCSI component is
 * skipped for firmware older than 3.102.148.0 and redboot is skipped
 * unless its CRC differs from what is already on flash.
 * Returns 0 on success, -1 on a malformed file or a failed flash write.
 */
static int be_flash_data(struct be_adapter *adapter,
			const struct firmware *fw,
			struct be_dma_mem *flash_cmd, int num_of_images)

{
	int status = 0, i, filehdr_size = 0;
	u32 total_bytes = 0, flash_op;
	int num_bytes;
	const u8 *p = fw->data;
	struct be_cmd_write_flashrom *req = flash_cmd->va;
	const struct flash_comp *pflashcomp;
	int num_comp;

	/* Per-generation layout tables: {offset-in-file, fw optype, max size} */
	static const struct flash_comp gen3_flash_types[9] = {
		{ FLASH_iSCSI_PRIMARY_IMAGE_START_g3, IMG_TYPE_ISCSI_ACTIVE,
			FLASH_IMAGE_MAX_SIZE_g3},
		{ FLASH_REDBOOT_START_g3, IMG_TYPE_REDBOOT,
			FLASH_REDBOOT_IMAGE_MAX_SIZE_g3},
		{ FLASH_iSCSI_BIOS_START_g3, IMG_TYPE_BIOS,
			FLASH_BIOS_IMAGE_MAX_SIZE_g3},
		{ FLASH_PXE_BIOS_START_g3, IMG_TYPE_PXE_BIOS,
			FLASH_BIOS_IMAGE_MAX_SIZE_g3},
		{ FLASH_FCoE_BIOS_START_g3, IMG_TYPE_FCOE_BIOS,
			FLASH_BIOS_IMAGE_MAX_SIZE_g3},
		{ FLASH_iSCSI_BACKUP_IMAGE_START_g3, IMG_TYPE_ISCSI_BACKUP,
			FLASH_IMAGE_MAX_SIZE_g3},
		{ FLASH_FCoE_PRIMARY_IMAGE_START_g3, IMG_TYPE_FCOE_FW_ACTIVE,
			FLASH_IMAGE_MAX_SIZE_g3},
		{ FLASH_FCoE_BACKUP_IMAGE_START_g3, IMG_TYPE_FCOE_FW_BACKUP,
			FLASH_IMAGE_MAX_SIZE_g3},
		{ FLASH_NCSI_START_g3, IMG_TYPE_NCSI_FW,
			FLASH_NCSI_IMAGE_MAX_SIZE_g3}
	};
	static const struct flash_comp gen2_flash_types[8] = {
		{ FLASH_iSCSI_PRIMARY_IMAGE_START_g2, IMG_TYPE_ISCSI_ACTIVE,
			FLASH_IMAGE_MAX_SIZE_g2},
		{ FLASH_REDBOOT_START_g2, IMG_TYPE_REDBOOT,
			FLASH_REDBOOT_IMAGE_MAX_SIZE_g2},
		{ FLASH_iSCSI_BIOS_START_g2, IMG_TYPE_BIOS,
			FLASH_BIOS_IMAGE_MAX_SIZE_g2},
		{ FLASH_PXE_BIOS_START_g2, IMG_TYPE_PXE_BIOS,
			FLASH_BIOS_IMAGE_MAX_SIZE_g2},
		{ FLASH_FCoE_BIOS_START_g2, IMG_TYPE_FCOE_BIOS,
			FLASH_BIOS_IMAGE_MAX_SIZE_g2},
		{ FLASH_iSCSI_BACKUP_IMAGE_START_g2, IMG_TYPE_ISCSI_BACKUP,
			FLASH_IMAGE_MAX_SIZE_g2},
		{ FLASH_FCoE_PRIMARY_IMAGE_START_g2, IMG_TYPE_FCOE_FW_ACTIVE,
			FLASH_IMAGE_MAX_SIZE_g2},
		{ FLASH_FCoE_BACKUP_IMAGE_START_g2, IMG_TYPE_FCOE_FW_BACKUP,
			FLASH_IMAGE_MAX_SIZE_g2}
	};

	if (adapter->generation == BE_GEN3) {
		pflashcomp = gen3_flash_types;
		filehdr_size = sizeof(struct flash_file_hdr_g3);
		num_comp = ARRAY_SIZE(gen3_flash_types);
	} else {
		pflashcomp = gen2_flash_types;
		filehdr_size = sizeof(struct flash_file_hdr_g2);
		num_comp = ARRAY_SIZE(gen2_flash_types);
	}
	for (i = 0; i < num_comp; i++) {
		/* NCSI component requires fw >= 3.102.148.0 */
		if ((pflashcomp[i].optype == IMG_TYPE_NCSI_FW) &&
			memcmp(adapter->fw_ver, "3.102.148.0", 11) < 0)
			continue;
		/* Skip redboot when its CRC already matches the flash.
		 * NOTE(review): be_flash_redboot() reads the CRC from
		 * fw->data before the bounds check below - assumes the
		 * component offsets fit inside the file; confirm. */
		if ((pflashcomp[i].optype == IMG_TYPE_REDBOOT) &&
			(!be_flash_redboot(adapter, fw->data,
			pflashcomp[i].offset, pflashcomp[i].size,
			filehdr_size)))
			continue;
		/* Locate this component's data within the UFI file */
		p = fw->data;
		p += filehdr_size + pflashcomp[i].offset
			+ (num_of_images * sizeof(struct image_hdr));
		if (p + pflashcomp[i].size > fw->data + fw->size)
			return -1;
		total_bytes = pflashcomp[i].size;
		while (total_bytes) {
			if (total_bytes > 32*1024)
				num_bytes = 32*1024;
			else
				num_bytes = total_bytes;
			total_bytes -= num_bytes;

			/* Final chunk commits; earlier chunks only stage */
			if (!total_bytes)
				flash_op = FLASHROM_OPER_FLASH;
			else
				flash_op = FLASHROM_OPER_SAVE;
			memcpy(req->params.data_buf, p, num_bytes);
			p += num_bytes;
			status = be_cmd_write_flashrom(adapter, flash_cmd,
				pflashcomp[i].optype, flash_op, num_bytes);
			if (status) {
				dev_err(&adapter->pdev->dev,
					"cmd to write to flash rom failed.\n");
				return -1;
			}
			/* Flashing is slow; give other tasks a chance */
			yield();
		}
	}
	return 0;
}
2492
Ajit Khaparde3f0d4562010-02-09 01:30:35 +00002493static int get_ufigen_type(struct flash_file_hdr_g2 *fhdr)
2494{
2495 if (fhdr == NULL)
2496 return 0;
2497 if (fhdr->build[0] == '3')
2498 return BE_GEN3;
2499 else if (fhdr->build[0] == '2')
2500 return BE_GEN2;
2501 else
2502 return 0;
2503}
2504
/* Load a firmware image file (ethtool flash) and write it to the adapter.
 * Refuses to run while the interface is down, fetches the file via
 * request_firmware(), verifies the UFI header generation matches the
 * adapter generation, and hands each matching image to be_flash_data().
 * Returns 0 on success or a negative status.
 */
int be_load_fw(struct be_adapter *adapter, u8 *func)
{
	char fw_file[ETHTOOL_FLASH_MAX_FILENAME];
	const struct firmware *fw;
	struct flash_file_hdr_g2 *fhdr;
	struct flash_file_hdr_g3 *fhdr3;
	struct image_hdr *img_hdr_ptr = NULL;
	struct be_dma_mem flash_cmd;
	int status, i = 0, num_imgs = 0;
	const u8 *p;

	if (!netif_running(adapter->netdev)) {
		dev_err(&adapter->pdev->dev,
			"Firmware load not allowed (interface is down)\n");
		return -EPERM;
	}

	strcpy(fw_file, func);

	/* NOTE(review): on failure request_firmware() is expected to leave
	 * fw NULL so the release_firmware() at fw_exit is a no-op - confirm */
	status = request_firmware(&fw, fw_file, &adapter->pdev->dev);
	if (status)
		goto fw_exit;

	p = fw->data;
	/* Both header layouts start identically; read as gen2 first */
	fhdr = (struct flash_file_hdr_g2 *) p;
	dev_info(&adapter->pdev->dev, "Flashing firmware file %s\n", fw_file);

	/* Command buffer holds the request header plus one 32KB data chunk */
	flash_cmd.size = sizeof(struct be_cmd_write_flashrom) + 32*1024;
	flash_cmd.va = dma_alloc_coherent(&adapter->pdev->dev, flash_cmd.size,
					&flash_cmd.dma, GFP_KERNEL);
	if (!flash_cmd.va) {
		status = -ENOMEM;
		dev_err(&adapter->pdev->dev,
			"Memory allocation failure while flashing\n");
		goto fw_exit;
	}

	if ((adapter->generation == BE_GEN3) &&
		(get_ufigen_type(fhdr) == BE_GEN3)) {
		/* Gen3 UFI: iterate the image headers, flash image id 1 */
		fhdr3 = (struct flash_file_hdr_g3 *) fw->data;
		num_imgs = le32_to_cpu(fhdr3->num_imgs);
		for (i = 0; i < num_imgs; i++) {
			img_hdr_ptr = (struct image_hdr *) (fw->data +
					(sizeof(struct flash_file_hdr_g3) +
					i * sizeof(struct image_hdr)));
			if (le32_to_cpu(img_hdr_ptr->imageid) == 1)
				status = be_flash_data(adapter, fw, &flash_cmd,
							num_imgs);
		}
	} else if ((adapter->generation == BE_GEN2) &&
		(get_ufigen_type(fhdr) == BE_GEN2)) {
		/* Gen2 UFI carries a single image */
		status = be_flash_data(adapter, fw, &flash_cmd, 0);
	} else {
		dev_err(&adapter->pdev->dev,
			"UFI and Interface are not compatible for flashing\n");
		status = -1;
	}

	dma_free_coherent(&adapter->pdev->dev, flash_cmd.size, flash_cmd.va,
			flash_cmd.dma);
	if (status) {
		dev_err(&adapter->pdev->dev, "Firmware load error\n");
		goto fw_exit;
	}

	dev_info(&adapter->pdev->dev, "Firmware flashed successfully\n");

fw_exit:
	release_firmware(fw);
	return status;
}
2576
/* Netdev callback table installed on the net_device in be_netdev_init();
 * includes the SR-IOV ndo_set_vf_* / ndo_get_vf_config hooks.
 */
static struct net_device_ops be_netdev_ops = {
	.ndo_open = be_open,
	.ndo_stop = be_close,
	.ndo_start_xmit = be_xmit,
	.ndo_set_rx_mode = be_set_multicast_list,
	.ndo_set_mac_address = be_mac_addr_set,
	.ndo_change_mtu = be_change_mtu,
	.ndo_validate_addr = eth_validate_addr,
	.ndo_vlan_rx_register = be_vlan_register,
	.ndo_vlan_rx_add_vid = be_vlan_add_vid,
	.ndo_vlan_rx_kill_vid = be_vlan_rem_vid,
	.ndo_set_vf_mac = be_set_vf_mac,
	.ndo_set_vf_vlan = be_set_vf_vlan,
	.ndo_set_vf_tx_rate = be_set_vf_tx_rate,
	.ndo_get_vf_config = be_get_vf_config
};
2593
/* Initialize the net_device: offload feature flags, flow-control
 * defaults, netdev/ethtool ops, and one NAPI context per RX event queue
 * plus one for the combined TX/MCC event queue.
 */
static void be_netdev_init(struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_rx_obj *rxo;
	int i;

	/* HW-offload capabilities: scatter-gather, VLAN accel/filter,
	 * checksum offload, GRO and TSO (v4 and v6) */
	netdev->features |= NETIF_F_SG | NETIF_F_HW_VLAN_RX | NETIF_F_TSO |
		NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_FILTER |
		NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
		NETIF_F_GRO | NETIF_F_TSO6;

	netdev->vlan_features |= NETIF_F_SG | NETIF_F_TSO |
		NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;

	/* Lancer additionally supports TSO6 on VLAN interfaces */
	if (lancer_chip(adapter))
		netdev->vlan_features |= NETIF_F_TSO6;

	netdev->flags |= IFF_MULTICAST;

	adapter->rx_csum = true;

	/* Default settings for Rx and Tx flow control */
	adapter->rx_fc = true;
	adapter->tx_fc = true;

	netif_set_gso_max_size(netdev, 65535);

	BE_SET_NETDEV_OPS(netdev, &be_netdev_ops);

	SET_ETHTOOL_OPS(netdev, &be_ethtool_ops);

	/* One NAPI context per RX event queue ... */
	for_all_rx_queues(adapter, rxo, i)
		netif_napi_add(netdev, &rxo->rx_eq.napi, be_poll_rx,
			BE_NAPI_WEIGHT);

	/* ... and one for the shared TX/MCC event queue */
	netif_napi_add(netdev, &adapter->tx_eq.napi, be_poll_tx_mcc,
		BE_NAPI_WEIGHT);

	netif_stop_queue(netdev);
}
2634
2635static void be_unmap_pci_bars(struct be_adapter *adapter)
2636{
Sathya Perla8788fdc2009-07-27 22:52:03 +00002637 if (adapter->csr)
2638 iounmap(adapter->csr);
2639 if (adapter->db)
2640 iounmap(adapter->db);
Sarveshwar Bandiba343c72010-03-31 02:56:12 +00002641 if (adapter->pcicfg && be_physfn(adapter))
Sathya Perla8788fdc2009-07-27 22:52:03 +00002642 iounmap(adapter->pcicfg);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002643}
2644
/* ioremap the PCI BARs used by the driver. BAR layout depends on the
 * chip family:
 *   Lancer:        everything (doorbells) is in BAR 0; nothing else mapped.
 *   BE2 (GEN2):    CSR in BAR 2, pcicfg in BAR 1, doorbells in BAR 4.
 *   BE3 (GEN3) PF: CSR in BAR 2, pcicfg in BAR 0, doorbells in BAR 4.
 *   BE3 (GEN3) VF: doorbells in BAR 0; pcicfg is an offset within the
 *                  doorbell mapping, not a separate ioremap.
 * Returns 0 or -ENOMEM; partial mappings are released on failure.
 */
static int be_map_pci_bars(struct be_adapter *adapter)
{
	u8 __iomem *addr;
	int pcicfg_reg, db_reg;

	/* Lancer needs only the doorbell BAR */
	if (lancer_chip(adapter)) {
		addr = ioremap_nocache(pci_resource_start(adapter->pdev, 0),
				pci_resource_len(adapter->pdev, 0));
		if (addr == NULL)
			return -ENOMEM;
		adapter->db = addr;
		return 0;
	}

	/* CSR space exists only on the PF */
	if (be_physfn(adapter)) {
		addr = ioremap_nocache(pci_resource_start(adapter->pdev, 2),
				pci_resource_len(adapter->pdev, 2));
		if (addr == NULL)
			return -ENOMEM;
		adapter->csr = addr;
	}

	/* Select BAR numbers per generation / function type */
	if (adapter->generation == BE_GEN2) {
		pcicfg_reg = 1;
		db_reg = 4;
	} else {
		pcicfg_reg = 0;
		if (be_physfn(adapter))
			db_reg = 4;
		else
			db_reg = 0;
	}
	addr = ioremap_nocache(pci_resource_start(adapter->pdev, db_reg),
				pci_resource_len(adapter->pdev, db_reg));
	if (addr == NULL)
		goto pci_map_err;
	adapter->db = addr;

	if (be_physfn(adapter)) {
		addr = ioremap_nocache(
				pci_resource_start(adapter->pdev, pcicfg_reg),
				pci_resource_len(adapter->pdev, pcicfg_reg));
		if (addr == NULL)
			goto pci_map_err;
		adapter->pcicfg = addr;
	} else
		/* VF: pcicfg lives at a fixed offset inside the db BAR */
		adapter->pcicfg = adapter->db + SRIOV_VF_PCICFG_OFFSET;

	return 0;
pci_map_err:
	be_unmap_pci_bars(adapter);
	return -ENOMEM;
}
2698
2699
2700static void be_ctrl_cleanup(struct be_adapter *adapter)
2701{
Sathya Perla8788fdc2009-07-27 22:52:03 +00002702 struct be_dma_mem *mem = &adapter->mbox_mem_alloced;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002703
2704 be_unmap_pci_bars(adapter);
2705
2706 if (mem->va)
Ivan Vecera2b7bceb2011-02-02 08:05:12 +00002707 dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
2708 mem->dma);
Sathya Perlae7b909a2009-11-22 22:01:10 +00002709
2710 mem = &adapter->mc_cmd_mem;
2711 if (mem->va)
Ivan Vecera2b7bceb2011-02-02 08:05:12 +00002712 dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
2713 mem->dma);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002714}
2715
/* Set up the control path: map the PCI BARs, allocate the DMA-coherent
 * mailbox (16-byte aligned within an over-allocated buffer) and the
 * multicast command buffer, and initialize the mailbox/MCC locks.
 * Returns 0 or -ENOMEM / a mapping error; unwinds via gotos on failure.
 */
static int be_ctrl_init(struct be_adapter *adapter)
{
	struct be_dma_mem *mbox_mem_alloc = &adapter->mbox_mem_alloced;
	struct be_dma_mem *mbox_mem_align = &adapter->mbox_mem;
	struct be_dma_mem *mc_cmd_mem = &adapter->mc_cmd_mem;
	int status;

	status = be_map_pci_bars(adapter);
	if (status)
		goto done;

	/* Over-allocate by 16 bytes so the mailbox can be 16-byte aligned */
	mbox_mem_alloc->size = sizeof(struct be_mcc_mailbox) + 16;
	mbox_mem_alloc->va = dma_alloc_coherent(&adapter->pdev->dev,
						mbox_mem_alloc->size,
						&mbox_mem_alloc->dma,
						GFP_KERNEL);
	if (!mbox_mem_alloc->va) {
		status = -ENOMEM;
		goto unmap_pci_bars;
	}

	/* The aligned view is what the mailbox code actually uses */
	mbox_mem_align->size = sizeof(struct be_mcc_mailbox);
	mbox_mem_align->va = PTR_ALIGN(mbox_mem_alloc->va, 16);
	mbox_mem_align->dma = PTR_ALIGN(mbox_mem_alloc->dma, 16);
	memset(mbox_mem_align->va, 0, sizeof(struct be_mcc_mailbox));

	/* Buffer for multicast MAC configuration commands */
	mc_cmd_mem->size = sizeof(struct be_cmd_req_mcast_mac_config);
	mc_cmd_mem->va = dma_alloc_coherent(&adapter->pdev->dev,
					mc_cmd_mem->size, &mc_cmd_mem->dma,
					GFP_KERNEL);
	if (mc_cmd_mem->va == NULL) {
		status = -ENOMEM;
		goto free_mbox;
	}
	memset(mc_cmd_mem->va, 0, mc_cmd_mem->size);

	mutex_init(&adapter->mbox_lock);
	spin_lock_init(&adapter->mcc_lock);
	spin_lock_init(&adapter->mcc_cq_lock);

	init_completion(&adapter->flash_compl);
	/* Saved config space is restored on EEH/error recovery */
	pci_save_state(adapter->pdev);
	return 0;

free_mbox:
	dma_free_coherent(&adapter->pdev->dev, mbox_mem_alloc->size,
			mbox_mem_alloc->va, mbox_mem_alloc->dma);

unmap_pci_bars:
	be_unmap_pci_bars(adapter);

done:
	return status;
}
2770
2771static void be_stats_cleanup(struct be_adapter *adapter)
2772{
Sathya Perla3abcded2010-10-03 22:12:27 -07002773 struct be_dma_mem *cmd = &adapter->stats_cmd;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002774
2775 if (cmd->va)
Ivan Vecera2b7bceb2011-02-02 08:05:12 +00002776 dma_free_coherent(&adapter->pdev->dev, cmd->size,
2777 cmd->va, cmd->dma);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002778}
2779
2780static int be_stats_init(struct be_adapter *adapter)
2781{
Sathya Perla3abcded2010-10-03 22:12:27 -07002782 struct be_dma_mem *cmd = &adapter->stats_cmd;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002783
2784 cmd->size = sizeof(struct be_cmd_req_get_stats);
Ivan Vecera2b7bceb2011-02-02 08:05:12 +00002785 cmd->va = dma_alloc_coherent(&adapter->pdev->dev, cmd->size, &cmd->dma,
2786 GFP_KERNEL);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002787 if (cmd->va == NULL)
2788 return -1;
David S. Millerd291b9a2010-01-28 21:36:21 -08002789 memset(cmd->va, 0, cmd->size);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002790 return 0;
2791}
2792
/* PCI remove callback: tear the device down in strict reverse order of
 * be_probe() - stop the worker, unregister the netdev, destroy queues
 * and interface, free stats/control resources, disable SR-IOV and MSI-X,
 * then release the PCI device.
 */
static void __devexit be_remove(struct pci_dev *pdev)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);

	if (!adapter)
		return;

	/* Ensure the periodic worker cannot run during teardown */
	cancel_delayed_work_sync(&adapter->work);

	unregister_netdev(adapter->netdev);

	be_clear(adapter);

	be_stats_cleanup(adapter);

	be_ctrl_cleanup(adapter);

	be_sriov_disable(adapter);

	be_msix_disable(adapter);

	pci_set_drvdata(pdev, NULL);
	pci_release_regions(pdev);
	pci_disable_device(pdev);

	free_netdev(adapter->netdev);
}
2820
/* Query basic configuration from the firmware: fw version, port number,
 * function mode/capabilities, and - on the PF - the permanent MAC
 * address, which is installed on the netdev. Also derives the number of
 * VLANs this function may use. Returns 0 or a fw status code.
 */
static int be_get_config(struct be_adapter *adapter)
{
	int status;
	u8 mac[ETH_ALEN];

	status = be_cmd_get_fw_ver(adapter, adapter->fw_ver);
	if (status)
		return status;

	status = be_cmd_query_fw_cfg(adapter, &adapter->port_num,
			&adapter->function_mode, &adapter->function_caps);
	if (status)
		return status;

	memset(mac, 0, ETH_ALEN);

	/* VFs obtain their MAC later (in be_setup()) via the interface
	 * handle; only the PF can query the permanent MAC here */
	if (be_physfn(adapter)) {
		status = be_cmd_mac_addr_query(adapter, mac,
			MAC_ADDRESS_TYPE_NETWORK, true /*permanent */, 0);

		if (status)
			return status;

		if (!is_valid_ether_addr(mac))
			return -EADDRNOTAVAIL;

		memcpy(adapter->netdev->dev_addr, mac, ETH_ALEN);
		memcpy(adapter->netdev->perm_addr, mac, ETH_ALEN);
	}

	/* NOTE(review): 0x400 appears to be a function-mode flag under
	 * which only a quarter of the VLAN table is available to this
	 * function - confirm the bit's meaning against the fw spec */
	if (adapter->function_mode & 0x400)
		adapter->max_vlans = BE_NUM_VLANS_SUPPORTED/4;
	else
		adapter->max_vlans = BE_NUM_VLANS_SUPPORTED;

	return 0;
}
2858
/* Determine the chip generation from the PCI device id. For OC_DEVICE_ID3
 * (Lancer) the SLI_INTF register is validated and the SLI family is
 * recorded; VFs are rejected on that device. Unknown ids get generation 0.
 * Returns 0 on success or -EINVAL for an invalid/unsupported device.
 */
static int be_dev_family_check(struct be_adapter *adapter)
{
	struct pci_dev *pdev = adapter->pdev;
	u32 sli_intf = 0, if_type;

	switch (pdev->device) {
	case BE_DEVICE_ID1:
	case OC_DEVICE_ID1:
		adapter->generation = BE_GEN2;
		break;
	case BE_DEVICE_ID2:
	case OC_DEVICE_ID2:
		adapter->generation = BE_GEN3;
		break;
	case OC_DEVICE_ID3:
		/* Lancer: sanity-check the SLI interface register */
		pci_read_config_dword(pdev, SLI_INTF_REG_OFFSET, &sli_intf);
		if_type = (sli_intf & SLI_INTF_IF_TYPE_MASK) >>
						SLI_INTF_IF_TYPE_SHIFT;

		/* Only interface type 2 with a valid signature is supported */
		if (((sli_intf & SLI_INTF_VALID_MASK) != SLI_INTF_VALID) ||
			if_type != 0x02) {
			dev_err(&pdev->dev, "SLI_INTF reg val is not valid\n");
			return -EINVAL;
		}
		/* SR-IOV is not supported on this device */
		if (num_vfs > 0) {
			dev_err(&pdev->dev, "VFs not supported\n");
			return -EINVAL;
		}
		adapter->sli_family = ((sli_intf & SLI_INTF_FAMILY_MASK) >>
					 SLI_INTF_FAMILY_SHIFT);
		adapter->generation = BE_GEN3;
		break;
	default:
		adapter->generation = 0;
	}
	return 0;
}
2896
/*
 * PCI probe routine: bring up one BladeEngine NIC function.
 *
 * Ordering matters here: PCI resources, then netdev allocation, then
 * generation/family detection, DMA mask, SR-IOV, mailbox/ctrl init,
 * firmware sync, stats, MSI-X, queue setup and finally netdev
 * registration.  The goto ladder at the bottom unwinds in exact
 * reverse order.
 *
 * Returns 0 on success or a negative errno; on failure all acquired
 * resources are released via the labels below.
 */
static int __devinit be_probe(struct pci_dev *pdev,
			const struct pci_device_id *pdev_id)
{
	int status = 0;
	struct be_adapter *adapter;
	struct net_device *netdev;

	status = pci_enable_device(pdev);
	if (status)
		goto do_none;

	status = pci_request_regions(pdev, DRV_NAME);
	if (status)
		goto disable_dev;
	pci_set_master(pdev);

	/* adapter private area lives inside the net_device allocation */
	netdev = alloc_etherdev(sizeof(struct be_adapter));
	if (netdev == NULL) {
		status = -ENOMEM;
		goto rel_reg;
	}
	adapter = netdev_priv(netdev);
	adapter->pdev = pdev;
	pci_set_drvdata(pdev, adapter);

	/* determine BE2/BE3/Lancer generation; fails on unknown silicon */
	status = be_dev_family_check(adapter);
	if (status)
		goto free_netdev;

	adapter->netdev = netdev;
	SET_NETDEV_DEV(netdev, &pdev->dev);

	/* prefer 64-bit DMA, fall back to 32-bit */
	status = dma_set_mask(&pdev->dev, DMA_BIT_MASK(64));
	if (!status) {
		netdev->features |= NETIF_F_HIGHDMA;
	} else {
		status = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
		if (status) {
			dev_err(&pdev->dev, "Could not set PCI DMA Mask\n");
			goto free_netdev;
		}
	}

	be_sriov_enable(adapter);

	/* map BARs and init the mailbox used for FW cmds */
	status = be_ctrl_init(adapter);
	if (status)
		goto free_netdev;

	/* sync up with fw's ready state */
	if (be_physfn(adapter)) {
		status = be_cmd_POST(adapter);
		if (status)
			goto ctrl_clean;
	}

	/* tell fw we're ready to fire cmds */
	status = be_cmd_fw_init(adapter);
	if (status)
		goto ctrl_clean;

	/* only the physical function may issue an FLR */
	if (be_physfn(adapter)) {
		status = be_cmd_reset_function(adapter);
		if (status)
			goto ctrl_clean;
	}

	status = be_stats_init(adapter);
	if (status)
		goto ctrl_clean;

	status = be_get_config(adapter);
	if (status)
		goto stats_clean;

	be_msix_enable(adapter);

	/* periodic worker; armed only after register_netdev succeeds */
	INIT_DELAYED_WORK(&adapter->work, be_worker);

	/* create tx/rx/mcc queues and program MAC/vlan config */
	status = be_setup(adapter);
	if (status)
		goto msix_disable;

	be_netdev_init(netdev);
	status = register_netdev(netdev);
	if (status != 0)
		goto unsetup;
	/* no link yet; be_worker/link-status events will flip carrier */
	netif_carrier_off(netdev);

	dev_info(&pdev->dev, "%s port %d\n", nic_name(pdev), adapter->port_num);
	schedule_delayed_work(&adapter->work, msecs_to_jiffies(100));
	return 0;

	/* unwind in strict reverse order of acquisition */
unsetup:
	be_clear(adapter);
msix_disable:
	be_msix_disable(adapter);
stats_clean:
	be_stats_cleanup(adapter);
ctrl_clean:
	be_ctrl_cleanup(adapter);
free_netdev:
	be_sriov_disable(adapter);
	free_netdev(netdev);
	pci_set_drvdata(pdev, NULL);
rel_reg:
	pci_release_regions(pdev);
disable_dev:
	pci_disable_device(pdev);
do_none:
	dev_err(&pdev->dev, "%s initialization failed\n", nic_name(pdev));
	return status;
}
3010
/*
 * PM suspend hook: arm wake-on-lan if configured, quiesce the
 * interface, tear down queues and put the device into the requested
 * low-power state.
 */
static int be_suspend(struct pci_dev *pdev, pm_message_t state)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);
	struct net_device *netdev = adapter->netdev;

	if (adapter->wol)
		be_setup_wol(adapter, true);

	netif_device_detach(netdev);
	if (netif_running(netdev)) {
		/* be_close() walks netdev state; rtnl protects it */
		rtnl_lock();
		be_close(netdev);
		rtnl_unlock();
	}
	/* cache flow-control settings so be_setup() on resume restores them */
	be_cmd_get_flow_control(adapter, &adapter->tx_fc, &adapter->rx_fc);
	be_clear(adapter);

	/* NOTE(review): probe's error path pairs be_clear() with
	 * be_msix_disable(), but suspend leaves MSI-X enabled across the
	 * power transition — verify resume copes with this asymmetry.
	 */
	pci_save_state(pdev);
	pci_disable_device(pdev);
	pci_set_power_state(pdev, pci_choose_state(pdev, state));
	return 0;
}
3033
/*
 * PM resume hook: re-enable the PCI device, re-sync with firmware,
 * rebuild queues and bring the interface back up.  Mirrors
 * be_suspend() in reverse.
 */
static int be_resume(struct pci_dev *pdev)
{
	int status = 0;
	struct be_adapter *adapter = pci_get_drvdata(pdev);
	struct net_device *netdev = adapter->netdev;

	netif_device_detach(netdev);

	status = pci_enable_device(pdev);
	if (status)
		return status;

	pci_set_power_state(pdev, 0);	/* back to D0 */
	pci_restore_state(pdev);

	/* tell fw we're ready to fire cmds */
	status = be_cmd_fw_init(adapter);
	if (status)
		return status;

	/* NOTE(review): return value of be_setup() is ignored here;
	 * a failed setup would leave the netdev attached but unusable —
	 * confirm this is intentional best-effort behavior.
	 */
	be_setup(adapter);
	if (netif_running(netdev)) {
		rtnl_lock();
		be_open(netdev);
		rtnl_unlock();
	}
	netif_device_attach(netdev);

	/* wol was armed in be_suspend(); disarm now that we're awake */
	if (adapter->wol)
		be_setup_wol(adapter, false);
	return 0;
}
3066
Sathya Perla82456b02010-02-17 01:35:37 +00003067/*
3068 * An FLR will stop BE from DMAing any data.
3069 */
3070static void be_shutdown(struct pci_dev *pdev)
3071{
3072 struct be_adapter *adapter = pci_get_drvdata(pdev);
3073 struct net_device *netdev = adapter->netdev;
3074
3075 netif_device_detach(netdev);
3076
3077 be_cmd_reset_function(adapter);
3078
3079 if (adapter->wol)
3080 be_setup_wol(adapter, true);
3081
3082 pci_disable_device(pdev);
Sathya Perla82456b02010-02-17 01:35:37 +00003083}
3084
/*
 * EEH (PCI error recovery) callback: a PCI bus error was detected.
 * Quiesce the device and tell the core whether a slot reset can
 * recover it.
 */
static pci_ers_result_t be_eeh_err_detected(struct pci_dev *pdev,
		pci_channel_state_t state)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);
	struct net_device *netdev = adapter->netdev;

	dev_err(&adapter->pdev->dev, "EEH error detected\n");

	/* flag checked elsewhere to suppress FW cmds while broken */
	adapter->eeh_err = true;

	netif_device_detach(netdev);

	if (netif_running(netdev)) {
		rtnl_lock();
		be_close(netdev);
		rtnl_unlock();
	}
	be_clear(adapter);

	/* permanent failure: no point attempting a reset */
	if (state == pci_channel_io_perm_failure)
		return PCI_ERS_RESULT_DISCONNECT;

	pci_disable_device(pdev);

	return PCI_ERS_RESULT_NEED_RESET;
}
3111
/*
 * EEH slot-reset callback: the link has been reset; re-enable the
 * device and check that the firmware comes back ready.
 */
static pci_ers_result_t be_eeh_reset(struct pci_dev *pdev)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);
	int status;

	dev_info(&adapter->pdev->dev, "EEH reset\n");
	adapter->eeh_err = false;

	status = pci_enable_device(pdev);
	if (status)
		return PCI_ERS_RESULT_DISCONNECT;

	pci_set_master(pdev);
	pci_set_power_state(pdev, 0);
	pci_restore_state(pdev);

	/* Check if card is ok and fw is ready */
	status = be_cmd_POST(adapter);
	if (status)
		return PCI_ERS_RESULT_DISCONNECT;

	return PCI_ERS_RESULT_RECOVERED;
}
3135
/*
 * EEH resume callback: the device recovered from the reset; rebuild
 * firmware state and queues, then re-attach the netdev.  On any
 * failure we can only log — there is no error return from this hook.
 */
static void be_eeh_resume(struct pci_dev *pdev)
{
	int status = 0;
	struct be_adapter *adapter = pci_get_drvdata(pdev);
	struct net_device *netdev = adapter->netdev;

	dev_info(&adapter->pdev->dev, "EEH resume\n");

	pci_save_state(pdev);

	/* tell fw we're ready to fire cmds */
	status = be_cmd_fw_init(adapter);
	if (status)
		goto err;

	status = be_setup(adapter);
	if (status)
		goto err;

	if (netif_running(netdev)) {
		status = be_open(netdev);
		if (status)
			goto err;
	}
	netif_device_attach(netdev);
	return;
err:
	dev_err(&adapter->pdev->dev, "EEH resume failed\n");
}
3165
/* PCI error-recovery (EEH) callbacks wired into be_driver below */
static struct pci_error_handlers be_eeh_handlers = {
	.error_detected = be_eeh_err_detected,
	.slot_reset = be_eeh_reset,
	.resume = be_eeh_resume,
};
3171
/* PCI driver descriptor registered in be_init_module() */
static struct pci_driver be_driver = {
	.name = DRV_NAME,
	.id_table = be_dev_ids,
	.probe = be_probe,
	.remove = be_remove,
	.suspend = be_suspend,
	.resume = be_resume,
	.shutdown = be_shutdown,
	.err_handler = &be_eeh_handlers
};
3182
3183static int __init be_init_module(void)
3184{
Joe Perches8e95a202009-12-03 07:58:21 +00003185 if (rx_frag_size != 8192 && rx_frag_size != 4096 &&
3186 rx_frag_size != 2048) {
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003187 printk(KERN_WARNING DRV_NAME
3188 " : Module param rx_frag_size must be 2048/4096/8192."
3189 " Using 2048\n");
3190 rx_frag_size = 2048;
3191 }
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003192
Sarveshwar Bandiba343c72010-03-31 02:56:12 +00003193 if (num_vfs > 32) {
3194 printk(KERN_WARNING DRV_NAME
3195 " : Module param num_vfs must not be greater than 32."
3196 "Using 32\n");
3197 num_vfs = 32;
3198 }
3199
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003200 return pci_register_driver(&be_driver);
3201}
3202module_init(be_init_module);
3203
/* Module exit point: unregister the PCI driver (be_remove runs per device) */
static void __exit be_exit_module(void)
{
	pci_unregister_driver(&be_driver);
}
module_exit(be_exit_module);