/*
 * Copyright (C) 2005 - 2010 ServerEngines
 * All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version 2
 * as published by the Free Software Foundation. The full GNU General
 * Public License is included in this distribution in the file called COPYING.
 *
 * Contact Information:
 * linux-drivers@serverengines.com
 *
 * ServerEngines
 * 209 N. Fair Oaks Ave
 * Sunnyvale, CA 94085
 */

#include "be.h"
#include "be_cmds.h"
#include <asm/div64.h>

MODULE_VERSION(DRV_VER);
MODULE_DESCRIPTION(DRV_DESC " " DRV_VER);
MODULE_AUTHOR("ServerEngines Corporation");
MODULE_LICENSE("GPL");

static unsigned int rx_frag_size = 2048;
static unsigned int num_vfs;
module_param(rx_frag_size, uint, S_IRUGO);
module_param(num_vfs, uint, S_IRUGO);
MODULE_PARM_DESC(rx_frag_size, "Size of a fragment that holds rcvd data.");
MODULE_PARM_DESC(num_vfs, "Number of PCI VFs to initialize");

static bool multi_rxq = true;
module_param(multi_rxq, bool, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(multi_rxq, "Multi Rx Queue support. Enabled by default");

static DEFINE_PCI_DEVICE_TABLE(be_dev_ids) = {
	{ PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID1) },
	{ PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID2) },
	{ PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID1) },
	{ PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID2) },
	{ PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID3)},
	{ 0 }
};
MODULE_DEVICE_TABLE(pci, be_dev_ids);
/* UE Status Low CSR */
static char *ue_status_low_desc[] = {
	"CEV",
	"CTX",
	"DBUF",
	"ERX",
	"Host",
	"MPU",
	"NDMA",
	"PTC ",
	"RDMA ",
	"RXF ",
	"RXIPS ",
	"RXULP0 ",
	"RXULP1 ",
	"RXULP2 ",
	"TIM ",
	"TPOST ",
	"TPRE ",
	"TXIPS ",
	"TXULP0 ",
	"TXULP1 ",
	"UC ",
	"WDMA ",
	"TXULP2 ",
	"HOST1 ",
	"P0_OB_LINK ",
	"P1_OB_LINK ",
	"HOST_GPIO ",
	"MBOX ",
	"AXGMAC0",
	"AXGMAC1",
	"JTAG",
	"MPU_INTPEND"
};
/* UE Status High CSR */
static char *ue_status_hi_desc[] = {
	"LPCMEMHOST",
	"MGMT_MAC",
	"PCS0ONLINE",
	"MPU_IRAM",
	"PCS1ONLINE",
	"PCTL0",
	"PCTL1",
	"PMEM",
	"RR",
	"TXPB",
	"RXPP",
	"XAUI",
	"TXP",
	"ARM",
	"IPC",
	"HOST2",
	"HOST3",
	"HOST4",
	"HOST5",
	"HOST6",
	"HOST7",
	"HOST8",
	"HOST9",
	"NETC",
	"Unknown",
	"Unknown",
	"Unknown",
	"Unknown",
	"Unknown",
	"Unknown",
	"Unknown",
	"Unknown"
};

static inline bool be_multi_rxq(struct be_adapter *adapter)
{
	return (adapter->num_rx_qs > 1);
}

static void be_queue_free(struct be_adapter *adapter, struct be_queue_info *q)
{
	struct be_dma_mem *mem = &q->dma_mem;
	if (mem->va)
		pci_free_consistent(adapter->pdev, mem->size,
			mem->va, mem->dma);
}

static int be_queue_alloc(struct be_adapter *adapter, struct be_queue_info *q,
		u16 len, u16 entry_size)
{
	struct be_dma_mem *mem = &q->dma_mem;

	memset(q, 0, sizeof(*q));
	q->len = len;
	q->entry_size = entry_size;
	mem->size = len * entry_size;
	mem->va = pci_alloc_consistent(adapter->pdev, mem->size, &mem->dma);
	if (!mem->va)
		return -1;
	memset(mem->va, 0, mem->size);
	return 0;
}

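/*
 * Usage sketch (illustrative only, not part of the original driver): every
 * ring in this file is created with be_queue_alloc() and released with
 * be_queue_free(); the queue length and entry type below are hypothetical
 * placeholders, not values taken from be.h.
 *
 *	struct be_queue_info q;
 *
 *	if (be_queue_alloc(adapter, &q, 1024, sizeof(struct be_eq_entry)))
 *		return -1;	(allocation failed; nothing was mapped)
 *	...
 *	be_queue_free(adapter, &q);
 */
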
static void be_intr_set(struct be_adapter *adapter, bool enable)
{
	u8 __iomem *addr = adapter->pcicfg + PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET;
	u32 reg = ioread32(addr);
	u32 enabled = reg & MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;

	if (adapter->eeh_err)
		return;

	if (!enabled && enable)
		reg |= MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
	else if (enabled && !enable)
		reg &= ~MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
	else
		return;

	iowrite32(reg, addr);
}

static void be_rxq_notify(struct be_adapter *adapter, u16 qid, u16 posted)
{
	u32 val = 0;
	val |= qid & DB_RQ_RING_ID_MASK;
	val |= posted << DB_RQ_NUM_POSTED_SHIFT;

	wmb();
	iowrite32(val, adapter->db + DB_RQ_OFFSET);
}

static void be_txq_notify(struct be_adapter *adapter, u16 qid, u16 posted)
{
	u32 val = 0;
	val |= qid & DB_TXULP_RING_ID_MASK;
	val |= (posted & DB_TXULP_NUM_POSTED_MASK) << DB_TXULP_NUM_POSTED_SHIFT;

	wmb();
	iowrite32(val, adapter->db + DB_TXULP1_OFFSET);
}

static void be_eq_notify(struct be_adapter *adapter, u16 qid,
		bool arm, bool clear_int, u16 num_popped)
{
	u32 val = 0;
	val |= qid & DB_EQ_RING_ID_MASK;
	val |= ((qid & DB_EQ_RING_ID_EXT_MASK) <<
			DB_EQ_RING_ID_EXT_MASK_SHIFT);

	if (adapter->eeh_err)
		return;

	if (arm)
		val |= 1 << DB_EQ_REARM_SHIFT;
	if (clear_int)
		val |= 1 << DB_EQ_CLR_SHIFT;
	val |= 1 << DB_EQ_EVNT_SHIFT;
	val |= num_popped << DB_EQ_NUM_POPPED_SHIFT;
	iowrite32(val, adapter->db + DB_EQ_OFFSET);
}

void be_cq_notify(struct be_adapter *adapter, u16 qid, bool arm, u16 num_popped)
{
	u32 val = 0;
	val |= qid & DB_CQ_RING_ID_MASK;
	val |= ((qid & DB_CQ_RING_ID_EXT_MASK) <<
			DB_CQ_RING_ID_EXT_MASK_SHIFT);

	if (adapter->eeh_err)
		return;

	if (arm)
		val |= 1 << DB_CQ_REARM_SHIFT;
	val |= num_popped << DB_CQ_NUM_POPPED_SHIFT;
	iowrite32(val, adapter->db + DB_CQ_OFFSET);
}

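/*
 * Worked example of the doorbell encoding above (derived from the code,
 * with illustrative values): re-arming CQ id 5 after popping 3 completions
 * builds
 *
 *	val = (5 & DB_CQ_RING_ID_MASK) |
 *	      (1 << DB_CQ_REARM_SHIFT) |
 *	      (3 << DB_CQ_NUM_POPPED_SHIFT);
 *
 * and posts it with a single iowrite32() to adapter->db + DB_CQ_OFFSET.
 * The RQ, TXQ and EQ doorbells follow the same pack-and-write pattern with
 * their own masks, shifts and register offsets.
 */
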
static int be_mac_addr_set(struct net_device *netdev, void *p)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct sockaddr *addr = p;
	int status = 0;

	if (!is_valid_ether_addr(addr->sa_data))
		return -EADDRNOTAVAIL;

	/* MAC addr configuration will be done in hardware for VFs
	 * by their corresponding PFs. Just copy to netdev addr here
	 */
	if (!be_physfn(adapter))
		goto netdev_addr;

	status = be_cmd_pmac_del(adapter, adapter->if_handle, adapter->pmac_id);
	if (status)
		return status;

	status = be_cmd_pmac_add(adapter, (u8 *)addr->sa_data,
			adapter->if_handle, &adapter->pmac_id);
netdev_addr:
	if (!status)
		memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);

	return status;
}

void netdev_stats_update(struct be_adapter *adapter)
{
	struct be_hw_stats *hw_stats = hw_stats_from_cmd(adapter->stats_cmd.va);
	struct be_rxf_stats *rxf_stats = &hw_stats->rxf;
	struct be_port_rxf_stats *port_stats =
			&rxf_stats->port[adapter->port_num];
	struct net_device_stats *dev_stats = &adapter->netdev->stats;
	struct be_erx_stats *erx_stats = &hw_stats->erx;
	struct be_rx_obj *rxo;
	int i;

	memset(dev_stats, 0, sizeof(*dev_stats));
	for_all_rx_queues(adapter, rxo, i) {
		dev_stats->rx_packets += rx_stats(rxo)->rx_pkts;
		dev_stats->rx_bytes += rx_stats(rxo)->rx_bytes;
		dev_stats->multicast += rx_stats(rxo)->rx_mcast_pkts;
		/* no space in linux buffers: best possible approximation */
		dev_stats->rx_dropped +=
			erx_stats->rx_drops_no_fragments[rxo->q.id];
	}

	dev_stats->tx_packets = tx_stats(adapter)->be_tx_pkts;
	dev_stats->tx_bytes = tx_stats(adapter)->be_tx_bytes;

	/* bad pkts received */
	dev_stats->rx_errors = port_stats->rx_crc_errors +
		port_stats->rx_alignment_symbol_errors +
		port_stats->rx_in_range_errors +
		port_stats->rx_out_range_errors +
		port_stats->rx_frame_too_long +
		port_stats->rx_dropped_too_small +
		port_stats->rx_dropped_too_short +
		port_stats->rx_dropped_header_too_small +
		port_stats->rx_dropped_tcp_length +
		port_stats->rx_dropped_runt +
		port_stats->rx_tcp_checksum_errs +
		port_stats->rx_ip_checksum_errs +
		port_stats->rx_udp_checksum_errs;

	/* detailed rx errors */
	dev_stats->rx_length_errors = port_stats->rx_in_range_errors +
		port_stats->rx_out_range_errors +
		port_stats->rx_frame_too_long;

	dev_stats->rx_crc_errors = port_stats->rx_crc_errors;

	/* frame alignment errors */
	dev_stats->rx_frame_errors = port_stats->rx_alignment_symbol_errors;

	/* receiver fifo overrun */
	/* drops_no_pbuf is not per i/f, it's per BE card */
	dev_stats->rx_fifo_errors = port_stats->rx_fifo_overflow +
		port_stats->rx_input_fifo_overflow +
		rxf_stats->rx_drops_no_pbuf;
}

void be_link_status_update(struct be_adapter *adapter, bool link_up)
{
	struct net_device *netdev = adapter->netdev;

	/* If link came up or went down */
	if (adapter->link_up != link_up) {
		adapter->link_speed = -1;
		if (link_up) {
			netif_start_queue(netdev);
			netif_carrier_on(netdev);
			printk(KERN_INFO "%s: Link up\n", netdev->name);
		} else {
			netif_stop_queue(netdev);
			netif_carrier_off(netdev);
			printk(KERN_INFO "%s: Link down\n", netdev->name);
		}
		adapter->link_up = link_up;
	}
}

/* Update the EQ delay on BE based on the RX frags consumed / sec */
static void be_rx_eqd_update(struct be_adapter *adapter, struct be_rx_obj *rxo)
{
	struct be_eq_obj *rx_eq = &rxo->rx_eq;
	struct be_rx_stats *stats = &rxo->stats;
	ulong now = jiffies;
	u32 eqd;

	if (!rx_eq->enable_aic)
		return;

	/* Wrapped around */
	if (time_before(now, stats->rx_fps_jiffies)) {
		stats->rx_fps_jiffies = now;
		return;
	}

	/* Update once a second */
	if ((now - stats->rx_fps_jiffies) < HZ)
		return;

	stats->rx_fps = (stats->rx_frags - stats->prev_rx_frags) /
			((now - stats->rx_fps_jiffies) / HZ);

	stats->rx_fps_jiffies = now;
	stats->prev_rx_frags = stats->rx_frags;
	eqd = stats->rx_fps / 110000;
	eqd = eqd << 3;
	if (eqd > rx_eq->max_eqd)
		eqd = rx_eq->max_eqd;
	if (eqd < rx_eq->min_eqd)
		eqd = rx_eq->min_eqd;
	if (eqd < 10)
		eqd = 0;
	if (eqd != rx_eq->cur_eqd)
		be_cmd_modify_eqd(adapter, rx_eq->q.id, eqd);

	rx_eq->cur_eqd = eqd;
}

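/*
 * Worked example of the adaptive-EQD heuristic above (illustrative
 * numbers): at 440000 RX frags/sec, eqd = (440000 / 110000) << 3 = 32.
 * The result is clamped to [rx_eq->min_eqd, rx_eq->max_eqd], and anything
 * under 10 is forced to 0, so lightly loaded queues keep interrupt latency
 * low while busy queues coalesce events.
 */
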
static u32 be_calc_rate(u64 bytes, unsigned long ticks)
{
	u64 rate = bytes;

	do_div(rate, ticks / HZ);
	rate <<= 3;			/* bytes/sec -> bits/sec */
	do_div(rate, 1000000ul);	/* bits/sec -> Mbits/sec */

	return rate;
}

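/*
 * Example of the conversion above (illustrative numbers): 250000000 bytes
 * moved in 2*HZ ticks is 125000000 bytes/sec; the shift makes that
 * 1000000000 bits/sec and the final divide reports 1000, i.e. Mbits/sec.
 */
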
static void be_tx_rate_update(struct be_adapter *adapter)
{
	struct be_tx_stats *stats = tx_stats(adapter);
	ulong now = jiffies;

	/* Wrapped around? */
	if (time_before(now, stats->be_tx_jiffies)) {
		stats->be_tx_jiffies = now;
		return;
	}

	/* Update tx rate once in two seconds */
	if ((now - stats->be_tx_jiffies) > 2 * HZ) {
		stats->be_tx_rate = be_calc_rate(stats->be_tx_bytes
						  - stats->be_tx_bytes_prev,
						 now - stats->be_tx_jiffies);
		stats->be_tx_jiffies = now;
		stats->be_tx_bytes_prev = stats->be_tx_bytes;
	}
}

static void be_tx_stats_update(struct be_adapter *adapter,
			u32 wrb_cnt, u32 copied, u32 gso_segs, bool stopped)
{
	struct be_tx_stats *stats = tx_stats(adapter);
	stats->be_tx_reqs++;
	stats->be_tx_wrbs += wrb_cnt;
	stats->be_tx_bytes += copied;
	stats->be_tx_pkts += (gso_segs ? gso_segs : 1);
	if (stopped)
		stats->be_tx_stops++;
}

/* Determine number of WRB entries needed to xmit data in an skb */
static u32 wrb_cnt_for_skb(struct be_adapter *adapter, struct sk_buff *skb,
			bool *dummy)
{
	int cnt = (skb->len > skb->data_len);

	cnt += skb_shinfo(skb)->nr_frags;

	/* to account for hdr wrb */
	cnt++;
	if (lancer_chip(adapter) || !(cnt & 1)) {
		*dummy = false;
	} else {
		/* add a dummy to make it an even num */
		cnt++;
		*dummy = true;
	}
	BUG_ON(cnt > BE_MAX_TX_FRAG_COUNT);
	return cnt;
}

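/*
 * Worked example for the count above: an skb with linear data and two page
 * frags needs 1 + 2 data WRBs plus the header WRB, i.e. 4 (even), so no
 * dummy is added. With a single page frag the total is 3 (odd), so BE2/BE3
 * pad it to 4 with a dummy WRB; Lancer has no even-count requirement,
 * hence the lancer_chip() check.
 */
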
static inline void wrb_fill(struct be_eth_wrb *wrb, u64 addr, int len)
{
	wrb->frag_pa_hi = upper_32_bits(addr);
	wrb->frag_pa_lo = addr & 0xFFFFFFFF;
	wrb->frag_len = len & ETH_WRB_FRAG_LEN_MASK;
}

static void wrb_fill_hdr(struct be_adapter *adapter, struct be_eth_hdr_wrb *hdr,
		struct sk_buff *skb, u32 wrb_cnt, u32 len)
{
	u8 vlan_prio = 0;
	u16 vlan_tag = 0;

	memset(hdr, 0, sizeof(*hdr));

	AMAP_SET_BITS(struct amap_eth_hdr_wrb, crc, hdr, 1);

	if (skb_is_gso(skb)) {
		AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso, hdr, 1);
		AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso_mss,
			hdr, skb_shinfo(skb)->gso_size);
		if (skb_is_gso_v6(skb) && !lancer_chip(adapter))
			AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso6, hdr, 1);
		if (lancer_chip(adapter) && adapter->sli_family ==
						LANCER_A0_SLI_FAMILY) {
			AMAP_SET_BITS(struct amap_eth_hdr_wrb, ipcs, hdr, 1);
			if (is_tcp_pkt(skb))
				AMAP_SET_BITS(struct amap_eth_hdr_wrb,
								tcpcs, hdr, 1);
			else if (is_udp_pkt(skb))
				AMAP_SET_BITS(struct amap_eth_hdr_wrb,
								udpcs, hdr, 1);
		}
	} else if (skb->ip_summed == CHECKSUM_PARTIAL) {
		if (is_tcp_pkt(skb))
			AMAP_SET_BITS(struct amap_eth_hdr_wrb, tcpcs, hdr, 1);
		else if (is_udp_pkt(skb))
			AMAP_SET_BITS(struct amap_eth_hdr_wrb, udpcs, hdr, 1);
	}

	if (adapter->vlan_grp && vlan_tx_tag_present(skb)) {
		AMAP_SET_BITS(struct amap_eth_hdr_wrb, vlan, hdr, 1);
		vlan_tag = vlan_tx_tag_get(skb);
		vlan_prio = (vlan_tag & VLAN_PRIO_MASK) >> VLAN_PRIO_SHIFT;
		/* If vlan priority provided by OS is NOT in available bmap */
		if (!(adapter->vlan_prio_bmap & (1 << vlan_prio)))
			vlan_tag = (vlan_tag & ~VLAN_PRIO_MASK) |
					adapter->recommended_prio;
		AMAP_SET_BITS(struct amap_eth_hdr_wrb, vlan_tag, hdr, vlan_tag);
	}

	AMAP_SET_BITS(struct amap_eth_hdr_wrb, event, hdr, 1);
	AMAP_SET_BITS(struct amap_eth_hdr_wrb, complete, hdr, 1);
	AMAP_SET_BITS(struct amap_eth_hdr_wrb, num_wrb, hdr, wrb_cnt);
	AMAP_SET_BITS(struct amap_eth_hdr_wrb, len, hdr, len);
}

static void unmap_tx_frag(struct pci_dev *pdev, struct be_eth_wrb *wrb,
		bool unmap_single)
{
	dma_addr_t dma;

	be_dws_le_to_cpu(wrb, sizeof(*wrb));

	dma = (u64)wrb->frag_pa_hi << 32 | (u64)wrb->frag_pa_lo;
	if (wrb->frag_len) {
		if (unmap_single)
			pci_unmap_single(pdev, dma, wrb->frag_len,
				PCI_DMA_TODEVICE);
		else
			pci_unmap_page(pdev, dma, wrb->frag_len,
				PCI_DMA_TODEVICE);
	}
}

static int make_tx_wrbs(struct be_adapter *adapter,
		struct sk_buff *skb, u32 wrb_cnt, bool dummy_wrb)
{
	dma_addr_t busaddr;
	int i, copied = 0;
	struct pci_dev *pdev = adapter->pdev;
	struct sk_buff *first_skb = skb;
	struct be_queue_info *txq = &adapter->tx_obj.q;
	struct be_eth_wrb *wrb;
	struct be_eth_hdr_wrb *hdr;
	bool map_single = false;
	u16 map_head;

	hdr = queue_head_node(txq);
	queue_head_inc(txq);
	map_head = txq->head;

	if (skb->len > skb->data_len) {
		int len = skb_headlen(skb);
		busaddr = pci_map_single(pdev, skb->data, len,
					 PCI_DMA_TODEVICE);
		if (pci_dma_mapping_error(pdev, busaddr))
			goto dma_err;
		map_single = true;
		wrb = queue_head_node(txq);
		wrb_fill(wrb, busaddr, len);
		be_dws_cpu_to_le(wrb, sizeof(*wrb));
		queue_head_inc(txq);
		copied += len;
	}

	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		struct skb_frag_struct *frag =
			&skb_shinfo(skb)->frags[i];
		busaddr = pci_map_page(pdev, frag->page,
				       frag->page_offset,
				       frag->size, PCI_DMA_TODEVICE);
		if (pci_dma_mapping_error(pdev, busaddr))
			goto dma_err;
		wrb = queue_head_node(txq);
		wrb_fill(wrb, busaddr, frag->size);
		be_dws_cpu_to_le(wrb, sizeof(*wrb));
		queue_head_inc(txq);
		copied += frag->size;
	}

	if (dummy_wrb) {
		wrb = queue_head_node(txq);
		wrb_fill(wrb, 0, 0);
		be_dws_cpu_to_le(wrb, sizeof(*wrb));
		queue_head_inc(txq);
	}

	wrb_fill_hdr(adapter, hdr, first_skb, wrb_cnt, copied);
	be_dws_cpu_to_le(hdr, sizeof(*hdr));

	return copied;
dma_err:
	txq->head = map_head;
	while (copied) {
		wrb = queue_head_node(txq);
		unmap_tx_frag(pdev, wrb, map_single);
		map_single = false;
		copied -= wrb->frag_len;
		queue_head_inc(txq);
	}
	return 0;
}

static netdev_tx_t be_xmit(struct sk_buff *skb,
			struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_tx_obj *tx_obj = &adapter->tx_obj;
	struct be_queue_info *txq = &tx_obj->q;
	u32 wrb_cnt = 0, copied = 0;
	u32 start = txq->head;
	bool dummy_wrb, stopped = false;

	wrb_cnt = wrb_cnt_for_skb(adapter, skb, &dummy_wrb);

	copied = make_tx_wrbs(adapter, skb, wrb_cnt, dummy_wrb);
	if (copied) {
		/* record the sent skb in the sent_skb table */
		BUG_ON(tx_obj->sent_skb_list[start]);
		tx_obj->sent_skb_list[start] = skb;

		/* Ensure txq has space for the next skb; else stop the queue
		 * *BEFORE* ringing the tx doorbell, so that we serialize the
		 * tx compls of the current transmit which'll wake up the queue
		 */
		atomic_add(wrb_cnt, &txq->used);
		if ((BE_MAX_TX_FRAG_COUNT + atomic_read(&txq->used)) >=
								txq->len) {
			netif_stop_queue(netdev);
			stopped = true;
		}

		be_txq_notify(adapter, txq->id, wrb_cnt);

		be_tx_stats_update(adapter, wrb_cnt, copied,
				skb_shinfo(skb)->gso_segs, stopped);
	} else {
		txq->head = start;
		dev_kfree_skb_any(skb);
	}
	return NETDEV_TX_OK;
}

static int be_change_mtu(struct net_device *netdev, int new_mtu)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	if (new_mtu < BE_MIN_MTU ||
			new_mtu > (BE_MAX_JUMBO_FRAME_SIZE -
					(ETH_HLEN + ETH_FCS_LEN))) {
		dev_info(&adapter->pdev->dev,
			"MTU must be between %d and %d bytes\n",
			BE_MIN_MTU,
			(BE_MAX_JUMBO_FRAME_SIZE - (ETH_HLEN + ETH_FCS_LEN)));
		return -EINVAL;
	}
	dev_info(&adapter->pdev->dev, "MTU changed from %d to %d bytes\n",
			netdev->mtu, new_mtu);
	netdev->mtu = new_mtu;
	return 0;
}

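/*
 * Example of the bounds check above, assuming be.h's values of 256 for
 * BE_MIN_MTU and 9018 for BE_MAX_JUMBO_FRAME_SIZE: the largest accepted
 * MTU is 9018 - (ETH_HLEN + ETH_FCS_LEN) = 9018 - 18 = 9000 bytes, the
 * usual jumbo-frame payload size.
 */
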
/*
 * A max of 64 (BE_NUM_VLANS_SUPPORTED) vlans can be configured in BE.
 * If the user configures more, place BE in vlan promiscuous mode.
 */
static int be_vid_config(struct be_adapter *adapter, bool vf, u32 vf_num)
{
	u16 vtag[BE_NUM_VLANS_SUPPORTED];
	u16 ntags = 0, i;
	int status = 0;
	u32 if_handle;

	if (vf) {
		if_handle = adapter->vf_cfg[vf_num].vf_if_handle;
		vtag[0] = cpu_to_le16(adapter->vf_cfg[vf_num].vf_vlan_tag);
		status = be_cmd_vlan_config(adapter, if_handle, vtag, 1, 1, 0);
	}

	if (adapter->vlans_added <= adapter->max_vlans) {
		/* Construct VLAN Table to give to HW */
		for (i = 0; i < VLAN_N_VID; i++) {
			if (adapter->vlan_tag[i]) {
				vtag[ntags] = cpu_to_le16(i);
				ntags++;
			}
		}
		status = be_cmd_vlan_config(adapter, adapter->if_handle,
					vtag, ntags, 1, 0);
	} else {
		status = be_cmd_vlan_config(adapter, adapter->if_handle,
					NULL, 0, 1, 1);
	}

	return status;
}

static void be_vlan_register(struct net_device *netdev, struct vlan_group *grp)
{
	struct be_adapter *adapter = netdev_priv(netdev);

	adapter->vlan_grp = grp;
}

static void be_vlan_add_vid(struct net_device *netdev, u16 vid)
{
	struct be_adapter *adapter = netdev_priv(netdev);

	adapter->vlans_added++;
	if (!be_physfn(adapter))
		return;

	adapter->vlan_tag[vid] = 1;
	if (adapter->vlans_added <= (adapter->max_vlans + 1))
		be_vid_config(adapter, false, 0);
}

static void be_vlan_rem_vid(struct net_device *netdev, u16 vid)
{
	struct be_adapter *adapter = netdev_priv(netdev);

	adapter->vlans_added--;
	vlan_group_set_device(adapter->vlan_grp, vid, NULL);

	if (!be_physfn(adapter))
		return;

	adapter->vlan_tag[vid] = 0;
	if (adapter->vlans_added <= adapter->max_vlans)
		be_vid_config(adapter, false, 0);
}

static void be_set_multicast_list(struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);

	if (netdev->flags & IFF_PROMISC) {
		be_cmd_promiscuous_config(adapter, adapter->port_num, 1);
		adapter->promiscuous = true;
		goto done;
	}

	/* BE was previously in promiscuous mode; disable it */
	if (adapter->promiscuous) {
		adapter->promiscuous = false;
		be_cmd_promiscuous_config(adapter, adapter->port_num, 0);
	}

	/* Enable multicast promisc if num configured exceeds what we support */
	if (netdev->flags & IFF_ALLMULTI ||
	    netdev_mc_count(netdev) > BE_MAX_MC) {
		be_cmd_multicast_set(adapter, adapter->if_handle, NULL,
				&adapter->mc_cmd_mem);
		goto done;
	}

	be_cmd_multicast_set(adapter, adapter->if_handle, netdev,
		&adapter->mc_cmd_mem);
done:
	return;
}

static int be_set_vf_mac(struct net_device *netdev, int vf, u8 *mac)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	int status;

	if (!adapter->sriov_enabled)
		return -EPERM;

	if (!is_valid_ether_addr(mac) || (vf >= num_vfs))
		return -EINVAL;

	if (adapter->vf_cfg[vf].vf_pmac_id != BE_INVALID_PMAC_ID)
		status = be_cmd_pmac_del(adapter,
					adapter->vf_cfg[vf].vf_if_handle,
					adapter->vf_cfg[vf].vf_pmac_id);

	status = be_cmd_pmac_add(adapter, mac,
				adapter->vf_cfg[vf].vf_if_handle,
				&adapter->vf_cfg[vf].vf_pmac_id);

	if (status)
		dev_err(&adapter->pdev->dev, "MAC %pM set on VF %d failed\n",
			mac, vf);
	else
		memcpy(adapter->vf_cfg[vf].vf_mac_addr, mac, ETH_ALEN);

	return status;
}

static int be_get_vf_config(struct net_device *netdev, int vf,
			struct ifla_vf_info *vi)
{
	struct be_adapter *adapter = netdev_priv(netdev);

	if (!adapter->sriov_enabled)
		return -EPERM;

	if (vf >= num_vfs)
		return -EINVAL;

	vi->vf = vf;
	vi->tx_rate = adapter->vf_cfg[vf].vf_tx_rate;
	vi->vlan = adapter->vf_cfg[vf].vf_vlan_tag;
	vi->qos = 0;
	memcpy(&vi->mac, adapter->vf_cfg[vf].vf_mac_addr, ETH_ALEN);

	return 0;
}

static int be_set_vf_vlan(struct net_device *netdev,
			int vf, u16 vlan, u8 qos)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	int status = 0;

	if (!adapter->sriov_enabled)
		return -EPERM;

	if ((vf >= num_vfs) || (vlan > 4095))
		return -EINVAL;

	if (vlan) {
		adapter->vf_cfg[vf].vf_vlan_tag = vlan;
		adapter->vlans_added++;
	} else {
		adapter->vf_cfg[vf].vf_vlan_tag = 0;
		adapter->vlans_added--;
	}

	status = be_vid_config(adapter, true, vf);

	if (status)
		dev_info(&adapter->pdev->dev,
				"VLAN %d config on VF %d failed\n", vlan, vf);
	return status;
}

static int be_set_vf_tx_rate(struct net_device *netdev,
			int vf, int rate)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	int status = 0;

	if (!adapter->sriov_enabled)
		return -EPERM;

	if ((vf >= num_vfs) || (rate < 0))
		return -EINVAL;

	if (rate > 10000)
		rate = 10000;

	adapter->vf_cfg[vf].vf_tx_rate = rate;
	status = be_cmd_set_qos(adapter, rate / 10, vf);

	if (status)
		dev_info(&adapter->pdev->dev,
				"tx rate %d on VF %d failed\n", rate, vf);
	return status;
}

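/*
 * Usage note for the rate limiter above: rate is in Mbits/sec and is
 * programmed in units of 10 Mbits/sec, so e.g.
 * "ip link set <dev> vf 0 rate 2500" ends up as
 * be_cmd_set_qos(adapter, 250, 0); anything above 10000 (the 10 Gbps link
 * maximum) is silently capped.
 */
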
static void be_rx_rate_update(struct be_rx_obj *rxo)
{
	struct be_rx_stats *stats = &rxo->stats;
	ulong now = jiffies;

	/* Wrapped around */
	if (time_before(now, stats->rx_jiffies)) {
		stats->rx_jiffies = now;
		return;
	}

	/* Update the rate once in two seconds */
	if ((now - stats->rx_jiffies) < 2 * HZ)
		return;

	stats->rx_rate = be_calc_rate(stats->rx_bytes - stats->rx_bytes_prev,
				now - stats->rx_jiffies);
	stats->rx_jiffies = now;
	stats->rx_bytes_prev = stats->rx_bytes;
}

static void be_rx_stats_update(struct be_rx_obj *rxo,
		u32 pktsize, u16 numfrags, u8 pkt_type)
{
	struct be_rx_stats *stats = &rxo->stats;

	stats->rx_compl++;
	stats->rx_frags += numfrags;
	stats->rx_bytes += pktsize;
	stats->rx_pkts++;
	if (pkt_type == BE_MULTICAST_PACKET)
		stats->rx_mcast_pkts++;
}

static inline bool csum_passed(struct be_eth_rx_compl *rxcp)
{
	u8 l4_cksm, ipv6, ipcksm;

	l4_cksm = AMAP_GET_BITS(struct amap_eth_rx_compl, l4_cksm, rxcp);
	ipcksm = AMAP_GET_BITS(struct amap_eth_rx_compl, ipcksm, rxcp);
	ipv6 = AMAP_GET_BITS(struct amap_eth_rx_compl, ip_version, rxcp);

	/* Ignore ipcksm for ipv6 pkts */
	return l4_cksm && (ipcksm || ipv6);
}

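/*
 * Example of the rule above: an IPv6 packet carries no IP header checksum,
 * so only l4_cksm must be good; for IPv4 both l4_cksm and ipcksm must pass
 * before the caller marks the skb CHECKSUM_UNNECESSARY.
 */
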
static struct be_rx_page_info *
get_rx_page_info(struct be_adapter *adapter,
		struct be_rx_obj *rxo,
		u16 frag_idx)
{
	struct be_rx_page_info *rx_page_info;
	struct be_queue_info *rxq = &rxo->q;

	rx_page_info = &rxo->page_info_tbl[frag_idx];
	BUG_ON(!rx_page_info->page);

	if (rx_page_info->last_page_user) {
		pci_unmap_page(adapter->pdev, dma_unmap_addr(rx_page_info, bus),
			adapter->big_page_size, PCI_DMA_FROMDEVICE);
		rx_page_info->last_page_user = false;
	}

	atomic_dec(&rxq->used);
	return rx_page_info;
}

/* Throw away the data in the Rx completion */
static void be_rx_compl_discard(struct be_adapter *adapter,
		struct be_rx_obj *rxo,
		struct be_eth_rx_compl *rxcp)
{
	struct be_queue_info *rxq = &rxo->q;
	struct be_rx_page_info *page_info;
	u16 rxq_idx, i, num_rcvd;

	rxq_idx = AMAP_GET_BITS(struct amap_eth_rx_compl, fragndx, rxcp);
	num_rcvd = AMAP_GET_BITS(struct amap_eth_rx_compl, numfrags, rxcp);

	/* Skip out-of-buffer compl(lancer) or flush compl(BE) */
	if (likely(rxq_idx != rxo->last_frag_index && num_rcvd != 0)) {

		rxo->last_frag_index = rxq_idx;

		for (i = 0; i < num_rcvd; i++) {
			page_info = get_rx_page_info(adapter, rxo, rxq_idx);
			put_page(page_info->page);
			memset(page_info, 0, sizeof(*page_info));
			index_inc(&rxq_idx, rxq->len);
		}
	}
}

/*
 * skb_fill_rx_data forms a complete skb for an ether frame
 * indicated by rxcp.
 */
static void skb_fill_rx_data(struct be_adapter *adapter, struct be_rx_obj *rxo,
			struct sk_buff *skb, struct be_eth_rx_compl *rxcp,
			u16 num_rcvd)
{
	struct be_queue_info *rxq = &rxo->q;
	struct be_rx_page_info *page_info;
	u16 rxq_idx, i, j;
	u32 pktsize, hdr_len, curr_frag_len, size;
	u8 *start;
	u8 pkt_type;

	rxq_idx = AMAP_GET_BITS(struct amap_eth_rx_compl, fragndx, rxcp);
	pktsize = AMAP_GET_BITS(struct amap_eth_rx_compl, pktsize, rxcp);
	pkt_type = AMAP_GET_BITS(struct amap_eth_rx_compl, cast_enc, rxcp);

	page_info = get_rx_page_info(adapter, rxo, rxq_idx);

	start = page_address(page_info->page) + page_info->page_offset;
	prefetch(start);

	/* Copy data in the first descriptor of this completion */
	curr_frag_len = min(pktsize, rx_frag_size);

	/* Copy the header portion into skb_data */
	hdr_len = min((u32)BE_HDR_LEN, curr_frag_len);
	memcpy(skb->data, start, hdr_len);
	skb->len = curr_frag_len;
	if (curr_frag_len <= BE_HDR_LEN) { /* tiny packet */
		/* Complete packet has now been moved to data */
		put_page(page_info->page);
		skb->data_len = 0;
		skb->tail += curr_frag_len;
	} else {
		skb_shinfo(skb)->nr_frags = 1;
		skb_shinfo(skb)->frags[0].page = page_info->page;
		skb_shinfo(skb)->frags[0].page_offset =
					page_info->page_offset + hdr_len;
		skb_shinfo(skb)->frags[0].size = curr_frag_len - hdr_len;
		skb->data_len = curr_frag_len - hdr_len;
		skb->tail += hdr_len;
	}
	page_info->page = NULL;

	if (pktsize <= rx_frag_size) {
		BUG_ON(num_rcvd != 1);
		goto done;
	}

	/* More frags present for this completion */
	size = pktsize;
	for (i = 1, j = 0; i < num_rcvd; i++) {
		size -= curr_frag_len;
		index_inc(&rxq_idx, rxq->len);
		page_info = get_rx_page_info(adapter, rxo, rxq_idx);

		curr_frag_len = min(size, rx_frag_size);

		/* Coalesce all frags from the same physical page in one slot */
		if (page_info->page_offset == 0) {
			/* Fresh page */
			j++;
			skb_shinfo(skb)->frags[j].page = page_info->page;
			skb_shinfo(skb)->frags[j].page_offset =
							page_info->page_offset;
			skb_shinfo(skb)->frags[j].size = 0;
			skb_shinfo(skb)->nr_frags++;
		} else {
			put_page(page_info->page);
		}

		skb_shinfo(skb)->frags[j].size += curr_frag_len;
		skb->len += curr_frag_len;
		skb->data_len += curr_frag_len;

		page_info->page = NULL;
	}
	BUG_ON(j > MAX_SKB_FRAGS);

done:
	be_rx_stats_update(rxo, pktsize, num_rcvd, pkt_type);
}

/* Process the RX completion indicated by rxcp when GRO is disabled */
static void be_rx_compl_process(struct be_adapter *adapter,
			struct be_rx_obj *rxo,
			struct be_eth_rx_compl *rxcp)
{
	struct sk_buff *skb;
	u32 vlanf, vid;
	u16 num_rcvd;
	u8 vtm;

	num_rcvd = AMAP_GET_BITS(struct amap_eth_rx_compl, numfrags, rxcp);

	skb = netdev_alloc_skb_ip_align(adapter->netdev, BE_HDR_LEN);
	if (unlikely(!skb)) {
		if (net_ratelimit())
			dev_warn(&adapter->pdev->dev, "skb alloc failed\n");
		be_rx_compl_discard(adapter, rxo, rxcp);
		return;
	}

	skb_fill_rx_data(adapter, rxo, skb, rxcp, num_rcvd);

	if (likely(adapter->rx_csum && csum_passed(rxcp)))
		skb->ip_summed = CHECKSUM_UNNECESSARY;
	else
		skb_checksum_none_assert(skb);

	skb->truesize = skb->len + sizeof(struct sk_buff);
	skb->protocol = eth_type_trans(skb, adapter->netdev);

	vlanf = AMAP_GET_BITS(struct amap_eth_rx_compl, vtp, rxcp);
	vtm = AMAP_GET_BITS(struct amap_eth_rx_compl, vtm, rxcp);

	/* vlanf could be wrongly set in some cards.
	 * ignore if vtm is not set */
	if ((adapter->function_mode & 0x400) && !vtm)
		vlanf = 0;

	if (unlikely(vlanf)) {
		if (!adapter->vlan_grp || adapter->vlans_added == 0) {
			kfree_skb(skb);
			return;
		}
		vid = AMAP_GET_BITS(struct amap_eth_rx_compl, vlan_tag, rxcp);
		if (!lancer_chip(adapter))
			vid = swab16(vid);
		vlan_hwaccel_receive_skb(skb, adapter->vlan_grp, vid);
	} else {
		netif_receive_skb(skb);
	}
}

/* Process the RX completion indicated by rxcp when GRO is enabled */
static void be_rx_compl_process_gro(struct be_adapter *adapter,
		struct be_rx_obj *rxo,
		struct be_eth_rx_compl *rxcp)
{
	struct be_rx_page_info *page_info;
	struct sk_buff *skb = NULL;
	struct be_queue_info *rxq = &rxo->q;
	struct be_eq_obj *eq_obj = &rxo->rx_eq;
	u32 num_rcvd, pkt_size, remaining, vlanf, curr_frag_len;
	u16 i, rxq_idx = 0, vid, j;
	u8 vtm;
	u8 pkt_type;

	num_rcvd = AMAP_GET_BITS(struct amap_eth_rx_compl, numfrags, rxcp);
	pkt_size = AMAP_GET_BITS(struct amap_eth_rx_compl, pktsize, rxcp);
	vlanf = AMAP_GET_BITS(struct amap_eth_rx_compl, vtp, rxcp);
	rxq_idx = AMAP_GET_BITS(struct amap_eth_rx_compl, fragndx, rxcp);
	vtm = AMAP_GET_BITS(struct amap_eth_rx_compl, vtm, rxcp);
	pkt_type = AMAP_GET_BITS(struct amap_eth_rx_compl, cast_enc, rxcp);

	/* vlanf could be wrongly set in some cards.
	 * ignore if vtm is not set */
	if ((adapter->function_mode & 0x400) && !vtm)
		vlanf = 0;

	skb = napi_get_frags(&eq_obj->napi);
	if (!skb) {
		be_rx_compl_discard(adapter, rxo, rxcp);
		return;
	}

	remaining = pkt_size;
	for (i = 0, j = -1; i < num_rcvd; i++) {
		page_info = get_rx_page_info(adapter, rxo, rxq_idx);

		curr_frag_len = min(remaining, rx_frag_size);

		/* Coalesce all frags from the same physical page in one slot */
		if (i == 0 || page_info->page_offset == 0) {
			/* First frag or Fresh page */
			j++;
			skb_shinfo(skb)->frags[j].page = page_info->page;
			skb_shinfo(skb)->frags[j].page_offset =
							page_info->page_offset;
			skb_shinfo(skb)->frags[j].size = 0;
		} else {
			put_page(page_info->page);
		}
		skb_shinfo(skb)->frags[j].size += curr_frag_len;

		remaining -= curr_frag_len;
		index_inc(&rxq_idx, rxq->len);
		memset(page_info, 0, sizeof(*page_info));
	}
	BUG_ON(j > MAX_SKB_FRAGS);

	skb_shinfo(skb)->nr_frags = j + 1;
	skb->len = pkt_size;
	skb->data_len = pkt_size;
	skb->truesize += pkt_size;
	skb->ip_summed = CHECKSUM_UNNECESSARY;

	if (likely(!vlanf)) {
		napi_gro_frags(&eq_obj->napi);
	} else {
		vid = AMAP_GET_BITS(struct amap_eth_rx_compl, vlan_tag, rxcp);
		if (!lancer_chip(adapter))
			vid = swab16(vid);

		if (!adapter->vlan_grp || adapter->vlans_added == 0)
			return;

		vlan_gro_frags(&eq_obj->napi, adapter->vlan_grp, vid);
	}

	be_rx_stats_update(rxo, pkt_size, num_rcvd, pkt_type);
}

static struct be_eth_rx_compl *be_rx_compl_get(struct be_rx_obj *rxo)
{
	struct be_eth_rx_compl *rxcp = queue_tail_node(&rxo->cq);

	if (rxcp->dw[offsetof(struct amap_eth_rx_compl, valid) / 32] == 0)
		return NULL;

	rmb();
	be_dws_le_to_cpu(rxcp, sizeof(*rxcp));

	queue_tail_inc(&rxo->cq);
	return rxcp;
}

/* To reset the valid bit, we need to reset the whole word as
 * when walking the queue the valid entries are little-endian
 * and invalid entries are host endian
 */
static inline void be_rx_compl_reset(struct be_eth_rx_compl *rxcp)
{
	rxcp->dw[offsetof(struct amap_eth_rx_compl, valid) / 32] = 0;
}

static inline struct page *be_alloc_pages(u32 size)
{
	gfp_t alloc_flags = GFP_ATOMIC;
	u32 order = get_order(size);
	if (order > 0)
		alloc_flags |= __GFP_COMP;
	return alloc_pages(alloc_flags, order);
}

/*
 * Allocate a page, split it to fragments of size rx_frag_size and post as
 * receive buffers to BE
 */
static void be_post_rx_frags(struct be_rx_obj *rxo)
{
	struct be_adapter *adapter = rxo->adapter;
	struct be_rx_page_info *page_info_tbl = rxo->page_info_tbl;
	struct be_rx_page_info *page_info = NULL, *prev_page_info = NULL;
	struct be_queue_info *rxq = &rxo->q;
	struct page *pagep = NULL;
	struct be_eth_rx_d *rxd;
	u64 page_dmaaddr = 0, frag_dmaaddr;
	u32 posted, page_offset = 0;

	page_info = &rxo->page_info_tbl[rxq->head];
	for (posted = 0; posted < MAX_RX_POST && !page_info->page; posted++) {
		if (!pagep) {
			pagep = be_alloc_pages(adapter->big_page_size);
			if (unlikely(!pagep)) {
				rxo->stats.rx_post_fail++;
				break;
			}
			page_dmaaddr = pci_map_page(adapter->pdev, pagep, 0,
						adapter->big_page_size,
						PCI_DMA_FROMDEVICE);
			page_info->page_offset = 0;
		} else {
			get_page(pagep);
			page_info->page_offset = page_offset + rx_frag_size;
		}
		page_offset = page_info->page_offset;
		page_info->page = pagep;
		dma_unmap_addr_set(page_info, bus, page_dmaaddr);
		frag_dmaaddr = page_dmaaddr + page_info->page_offset;

		rxd = queue_head_node(rxq);
		rxd->fragpa_lo = cpu_to_le32(frag_dmaaddr & 0xFFFFFFFF);
		rxd->fragpa_hi = cpu_to_le32(upper_32_bits(frag_dmaaddr));

		/* Any space left in the current big page for another frag? */
		if ((page_offset + rx_frag_size + rx_frag_size) >
					adapter->big_page_size) {
			pagep = NULL;
			page_info->last_page_user = true;
		}

		prev_page_info = page_info;
		queue_head_inc(rxq);
		page_info = &page_info_tbl[rxq->head];
	}
	if (pagep)
		prev_page_info->last_page_user = true;

	if (posted) {
		atomic_add(posted, &rxq->used);
		be_rxq_notify(adapter, rxq->id, posted);
	} else if (atomic_read(&rxq->used) == 0) {
		/* Let be_worker replenish when memory is available */
		rxo->rx_post_starved = true;
	}
}

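/*
 * Worked example for the posting loop above (assuming the default
 * rx_frag_size of 2048 and a 4K big page): each page is carved into two RX
 * descriptors at offsets 0 and 2048; get_page() takes an extra reference
 * for the second frag, and last_page_user marks the final user so the PCI
 * mapping is released exactly once in get_rx_page_info().
 */
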
static struct be_eth_tx_compl *be_tx_compl_get(struct be_queue_info *tx_cq)
{
	struct be_eth_tx_compl *txcp = queue_tail_node(tx_cq);

	if (txcp->dw[offsetof(struct amap_eth_tx_compl, valid) / 32] == 0)
		return NULL;

	rmb();
	be_dws_le_to_cpu(txcp, sizeof(*txcp));

	txcp->dw[offsetof(struct amap_eth_tx_compl, valid) / 32] = 0;

	queue_tail_inc(tx_cq);
	return txcp;
}

static void be_tx_compl_process(struct be_adapter *adapter, u16 last_index)
{
	struct be_queue_info *txq = &adapter->tx_obj.q;
	struct be_eth_wrb *wrb;
	struct sk_buff **sent_skbs = adapter->tx_obj.sent_skb_list;
	struct sk_buff *sent_skb;
	u16 cur_index, num_wrbs = 1; /* account for hdr wrb */
	bool unmap_skb_hdr = true;

	sent_skb = sent_skbs[txq->tail];
	BUG_ON(!sent_skb);
	sent_skbs[txq->tail] = NULL;

	/* skip header wrb */
	queue_tail_inc(txq);

	do {
		cur_index = txq->tail;
		wrb = queue_tail_node(txq);
		unmap_tx_frag(adapter->pdev, wrb, (unmap_skb_hdr &&
					skb_headlen(sent_skb)));
		unmap_skb_hdr = false;

		num_wrbs++;
		queue_tail_inc(txq);
	} while (cur_index != last_index);

	atomic_sub(num_wrbs, &txq->used);

	kfree_skb(sent_skb);
}

Sathya Perla859b1e42009-08-10 03:43:51 +00001288static inline struct be_eq_entry *event_get(struct be_eq_obj *eq_obj)
1289{
1290 struct be_eq_entry *eqe = queue_tail_node(&eq_obj->q);
1291
1292 if (!eqe->evt)
1293 return NULL;
1294
Sathya Perlaf3eb62d2010-06-29 00:11:17 +00001295 rmb();
Sathya Perla859b1e42009-08-10 03:43:51 +00001296 eqe->evt = le32_to_cpu(eqe->evt);
1297 queue_tail_inc(&eq_obj->q);
1298 return eqe;
1299}
1300
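/* Drain the event queue and re-arm it. The events carry no payload
 * that matters here; only the count is significant, and it is handed
 * back to the EQ doorbell before NAPI is scheduled. Notifying with
 * num == 0 still re-arms the EQ, which covers spurious interrupts that
 * arrive without any event posted. */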
static int event_handle(struct be_adapter *adapter,
			struct be_eq_obj *eq_obj)
{
	struct be_eq_entry *eqe;
	u16 num = 0;

	while ((eqe = event_get(eq_obj)) != NULL) {
		eqe->evt = 0;
		num++;
	}

	/* Deal with any spurious interrupts that come
	 * without events
	 */
	be_eq_notify(adapter, eq_obj->q.id, true, true, num);
	if (num)
		napi_schedule(&eq_obj->napi);

	return num;
}

/* Just read and notify events without processing them.
 * Used at the time of destroying event queues */
static void be_eq_clean(struct be_adapter *adapter,
			struct be_eq_obj *eq_obj)
{
	struct be_eq_entry *eqe;
	u16 num = 0;

	while ((eqe = event_get(eq_obj)) != NULL) {
		eqe->evt = 0;
		num++;
	}

	if (num)
		be_eq_notify(adapter, eq_obj->q.id, false, true, num);
}

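/* Empty an rx queue in two passes: first discard every pending
 * completion (acking each one so the CQ drains), then release the
 * posted buffers that never produced a completion, walking from the
 * computed tail until the queue's use count reaches zero. */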
static void be_rx_q_clean(struct be_adapter *adapter, struct be_rx_obj *rxo)
{
	struct be_rx_page_info *page_info;
	struct be_queue_info *rxq = &rxo->q;
	struct be_queue_info *rx_cq = &rxo->cq;
	struct be_eth_rx_compl *rxcp;
	u16 tail;

	/* First cleanup pending rx completions */
	while ((rxcp = be_rx_compl_get(rxo)) != NULL) {
		be_rx_compl_discard(adapter, rxo, rxcp);
		be_rx_compl_reset(rxcp);
		be_cq_notify(adapter, rx_cq->id, false, 1);
	}

	/* Then free posted rx buffers that were not used */
	tail = (rxq->head + rxq->len - atomic_read(&rxq->used)) % rxq->len;
	for (; atomic_read(&rxq->used) > 0; index_inc(&tail, rxq->len)) {
		page_info = get_rx_page_info(adapter, rxo, tail);
		put_page(page_info->page);
		memset(page_info, 0, sizeof(*page_info));
	}
	BUG_ON(atomic_read(&rxq->used));
}

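/* Reap Tx completions for up to 200ms so in-flight transmits can
 * finish cleanly. Anything still outstanding after that is logged and
 * force-freed: for each stuck skb the wrb count is recomputed from the
 * skb itself so the tail can be advanced past completions that will
 * never arrive. */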
static void be_tx_compl_clean(struct be_adapter *adapter)
{
	struct be_queue_info *tx_cq = &adapter->tx_obj.cq;
	struct be_queue_info *txq = &adapter->tx_obj.q;
	struct be_eth_tx_compl *txcp;
	u16 end_idx, cmpl = 0, timeo = 0;
	struct sk_buff **sent_skbs = adapter->tx_obj.sent_skb_list;
	struct sk_buff *sent_skb;
	bool dummy_wrb;

	/* Wait for a max of 200ms for all the tx-completions to arrive. */
	do {
		while ((txcp = be_tx_compl_get(tx_cq))) {
			end_idx = AMAP_GET_BITS(struct amap_eth_tx_compl,
					wrb_index, txcp);
			be_tx_compl_process(adapter, end_idx);
			cmpl++;
		}
		if (cmpl) {
			be_cq_notify(adapter, tx_cq->id, false, cmpl);
			cmpl = 0;
		}

		if (atomic_read(&txq->used) == 0 || ++timeo > 200)
			break;

		mdelay(1);
	} while (true);

	if (atomic_read(&txq->used))
		dev_err(&adapter->pdev->dev, "%d pending tx-completions\n",
			atomic_read(&txq->used));

	/* free posted tx for which compls will never arrive */
	while (atomic_read(&txq->used)) {
		sent_skb = sent_skbs[txq->tail];
		end_idx = txq->tail;
		index_adv(&end_idx,
			wrb_cnt_for_skb(adapter, sent_skb, &dummy_wrb) - 1,
			txq->len);
		be_tx_compl_process(adapter, end_idx);
	}
}

static void be_mcc_queues_destroy(struct be_adapter *adapter)
{
	struct be_queue_info *q;

	q = &adapter->mcc_obj.q;
	if (q->created)
		be_cmd_q_destroy(adapter, q, QTYPE_MCCQ);
	be_queue_free(adapter, q);

	q = &adapter->mcc_obj.cq;
	if (q->created)
		be_cmd_q_destroy(adapter, q, QTYPE_CQ);
	be_queue_free(adapter, q);
}

/* Must be called only after TX qs are created as MCC shares TX EQ */
static int be_mcc_queues_create(struct be_adapter *adapter)
{
	struct be_queue_info *q, *cq;

	/* Alloc MCC compl queue */
	cq = &adapter->mcc_obj.cq;
	if (be_queue_alloc(adapter, cq, MCC_CQ_LEN,
			sizeof(struct be_mcc_compl)))
		goto err;

	/* Ask BE to create MCC compl queue; share TX's eq */
	if (be_cmd_cq_create(adapter, cq, &adapter->tx_eq.q, false, true, 0))
		goto mcc_cq_free;

	/* Alloc MCC queue */
	q = &adapter->mcc_obj.q;
	if (be_queue_alloc(adapter, q, MCC_Q_LEN, sizeof(struct be_mcc_wrb)))
		goto mcc_cq_destroy;

	/* Ask BE to create MCC queue */
	if (be_cmd_mccq_create(adapter, q, cq))
		goto mcc_q_free;

	return 0;

mcc_q_free:
	be_queue_free(adapter, q);
mcc_cq_destroy:
	be_cmd_q_destroy(adapter, cq, QTYPE_CQ);
mcc_cq_free:
	be_queue_free(adapter, cq);
err:
	return -1;
}

static void be_tx_queues_destroy(struct be_adapter *adapter)
{
	struct be_queue_info *q;

	q = &adapter->tx_obj.q;
	if (q->created)
		be_cmd_q_destroy(adapter, q, QTYPE_TXQ);
	be_queue_free(adapter, q);

	q = &adapter->tx_obj.cq;
	if (q->created)
		be_cmd_q_destroy(adapter, q, QTYPE_CQ);
	be_queue_free(adapter, q);

	/* Clear any residual events */
	be_eq_clean(adapter, &adapter->tx_eq);

	q = &adapter->tx_eq.q;
	if (q->created)
		be_cmd_q_destroy(adapter, q, QTYPE_EQ);
	be_queue_free(adapter, q);
}

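/* Tx resources are brought up strictly as EQ -> CQ -> WRB queue, and
 * each firmware-side create is undone in reverse on failure. The Tx EQ
 * runs with a fixed eq delay of 96 and adaptive interrupt coalescing
 * disabled. */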
static int be_tx_queues_create(struct be_adapter *adapter)
{
	struct be_queue_info *eq, *q, *cq;

	adapter->tx_eq.max_eqd = 0;
	adapter->tx_eq.min_eqd = 0;
	adapter->tx_eq.cur_eqd = 96;
	adapter->tx_eq.enable_aic = false;
	/* Alloc Tx Event queue */
	eq = &adapter->tx_eq.q;
	if (be_queue_alloc(adapter, eq, EVNT_Q_LEN, sizeof(struct be_eq_entry)))
		return -1;

	/* Ask BE to create Tx Event queue */
	if (be_cmd_eq_create(adapter, eq, adapter->tx_eq.cur_eqd))
		goto tx_eq_free;

	adapter->tx_eq.msix_vec_idx = adapter->msix_vec_next_idx++;

	/* Alloc TX eth compl queue */
	cq = &adapter->tx_obj.cq;
	if (be_queue_alloc(adapter, cq, TX_CQ_LEN,
			sizeof(struct be_eth_tx_compl)))
		goto tx_eq_destroy;

	/* Ask BE to create Tx eth compl queue */
	if (be_cmd_cq_create(adapter, cq, eq, false, false, 3))
		goto tx_cq_free;

	/* Alloc TX eth queue */
	q = &adapter->tx_obj.q;
	if (be_queue_alloc(adapter, q, TX_Q_LEN, sizeof(struct be_eth_wrb)))
		goto tx_cq_destroy;

	/* Ask BE to create Tx eth queue */
	if (be_cmd_txq_create(adapter, q, cq))
		goto tx_q_free;
	return 0;

tx_q_free:
	be_queue_free(adapter, q);
tx_cq_destroy:
	be_cmd_q_destroy(adapter, cq, QTYPE_CQ);
tx_cq_free:
	be_queue_free(adapter, cq);
tx_eq_destroy:
	be_cmd_q_destroy(adapter, eq, QTYPE_EQ);
tx_eq_free:
	be_queue_free(adapter, eq);
	return -1;
}

static void be_rx_queues_destroy(struct be_adapter *adapter)
{
	struct be_queue_info *q;
	struct be_rx_obj *rxo;
	int i;

	for_all_rx_queues(adapter, rxo, i) {
		q = &rxo->q;
		if (q->created) {
			be_cmd_q_destroy(adapter, q, QTYPE_RXQ);
			/* After the rxq is invalidated, wait for a grace time
			 * of 1ms for all dma to end and the flush compl to
			 * arrive
			 */
			mdelay(1);
			be_rx_q_clean(adapter, rxo);
		}
		be_queue_free(adapter, q);

		q = &rxo->cq;
		if (q->created)
			be_cmd_q_destroy(adapter, q, QTYPE_CQ);
		be_queue_free(adapter, q);

		/* Clear any residual events */
		q = &rxo->rx_eq.q;
		if (q->created) {
			be_eq_clean(adapter, &rxo->rx_eq);
			be_cmd_q_destroy(adapter, q, QTYPE_EQ);
		}
		be_queue_free(adapter, q);
	}
}

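/* Each rx object gets its own EQ (with adaptive coalescing enabled),
 * CQ and frag queue. Queue 0 is the default non-RSS queue; queues
 * 1..N are created RSS-enabled and their rss_ids are then programmed
 * into the indirection table in one be_cmd_rss_config() call. */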
static int be_rx_queues_create(struct be_adapter *adapter)
{
	struct be_queue_info *eq, *q, *cq;
	struct be_rx_obj *rxo;
	int rc, i;

	adapter->big_page_size = (1 << get_order(rx_frag_size)) * PAGE_SIZE;
	for_all_rx_queues(adapter, rxo, i) {
		rxo->adapter = adapter;
		/* Init last_frag_index so that the frag index in the first
		 * completion will never match */
		rxo->last_frag_index = 0xffff;
		rxo->rx_eq.max_eqd = BE_MAX_EQD;
		rxo->rx_eq.enable_aic = true;

		/* EQ */
		eq = &rxo->rx_eq.q;
		rc = be_queue_alloc(adapter, eq, EVNT_Q_LEN,
					sizeof(struct be_eq_entry));
		if (rc)
			goto err;

		rc = be_cmd_eq_create(adapter, eq, rxo->rx_eq.cur_eqd);
		if (rc)
			goto err;

		rxo->rx_eq.msix_vec_idx = adapter->msix_vec_next_idx++;

		/* CQ */
		cq = &rxo->cq;
		rc = be_queue_alloc(adapter, cq, RX_CQ_LEN,
				sizeof(struct be_eth_rx_compl));
		if (rc)
			goto err;

		rc = be_cmd_cq_create(adapter, cq, eq, false, false, 3);
		if (rc)
			goto err;

		/* Rx Q */
		q = &rxo->q;
		rc = be_queue_alloc(adapter, q, RX_Q_LEN,
				sizeof(struct be_eth_rx_d));
		if (rc)
			goto err;

		rc = be_cmd_rxq_create(adapter, q, cq->id, rx_frag_size,
			BE_MAX_JUMBO_FRAME_SIZE, adapter->if_handle,
			(i > 0) ? 1 : 0 /* rss enable */, &rxo->rss_id);
		if (rc)
			goto err;
	}

	if (be_multi_rxq(adapter)) {
		u8 rsstable[MAX_RSS_QS];

		for_all_rss_queues(adapter, rxo, i)
			rsstable[i] = rxo->rss_id;

		rc = be_cmd_rss_config(adapter, rsstable,
			adapter->num_rx_qs - 1);
		if (rc)
			goto err;
	}

	return 0;
err:
	be_rx_queues_destroy(adapter);
	return -1;
}

static bool event_peek(struct be_eq_obj *eq_obj)
{
	struct be_eq_entry *eqe = queue_tail_node(&eq_obj->q);

	if (!eqe->evt)
		return false;
	else
		return true;
}

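/* Legacy (INTx) interrupt handler. Lancer has no CEV_ISR register, so
 * there the EQs are peeked directly; on BE2/BE3 the ISR bitmap says
 * which event queues fired, and IRQ_NONE is returned when the read
 * comes back zero (the interrupt belonged to another device). */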
static irqreturn_t be_intx(int irq, void *dev)
{
	struct be_adapter *adapter = dev;
	struct be_rx_obj *rxo;
	int isr, i, tx = 0, rx = 0;

	if (lancer_chip(adapter)) {
		if (event_peek(&adapter->tx_eq))
			tx = event_handle(adapter, &adapter->tx_eq);
		for_all_rx_queues(adapter, rxo, i) {
			if (event_peek(&rxo->rx_eq))
				rx |= event_handle(adapter, &rxo->rx_eq);
		}

		if (!(tx || rx))
			return IRQ_NONE;

	} else {
		isr = ioread32(adapter->csr + CEV_ISR0_OFFSET +
			(adapter->tx_eq.q.id / 8) * CEV_ISR_SIZE);
		if (!isr)
			return IRQ_NONE;

		if ((1 << adapter->tx_eq.msix_vec_idx & isr))
			event_handle(adapter, &adapter->tx_eq);

		for_all_rx_queues(adapter, rxo, i) {
			if ((1 << rxo->rx_eq.msix_vec_idx & isr))
				event_handle(adapter, &rxo->rx_eq);
		}
	}

	return IRQ_HANDLED;
}

static irqreturn_t be_msix_rx(int irq, void *dev)
{
	struct be_rx_obj *rxo = dev;
	struct be_adapter *adapter = rxo->adapter;

	event_handle(adapter, &rxo->rx_eq);

	return IRQ_HANDLED;
}

static irqreturn_t be_msix_tx_mcc(int irq, void *dev)
{
	struct be_adapter *adapter = dev;

	event_handle(adapter, &adapter->tx_eq);

	return IRQ_HANDLED;
}

static inline bool do_gro(struct be_rx_obj *rxo,
			struct be_eth_rx_compl *rxcp, u8 err)
{
	int tcp_frame = AMAP_GET_BITS(struct amap_eth_rx_compl, tcpf, rxcp);

	if (err)
		rxo->stats.rxcp_err++;

	return (tcp_frame && !err) ? true : false;
}

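/* NAPI poll for one rx queue. Completions that repeat the previously
 * seen fragndx or carry numfrags == 0 are out-of-buffer (Lancer) or
 * flush (BE) artifacts and are skipped; everything else goes down the
 * GRO or the regular receive path. The CQ is only re-armed once the
 * budget is left unexhausted. */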
static int be_poll_rx(struct napi_struct *napi, int budget)
{
	struct be_eq_obj *rx_eq = container_of(napi, struct be_eq_obj, napi);
	struct be_rx_obj *rxo = container_of(rx_eq, struct be_rx_obj, rx_eq);
	struct be_adapter *adapter = rxo->adapter;
	struct be_queue_info *rx_cq = &rxo->cq;
	struct be_eth_rx_compl *rxcp;
	u32 work_done;
	u16 frag_index, num_rcvd;
	u8 err;

	rxo->stats.rx_polls++;
	for (work_done = 0; work_done < budget; work_done++) {
		rxcp = be_rx_compl_get(rxo);
		if (!rxcp)
			break;

		err = AMAP_GET_BITS(struct amap_eth_rx_compl, err, rxcp);
		frag_index = AMAP_GET_BITS(struct amap_eth_rx_compl, fragndx,
								rxcp);
		num_rcvd = AMAP_GET_BITS(struct amap_eth_rx_compl, numfrags,
								rxcp);

		/* Skip out-of-buffer compl (lancer) or flush compl (BE) */
		if (likely(frag_index != rxo->last_frag_index &&
				num_rcvd != 0)) {
			rxo->last_frag_index = frag_index;

			if (do_gro(rxo, rxcp, err))
				be_rx_compl_process_gro(adapter, rxo, rxcp);
			else
				be_rx_compl_process(adapter, rxo, rxcp);
		}

		be_rx_compl_reset(rxcp);
	}

	/* Refill the queue */
	if (atomic_read(&rxo->q.used) < RX_FRAGS_REFILL_WM)
		be_post_rx_frags(rxo);

	/* All consumed */
	if (work_done < budget) {
		napi_complete(napi);
		be_cq_notify(adapter, rx_cq->id, true, work_done);
	} else {
		/* More to be consumed; continue with interrupts disabled */
		be_cq_notify(adapter, rx_cq->id, false, work_done);
	}
	return work_done;
}

/* As TX and MCC share the same EQ, check for both TX and MCC completions.
 * For TX/MCC we don't honour budget; consume everything
 */
static int be_poll_tx_mcc(struct napi_struct *napi, int budget)
{
	struct be_eq_obj *tx_eq = container_of(napi, struct be_eq_obj, napi);
	struct be_adapter *adapter =
		container_of(tx_eq, struct be_adapter, tx_eq);
	struct be_queue_info *txq = &adapter->tx_obj.q;
	struct be_queue_info *tx_cq = &adapter->tx_obj.cq;
	struct be_eth_tx_compl *txcp;
	int tx_compl = 0, mcc_compl, status = 0;
	u16 end_idx;

	while ((txcp = be_tx_compl_get(tx_cq))) {
		end_idx = AMAP_GET_BITS(struct amap_eth_tx_compl,
				wrb_index, txcp);
		be_tx_compl_process(adapter, end_idx);
		tx_compl++;
	}

	mcc_compl = be_process_mcc(adapter, &status);

	napi_complete(napi);

	if (mcc_compl) {
		struct be_mcc_obj *mcc_obj = &adapter->mcc_obj;
		be_cq_notify(adapter, mcc_obj->cq.id, true, mcc_compl);
	}

	if (tx_compl) {
		be_cq_notify(adapter, adapter->tx_obj.cq.id, true, tx_compl);

		/* As Tx wrbs have been freed up, wake up netdev queue if
		 * it was stopped due to lack of tx wrbs.
		 */
		if (netif_queue_stopped(adapter->netdev) &&
			atomic_read(&txq->used) < txq->len / 2) {
			netif_wake_queue(adapter->netdev);
		}

		tx_stats(adapter)->be_tx_events++;
		tx_stats(adapter)->be_tx_compl += tx_compl;
	}

	return 1;
}

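/* Decode the Unrecoverable Error status registers. A bit is reported
 * only when it is set in the status word and not masked off by the
 * corresponding mask register; each surviving bit is printed using the
 * ue_status_low_desc/ue_status_hi_desc name tables. */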
void be_detect_dump_ue(struct be_adapter *adapter)
{
	u32 ue_status_lo, ue_status_hi, ue_status_lo_mask, ue_status_hi_mask;
	u32 i;

	pci_read_config_dword(adapter->pdev,
				PCICFG_UE_STATUS_LOW, &ue_status_lo);
	pci_read_config_dword(adapter->pdev,
				PCICFG_UE_STATUS_HIGH, &ue_status_hi);
	pci_read_config_dword(adapter->pdev,
				PCICFG_UE_STATUS_LOW_MASK, &ue_status_lo_mask);
	pci_read_config_dword(adapter->pdev,
				PCICFG_UE_STATUS_HI_MASK, &ue_status_hi_mask);

	ue_status_lo = (ue_status_lo & (~ue_status_lo_mask));
	ue_status_hi = (ue_status_hi & (~ue_status_hi_mask));

	if (ue_status_lo || ue_status_hi) {
		adapter->ue_detected = true;
		dev_err(&adapter->pdev->dev, "UE Detected!!\n");
	}

	if (ue_status_lo) {
		for (i = 0; ue_status_lo; ue_status_lo >>= 1, i++) {
			if (ue_status_lo & 1)
				dev_err(&adapter->pdev->dev,
				"UE: %s bit set\n", ue_status_low_desc[i]);
		}
	}
	if (ue_status_hi) {
		for (i = 0; ue_status_hi; ue_status_hi >>= 1, i++) {
			if (ue_status_hi & 1)
				dev_err(&adapter->pdev->dev,
				"UE: %s bit set\n", ue_status_hi_desc[i]);
		}
	}
}

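/* Once-a-second housekeeping. While the interface is down only MCC
 * completions are reaped (with the CQ left unarmed); once it is up the
 * worker also issues the stats command, updates tx/rx rates and EQ
 * delays, replenishes starved rx queues, and polls for unrecoverable
 * errors on non-Lancer chips. */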
static void be_worker(struct work_struct *work)
{
	struct be_adapter *adapter =
		container_of(work, struct be_adapter, work.work);
	struct be_rx_obj *rxo;
	int i;

	/* when interrupts are not yet enabled, just reap any pending
	 * mcc completions */
	if (!netif_running(adapter->netdev)) {
		int mcc_compl, status = 0;

		mcc_compl = be_process_mcc(adapter, &status);

		if (mcc_compl) {
			struct be_mcc_obj *mcc_obj = &adapter->mcc_obj;
			be_cq_notify(adapter, mcc_obj->cq.id, false, mcc_compl);
		}
		goto reschedule;
	}

	if (!adapter->stats_ioctl_sent)
		be_cmd_get_stats(adapter, &adapter->stats_cmd);

	be_tx_rate_update(adapter);

	for_all_rx_queues(adapter, rxo, i) {
		be_rx_rate_update(rxo);
		be_rx_eqd_update(adapter, rxo);

		if (rxo->rx_post_starved) {
			rxo->rx_post_starved = false;
			be_post_rx_frags(rxo);
		}
	}
	if (!adapter->ue_detected && !lancer_chip(adapter))
		be_detect_dump_ue(adapter);

reschedule:
	schedule_delayed_work(&adapter->work, msecs_to_jiffies(1000));
}

static void be_msix_disable(struct be_adapter *adapter)
{
	if (adapter->msix_enabled) {
		pci_disable_msix(adapter->pdev);
		adapter->msix_enabled = false;
	}
}

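/* Multiple rx queues are usable only when the function advertises RSS,
 * SR-IOV is off, and the 0x400 bit of function_mode is clear
 * (presumably a multi-channel/FLEX10 mode flag; it is not named
 * anywhere in this file). */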
static int be_num_rxqs_get(struct be_adapter *adapter)
{
	if (multi_rxq && (adapter->function_caps & BE_FUNCTION_CAPS_RSS) &&
		!adapter->sriov_enabled && !(adapter->function_mode & 0x400)) {
		return 1 + MAX_RSS_QS; /* one default non-RSS queue */
	} else {
		dev_warn(&adapter->pdev->dev,
			"No support for multiple RX queues\n");
		return 1;
	}
}

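/* The MSI-x layout is one vector per rx queue plus one shared Tx/MCC
 * vector. If the full complement cannot be allocated, the call retries
 * with whatever count pci_enable_msix() reported as available (as long
 * as it covers BE_MIN_MSIX_VECTORS) and shrinks num_rx_qs to match; on
 * total failure msix_enabled stays false and INTx is used instead. */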
static void be_msix_enable(struct be_adapter *adapter)
{
#define BE_MIN_MSIX_VECTORS	(1 + 1) /* Rx + Tx */
	int i, status;

	adapter->num_rx_qs = be_num_rxqs_get(adapter);

	for (i = 0; i < (adapter->num_rx_qs + 1); i++)
		adapter->msix_entries[i].entry = i;

	status = pci_enable_msix(adapter->pdev, adapter->msix_entries,
			adapter->num_rx_qs + 1);
	if (status == 0) {
		goto done;
	} else if (status >= BE_MIN_MSIX_VECTORS) {
		if (pci_enable_msix(adapter->pdev, adapter->msix_entries,
				status) == 0) {
			adapter->num_rx_qs = status - 1;
			dev_warn(&adapter->pdev->dev,
				"Could alloc only %d MSIx vectors. "
				"Using %d RX Qs\n", status, adapter->num_rx_qs);
			goto done;
		}
	}
	return;
done:
	adapter->msix_enabled = true;
}

static void be_sriov_enable(struct be_adapter *adapter)
{
	be_check_sriov_fn_type(adapter);
#ifdef CONFIG_PCI_IOV
	if (be_physfn(adapter) && num_vfs) {
		int status;

		status = pci_enable_sriov(adapter->pdev, num_vfs);
		adapter->sriov_enabled = status ? false : true;
	}
#endif
}

static void be_sriov_disable(struct be_adapter *adapter)
{
#ifdef CONFIG_PCI_IOV
	if (adapter->sriov_enabled) {
		pci_disable_sriov(adapter->pdev);
		adapter->sriov_enabled = false;
	}
#endif
}

static inline int be_msix_vec_get(struct be_adapter *adapter,
			struct be_eq_obj *eq_obj)
{
	return adapter->msix_entries[eq_obj->msix_vec_idx].vector;
}

static int be_request_irq(struct be_adapter *adapter,
		struct be_eq_obj *eq_obj,
		void *handler, char *desc, void *context)
{
	struct net_device *netdev = adapter->netdev;
	int vec;

	sprintf(eq_obj->desc, "%s-%s", netdev->name, desc);
	vec = be_msix_vec_get(adapter, eq_obj);
	return request_irq(vec, handler, 0, eq_obj->desc, context);
}

static void be_free_irq(struct be_adapter *adapter, struct be_eq_obj *eq_obj,
			void *context)
{
	int vec = be_msix_vec_get(adapter, eq_obj);
	free_irq(vec, context);
}

static int be_msix_register(struct be_adapter *adapter)
{
	struct be_rx_obj *rxo;
	int status, i;
	char qname[10];

	status = be_request_irq(adapter, &adapter->tx_eq, be_msix_tx_mcc, "tx",
				adapter);
	if (status)
		goto err;

	for_all_rx_queues(adapter, rxo, i) {
		sprintf(qname, "rxq%d", i);
		status = be_request_irq(adapter, &rxo->rx_eq, be_msix_rx,
				qname, rxo);
		if (status)
			goto err_msix;
	}

	return 0;

err_msix:
	be_free_irq(adapter, &adapter->tx_eq, adapter);

	for (i--, rxo = &adapter->rx_obj[i]; i >= 0; i--, rxo--)
		be_free_irq(adapter, &rxo->rx_eq, rxo);

err:
	dev_warn(&adapter->pdev->dev,
		"MSIX Request IRQ failed - err %d\n", status);
	pci_disable_msix(adapter->pdev);
	adapter->msix_enabled = false;
	return status;
}

static int be_irq_register(struct be_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	int status;

	if (adapter->msix_enabled) {
		status = be_msix_register(adapter);
		if (status == 0)
			goto done;
		/* INTx is not supported for VF */
		if (!be_physfn(adapter))
			return status;
	}

	/* INTx */
	netdev->irq = adapter->pdev->irq;
	status = request_irq(netdev->irq, be_intx, IRQF_SHARED, netdev->name,
			adapter);
	if (status) {
		dev_err(&adapter->pdev->dev,
			"INTx request IRQ failed - err %d\n", status);
		return status;
	}
done:
	adapter->isr_registered = true;
	return 0;
}

static void be_irq_unregister(struct be_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	struct be_rx_obj *rxo;
	int i;

	if (!adapter->isr_registered)
		return;

	/* INTx */
	if (!adapter->msix_enabled) {
		free_irq(netdev->irq, adapter);
		goto done;
	}

	/* MSIx */
	be_free_irq(adapter, &adapter->tx_eq, adapter);

	for_all_rx_queues(adapter, rxo, i)
		be_free_irq(adapter, &rxo->rx_eq, rxo);

done:
	adapter->isr_registered = false;
}

static int be_close(struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_rx_obj *rxo;
	struct be_eq_obj *tx_eq = &adapter->tx_eq;
	int vec, i;

	be_async_mcc_disable(adapter);

	netif_stop_queue(netdev);
	netif_carrier_off(netdev);
	adapter->link_up = false;

	if (!lancer_chip(adapter))
		be_intr_set(adapter, false);

	if (adapter->msix_enabled) {
		vec = be_msix_vec_get(adapter, tx_eq);
		synchronize_irq(vec);

		for_all_rx_queues(adapter, rxo, i) {
			vec = be_msix_vec_get(adapter, &rxo->rx_eq);
			synchronize_irq(vec);
		}
	} else {
		synchronize_irq(netdev->irq);
	}
	be_irq_unregister(adapter);

	for_all_rx_queues(adapter, rxo, i)
		napi_disable(&rxo->rx_eq.napi);

	napi_disable(&tx_eq->napi);

	/* Wait for all pending tx completions to arrive so that
	 * all tx skbs are freed.
	 */
	be_tx_compl_clean(adapter);

	return 0;
}

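/* Bring the interface up: post rx buffers, enable NAPI, register and
 * unmask interrupts, arm the event queues (they are created unarmed),
 * then query link state. vlan and flow-control configuration is issued
 * from the PF only, presumably because VFs lack the privilege for
 * those commands. */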
static int be_open(struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_eq_obj *tx_eq = &adapter->tx_eq;
	struct be_rx_obj *rxo;
	bool link_up;
	int status, i;
	u8 mac_speed;
	u16 link_speed;

	for_all_rx_queues(adapter, rxo, i) {
		be_post_rx_frags(rxo);
		napi_enable(&rxo->rx_eq.napi);
	}
	napi_enable(&tx_eq->napi);

	be_irq_register(adapter);

	if (!lancer_chip(adapter))
		be_intr_set(adapter, true);

	/* The evt queues are created in unarmed state; arm them */
	for_all_rx_queues(adapter, rxo, i) {
		be_eq_notify(adapter, rxo->rx_eq.q.id, true, false, 0);
		be_cq_notify(adapter, rxo->cq.id, true, 0);
	}
	be_eq_notify(adapter, tx_eq->q.id, true, false, 0);

	/* Now that interrupts are on we can process async mcc */
	be_async_mcc_enable(adapter);

	status = be_cmd_link_status_query(adapter, &link_up, &mac_speed,
			&link_speed);
	if (status)
		goto err;
	be_link_status_update(adapter, link_up);

	if (be_physfn(adapter)) {
		status = be_vid_config(adapter, false, 0);
		if (status)
			goto err;

		status = be_cmd_set_flow_control(adapter,
				adapter->tx_fc, adapter->rx_fc);
		if (status)
			goto err;
	}

	return 0;
err:
	be_close(adapter->netdev);
	return -EIO;
}

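/* Program or clear the Wake-on-LAN magic-packet filter. Enabling also
 * sets the PM control register and flags the device wakeup-capable for
 * D3hot/D3cold; disabling passes a zeroed MAC so firmware drops the
 * filter. The command buffer must be DMA-coherent for the mailbox
 * call, hence pci_alloc_consistent(). */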
static int be_setup_wol(struct be_adapter *adapter, bool enable)
{
	struct be_dma_mem cmd;
	int status = 0;
	u8 mac[ETH_ALEN];

	memset(mac, 0, ETH_ALEN);

	cmd.size = sizeof(struct be_cmd_req_acpi_wol_magic_config);
	cmd.va = pci_alloc_consistent(adapter->pdev, cmd.size, &cmd.dma);
	if (cmd.va == NULL)
		return -1;
	memset(cmd.va, 0, cmd.size);

	if (enable) {
		status = pci_write_config_dword(adapter->pdev,
			PCICFG_PM_CONTROL_OFFSET, PCICFG_PM_CONTROL_MASK);
		if (status) {
			dev_err(&adapter->pdev->dev,
				"Could not enable Wake-on-lan\n");
			pci_free_consistent(adapter->pdev, cmd.size, cmd.va,
					cmd.dma);
			return status;
		}
		status = be_cmd_enable_magic_wol(adapter,
				adapter->netdev->dev_addr, &cmd);
		pci_enable_wake(adapter->pdev, PCI_D3hot, 1);
		pci_enable_wake(adapter->pdev, PCI_D3cold, 1);
	} else {
		status = be_cmd_enable_magic_wol(adapter, mac, &cmd);
		pci_enable_wake(adapter->pdev, PCI_D3hot, 0);
		pci_enable_wake(adapter->pdev, PCI_D3cold, 0);
	}

	pci_free_consistent(adapter->pdev, cmd.size, cmd.va, cmd.dma);
	return status;
}

/*
 * Generate a seed MAC address from the PF MAC address using jhash.
 * MAC addresses for the VFs are assigned incrementally starting from
 * the seed. These addresses are programmed in the ASIC by the PF and
 * the VF driver queries for the MAC address during its probe.
 */
static inline int be_vf_eth_addr_config(struct be_adapter *adapter)
{
	u32 vf = 0;
	int status = 0;
	u8 mac[ETH_ALEN];

	be_vf_eth_addr_generate(adapter, mac);

	for (vf = 0; vf < num_vfs; vf++) {
		status = be_cmd_pmac_add(adapter, mac,
					adapter->vf_cfg[vf].vf_if_handle,
					&adapter->vf_cfg[vf].vf_pmac_id);
		if (status)
			dev_err(&adapter->pdev->dev,
				"Mac address add failed for VF %d\n", vf);
		else
			memcpy(adapter->vf_cfg[vf].vf_mac_addr, mac, ETH_ALEN);

		mac[5] += 1;
	}
	return status;
}

static inline void be_vf_eth_addr_rem(struct be_adapter *adapter)
{
	u32 vf;

	for (vf = 0; vf < num_vfs; vf++) {
		if (adapter->vf_cfg[vf].vf_pmac_id != BE_INVALID_PMAC_ID)
			be_cmd_pmac_del(adapter,
					adapter->vf_cfg[vf].vf_if_handle,
					adapter->vf_cfg[vf].vf_pmac_id);
	}
}

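/* One-time function-level bring-up. The interface is created first
 * (the PF additionally creates one interface per VF and later seeds
 * their MAC addresses; a VF instead queries the MAC the PF programmed
 * for it), then tx, rx and MCC queues, unwinding in reverse order on
 * any failure. */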
static int be_setup(struct be_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	u32 cap_flags, en_flags, vf = 0;
	int status;
	u8 mac[ETH_ALEN];

	cap_flags = en_flags = BE_IF_FLAGS_UNTAGGED | BE_IF_FLAGS_BROADCAST;

	if (be_physfn(adapter)) {
		cap_flags |= BE_IF_FLAGS_MCAST_PROMISCUOUS |
				BE_IF_FLAGS_PROMISCUOUS |
				BE_IF_FLAGS_PASS_L3L4_ERRORS;
		en_flags |= BE_IF_FLAGS_PASS_L3L4_ERRORS;

		if (be_multi_rxq(adapter)) {
			cap_flags |= BE_IF_FLAGS_RSS;
			en_flags |= BE_IF_FLAGS_RSS;
		}
	}

	status = be_cmd_if_create(adapter, cap_flags, en_flags,
			netdev->dev_addr, false/* pmac_invalid */,
			&adapter->if_handle, &adapter->pmac_id, 0);
	if (status != 0)
		goto do_none;

	if (be_physfn(adapter)) {
		while (vf < num_vfs) {
			cap_flags = en_flags = BE_IF_FLAGS_UNTAGGED
					| BE_IF_FLAGS_BROADCAST;
			status = be_cmd_if_create(adapter, cap_flags, en_flags,
					mac, true,
					&adapter->vf_cfg[vf].vf_if_handle,
					NULL, vf+1);
			if (status) {
				dev_err(&adapter->pdev->dev,
				"Interface Create failed for VF %d\n", vf);
				goto if_destroy;
			}
			adapter->vf_cfg[vf].vf_pmac_id = BE_INVALID_PMAC_ID;
			vf++;
		}
	} else {
		status = be_cmd_mac_addr_query(adapter, mac,
			MAC_ADDRESS_TYPE_NETWORK, false, adapter->if_handle);
		if (!status) {
			memcpy(adapter->netdev->dev_addr, mac, ETH_ALEN);
			memcpy(adapter->netdev->perm_addr, mac, ETH_ALEN);
		}
	}

	status = be_tx_queues_create(adapter);
	if (status != 0)
		goto if_destroy;

	status = be_rx_queues_create(adapter);
	if (status != 0)
		goto tx_qs_destroy;

	status = be_mcc_queues_create(adapter);
	if (status != 0)
		goto rx_qs_destroy;

	if (be_physfn(adapter)) {
		status = be_vf_eth_addr_config(adapter);
		if (status)
			goto mcc_q_destroy;
	}

	adapter->link_speed = -1;

	return 0;

mcc_q_destroy:
	if (be_physfn(adapter))
		be_vf_eth_addr_rem(adapter);
	be_mcc_queues_destroy(adapter);
rx_qs_destroy:
	be_rx_queues_destroy(adapter);
tx_qs_destroy:
	be_tx_queues_destroy(adapter);
if_destroy:
	for (vf = 0; vf < num_vfs; vf++)
		if (adapter->vf_cfg[vf].vf_if_handle)
			be_cmd_if_destroy(adapter,
					adapter->vf_cfg[vf].vf_if_handle);
	be_cmd_if_destroy(adapter, adapter->if_handle);
do_none:
	return status;
}

static int be_clear(struct be_adapter *adapter)
{
	if (be_physfn(adapter))
		be_vf_eth_addr_rem(adapter);

	be_mcc_queues_destroy(adapter);
	be_rx_queues_destroy(adapter);
	be_tx_queues_destroy(adapter);

	be_cmd_if_destroy(adapter, adapter->if_handle);

	/* tell fw we're done with firing cmds */
	be_cmd_fw_clean(adapter);
	return 0;
}

#define FW_FILE_HDR_SIGN	"ServerEngines Corp. "
static bool be_flash_redboot(struct be_adapter *adapter,
			const u8 *p, u32 img_start, int image_size,
			int hdr_size)
{
	u32 crc_offset;
	u8 flashed_crc[4];
	int status;

	crc_offset = hdr_size + img_start + image_size - 4;

	p += crc_offset;

	status = be_cmd_get_flash_crc(adapter, flashed_crc,
			(image_size - 4));
	if (status) {
		dev_err(&adapter->pdev->dev,
		"could not get crc from flash, not flashing redboot\n");
		return false;
	}

	/* update redboot only if crc does not match */
	if (!memcmp(flashed_crc, p, 4))
		return false;
	else
		return true;
}

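/* Write one UFI image set to flash. Per-generation tables map each
 * image type to its flash offset and maximum size; the RedBoot
 * component is flashed only when its CRC differs from what is already
 * on flash, and the NCSI image is skipped for firmware older than
 * 3.102.148.0. Data moves in 32KB chunks, using OPER_SAVE for
 * intermediate chunks and OPER_FLASH to commit the final one. */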
Ajit Khaparde3f0d4562010-02-09 01:30:35 +00002391static int be_flash_data(struct be_adapter *adapter,
Ajit Khaparde84517482009-09-04 03:12:16 +00002392 const struct firmware *fw,
Ajit Khaparde3f0d4562010-02-09 01:30:35 +00002393 struct be_dma_mem *flash_cmd, int num_of_images)
2394
Ajit Khaparde84517482009-09-04 03:12:16 +00002395{
Ajit Khaparde3f0d4562010-02-09 01:30:35 +00002396 int status = 0, i, filehdr_size = 0;
2397 u32 total_bytes = 0, flash_op;
Ajit Khaparde84517482009-09-04 03:12:16 +00002398 int num_bytes;
2399 const u8 *p = fw->data;
2400 struct be_cmd_write_flashrom *req = flash_cmd->va;
Joe Perches215faf92010-12-21 02:16:10 -08002401 const struct flash_comp *pflashcomp;
Sarveshwar Bandi9fe96932010-03-02 22:37:28 +00002402 int num_comp;
Ajit Khaparde84517482009-09-04 03:12:16 +00002403
Joe Perches215faf92010-12-21 02:16:10 -08002404 static const struct flash_comp gen3_flash_types[9] = {
Ajit Khaparde3f0d4562010-02-09 01:30:35 +00002405 { FLASH_iSCSI_PRIMARY_IMAGE_START_g3, IMG_TYPE_ISCSI_ACTIVE,
2406 FLASH_IMAGE_MAX_SIZE_g3},
2407 { FLASH_REDBOOT_START_g3, IMG_TYPE_REDBOOT,
2408 FLASH_REDBOOT_IMAGE_MAX_SIZE_g3},
2409 { FLASH_iSCSI_BIOS_START_g3, IMG_TYPE_BIOS,
2410 FLASH_BIOS_IMAGE_MAX_SIZE_g3},
2411 { FLASH_PXE_BIOS_START_g3, IMG_TYPE_PXE_BIOS,
2412 FLASH_BIOS_IMAGE_MAX_SIZE_g3},
2413 { FLASH_FCoE_BIOS_START_g3, IMG_TYPE_FCOE_BIOS,
2414 FLASH_BIOS_IMAGE_MAX_SIZE_g3},
2415 { FLASH_iSCSI_BACKUP_IMAGE_START_g3, IMG_TYPE_ISCSI_BACKUP,
2416 FLASH_IMAGE_MAX_SIZE_g3},
2417 { FLASH_FCoE_PRIMARY_IMAGE_START_g3, IMG_TYPE_FCOE_FW_ACTIVE,
2418 FLASH_IMAGE_MAX_SIZE_g3},
2419 { FLASH_FCoE_BACKUP_IMAGE_START_g3, IMG_TYPE_FCOE_FW_BACKUP,
Sarveshwar Bandi9fe96932010-03-02 22:37:28 +00002420 FLASH_IMAGE_MAX_SIZE_g3},
2421 { FLASH_NCSI_START_g3, IMG_TYPE_NCSI_FW,
2422 FLASH_NCSI_IMAGE_MAX_SIZE_g3}
Ajit Khaparde3f0d4562010-02-09 01:30:35 +00002423 };
Joe Perches215faf92010-12-21 02:16:10 -08002424 static const struct flash_comp gen2_flash_types[8] = {
Ajit Khaparde3f0d4562010-02-09 01:30:35 +00002425 { FLASH_iSCSI_PRIMARY_IMAGE_START_g2, IMG_TYPE_ISCSI_ACTIVE,
2426 FLASH_IMAGE_MAX_SIZE_g2},
2427 { FLASH_REDBOOT_START_g2, IMG_TYPE_REDBOOT,
2428 FLASH_REDBOOT_IMAGE_MAX_SIZE_g2},
2429 { FLASH_iSCSI_BIOS_START_g2, IMG_TYPE_BIOS,
2430 FLASH_BIOS_IMAGE_MAX_SIZE_g2},
2431 { FLASH_PXE_BIOS_START_g2, IMG_TYPE_PXE_BIOS,
2432 FLASH_BIOS_IMAGE_MAX_SIZE_g2},
2433 { FLASH_FCoE_BIOS_START_g2, IMG_TYPE_FCOE_BIOS,
2434 FLASH_BIOS_IMAGE_MAX_SIZE_g2},
2435 { FLASH_iSCSI_BACKUP_IMAGE_START_g2, IMG_TYPE_ISCSI_BACKUP,
2436 FLASH_IMAGE_MAX_SIZE_g2},
2437 { FLASH_FCoE_PRIMARY_IMAGE_START_g2, IMG_TYPE_FCOE_FW_ACTIVE,
2438 FLASH_IMAGE_MAX_SIZE_g2},
2439 { FLASH_FCoE_BACKUP_IMAGE_START_g2, IMG_TYPE_FCOE_FW_BACKUP,
2440 FLASH_IMAGE_MAX_SIZE_g2}
2441 };
2442
2443 if (adapter->generation == BE_GEN3) {
2444 pflashcomp = gen3_flash_types;
2445 filehdr_size = sizeof(struct flash_file_hdr_g3);
Joe Perches215faf92010-12-21 02:16:10 -08002446 num_comp = ARRAY_SIZE(gen3_flash_types);
Ajit Khaparde3f0d4562010-02-09 01:30:35 +00002447 } else {
2448 pflashcomp = gen2_flash_types;
2449 filehdr_size = sizeof(struct flash_file_hdr_g2);
Joe Perches215faf92010-12-21 02:16:10 -08002450 num_comp = ARRAY_SIZE(gen2_flash_types);
Ajit Khaparde84517482009-09-04 03:12:16 +00002451 }
Sarveshwar Bandi9fe96932010-03-02 22:37:28 +00002452 for (i = 0; i < num_comp; i++) {
2453 if ((pflashcomp[i].optype == IMG_TYPE_NCSI_FW) &&
2454 memcmp(adapter->fw_ver, "3.102.148.0", 11) < 0)
2455 continue;
Ajit Khaparde3f0d4562010-02-09 01:30:35 +00002456 if ((pflashcomp[i].optype == IMG_TYPE_REDBOOT) &&
2457 (!be_flash_redboot(adapter, fw->data,
2458 pflashcomp[i].offset, pflashcomp[i].size,
2459 filehdr_size)))
2460 continue;
2461 p = fw->data;
2462 p += filehdr_size + pflashcomp[i].offset
2463 + (num_of_images * sizeof(struct image_hdr));
2464 if (p + pflashcomp[i].size > fw->data + fw->size)
Ajit Khaparde84517482009-09-04 03:12:16 +00002465 return -1;
Ajit Khaparde3f0d4562010-02-09 01:30:35 +00002466 total_bytes = pflashcomp[i].size;
2467 while (total_bytes) {
2468 if (total_bytes > 32*1024)
2469 num_bytes = 32*1024;
2470 else
2471 num_bytes = total_bytes;
2472 total_bytes -= num_bytes;
Ajit Khaparde84517482009-09-04 03:12:16 +00002473
Ajit Khaparde3f0d4562010-02-09 01:30:35 +00002474 if (!total_bytes)
2475 flash_op = FLASHROM_OPER_FLASH;
2476 else
2477 flash_op = FLASHROM_OPER_SAVE;
2478 memcpy(req->params.data_buf, p, num_bytes);
2479 p += num_bytes;
2480 status = be_cmd_write_flashrom(adapter, flash_cmd,
2481 pflashcomp[i].optype, flash_op, num_bytes);
2482 if (status) {
2483 dev_err(&adapter->pdev->dev,
2484 "cmd to write to flash rom failed.\n");
2485 return -1;
2486 }
2487 yield();
Ajit Khaparde84517482009-09-04 03:12:16 +00002488 }
Ajit Khaparde84517482009-09-04 03:12:16 +00002489 }
Ajit Khaparde84517482009-09-04 03:12:16 +00002490 return 0;
2491}
2492
Ajit Khaparde3f0d4562010-02-09 01:30:35 +00002493static int get_ufigen_type(struct flash_file_hdr_g2 *fhdr)
2494{
2495 if (fhdr == NULL)
2496 return 0;
2497 if (fhdr->build[0] == '3')
2498 return BE_GEN3;
2499 else if (fhdr->build[0] == '2')
2500 return BE_GEN2;
2501 else
2502 return 0;
2503}
2504
Ajit Khaparde84517482009-09-04 03:12:16 +00002505int be_load_fw(struct be_adapter *adapter, u8 *func)
2506{
2507 char fw_file[ETHTOOL_FLASH_MAX_FILENAME];
2508 const struct firmware *fw;
Ajit Khaparde3f0d4562010-02-09 01:30:35 +00002509 struct flash_file_hdr_g2 *fhdr;
2510 struct flash_file_hdr_g3 *fhdr3;
2511 struct image_hdr *img_hdr_ptr = NULL;
Ajit Khaparde84517482009-09-04 03:12:16 +00002512 struct be_dma_mem flash_cmd;
Ajit Khaparde8b93b712010-03-31 01:57:10 +00002513 int status, i = 0, num_imgs = 0;
Ajit Khaparde84517482009-09-04 03:12:16 +00002514 const u8 *p;
Ajit Khaparde84517482009-09-04 03:12:16 +00002515
Sarveshwar Bandid9efd2a2010-11-18 23:44:45 +00002516 if (!netif_running(adapter->netdev)) {
2517 dev_err(&adapter->pdev->dev,
2518 "Firmware load not allowed (interface is down)\n");
2519 return -EPERM;
2520 }
2521
Ajit Khaparde84517482009-09-04 03:12:16 +00002522 strcpy(fw_file, func);
2523
2524 status = request_firmware(&fw, fw_file, &adapter->pdev->dev);
2525 if (status)
2526 goto fw_exit;
2527
2528 p = fw->data;
Ajit Khaparde3f0d4562010-02-09 01:30:35 +00002529 fhdr = (struct flash_file_hdr_g2 *) p;
Ajit Khaparde84517482009-09-04 03:12:16 +00002530 dev_info(&adapter->pdev->dev, "Flashing firmware file %s\n", fw_file);
2531
Ajit Khaparde84517482009-09-04 03:12:16 +00002532 flash_cmd.size = sizeof(struct be_cmd_write_flashrom) + 32*1024;
2533 flash_cmd.va = pci_alloc_consistent(adapter->pdev, flash_cmd.size,
2534 &flash_cmd.dma);
2535 if (!flash_cmd.va) {
2536 status = -ENOMEM;
2537 dev_err(&adapter->pdev->dev,
2538 "Memory allocation failure while flashing\n");
2539 goto fw_exit;
2540 }
2541
Ajit Khaparde3f0d4562010-02-09 01:30:35 +00002542 if ((adapter->generation == BE_GEN3) &&
2543 (get_ufigen_type(fhdr) == BE_GEN3)) {
2544 fhdr3 = (struct flash_file_hdr_g3 *) fw->data;
Ajit Khaparde8b93b712010-03-31 01:57:10 +00002545 num_imgs = le32_to_cpu(fhdr3->num_imgs);
2546 for (i = 0; i < num_imgs; i++) {
Ajit Khaparde3f0d4562010-02-09 01:30:35 +00002547 img_hdr_ptr = (struct image_hdr *) (fw->data +
2548 (sizeof(struct flash_file_hdr_g3) +
Ajit Khaparde8b93b712010-03-31 01:57:10 +00002549 i * sizeof(struct image_hdr)));
2550 if (le32_to_cpu(img_hdr_ptr->imageid) == 1)
2551 status = be_flash_data(adapter, fw, &flash_cmd,
2552 num_imgs);
Ajit Khaparde3f0d4562010-02-09 01:30:35 +00002553 }
2554 } else if ((adapter->generation == BE_GEN2) &&
2555 (get_ufigen_type(fhdr) == BE_GEN2)) {
2556 status = be_flash_data(adapter, fw, &flash_cmd, 0);
2557 } else {
2558 dev_err(&adapter->pdev->dev,
2559 "UFI and Interface are not compatible for flashing\n");
2560 status = -1;
Ajit Khaparde84517482009-09-04 03:12:16 +00002561 }
2562
2563 pci_free_consistent(adapter->pdev, flash_cmd.size, flash_cmd.va,
2564 flash_cmd.dma);
2565 if (status) {
2566 dev_err(&adapter->pdev->dev, "Firmware load error\n");
2567 goto fw_exit;
2568 }
2569
André Goddard Rosaaf901ca2009-11-14 13:09:05 -02002570 dev_info(&adapter->pdev->dev, "Firmware flashed successfully\n");
Ajit Khaparde84517482009-09-04 03:12:16 +00002571
2572fw_exit:
2573 release_firmware(fw);
2574 return status;
2575}
2576
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002577static struct net_device_ops be_netdev_ops = {
2578 .ndo_open = be_open,
2579 .ndo_stop = be_close,
2580 .ndo_start_xmit = be_xmit,
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002581 .ndo_set_rx_mode = be_set_multicast_list,
2582 .ndo_set_mac_address = be_mac_addr_set,
2583 .ndo_change_mtu = be_change_mtu,
2584 .ndo_validate_addr = eth_validate_addr,
2585 .ndo_vlan_rx_register = be_vlan_register,
2586 .ndo_vlan_rx_add_vid = be_vlan_add_vid,
2587 .ndo_vlan_rx_kill_vid = be_vlan_rem_vid,
Ajit Khaparde64600ea2010-07-23 01:50:34 +00002588 .ndo_set_vf_mac = be_set_vf_mac,
Ajit Khaparde1da87b72010-07-23 01:51:22 +00002589 .ndo_set_vf_vlan = be_set_vf_vlan,
Ajit Khapardee1d18732010-07-23 01:52:13 +00002590 .ndo_set_vf_tx_rate = be_set_vf_tx_rate,
Ajit Khaparde64600ea2010-07-23 01:50:34 +00002591 .ndo_get_vf_config = be_get_vf_config
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002592};
2593
static void be_netdev_init(struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_rx_obj *rxo;
	int i;

	netdev->features |= NETIF_F_SG | NETIF_F_HW_VLAN_RX | NETIF_F_TSO |
		NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_FILTER |
		NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
		NETIF_F_GRO | NETIF_F_TSO6;

	netdev->vlan_features |= NETIF_F_SG | NETIF_F_TSO |
		NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;

	if (lancer_chip(adapter))
		netdev->vlan_features |= NETIF_F_TSO6;

	netdev->flags |= IFF_MULTICAST;

	adapter->rx_csum = true;

	/* Default settings for Rx and Tx flow control */
	adapter->rx_fc = true;
	adapter->tx_fc = true;

	netif_set_gso_max_size(netdev, 65535);

	BE_SET_NETDEV_OPS(netdev, &be_netdev_ops);

	SET_ETHTOOL_OPS(netdev, &be_ethtool_ops);

	for_all_rx_queues(adapter, rxo, i)
		netif_napi_add(netdev, &rxo->rx_eq.napi, be_poll_rx,
			BE_NAPI_WEIGHT);

	netif_napi_add(netdev, &adapter->tx_eq.napi, be_poll_tx_mcc,
		BE_NAPI_WEIGHT);

	netif_stop_queue(netdev);
}

static void be_unmap_pci_bars(struct be_adapter *adapter)
{
	if (adapter->csr)
		iounmap(adapter->csr);
	if (adapter->db)
		iounmap(adapter->db);
	if (adapter->pcicfg && be_physfn(adapter))
		iounmap(adapter->pcicfg);
}

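/* BAR usage differs by chip generation: Lancer maps only its doorbell
 * BAR (0). For BE2/BE3 the CSR space is in BAR 2 (PF only); the doorbell
 * and pcicfg BARs depend on the generation and on whether this function
 * is the PF, and a VF reaches pcicfg at a fixed offset within its
 * doorbell mapping.
 */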
static int be_map_pci_bars(struct be_adapter *adapter)
{
	u8 __iomem *addr;
	int pcicfg_reg, db_reg;

	if (lancer_chip(adapter)) {
		addr = ioremap_nocache(pci_resource_start(adapter->pdev, 0),
			pci_resource_len(adapter->pdev, 0));
		if (addr == NULL)
			return -ENOMEM;
		adapter->db = addr;
		return 0;
	}

	if (be_physfn(adapter)) {
		addr = ioremap_nocache(pci_resource_start(adapter->pdev, 2),
				pci_resource_len(adapter->pdev, 2));
		if (addr == NULL)
			return -ENOMEM;
		adapter->csr = addr;
	}

	if (adapter->generation == BE_GEN2) {
		pcicfg_reg = 1;
		db_reg = 4;
	} else {
		pcicfg_reg = 0;
		if (be_physfn(adapter))
			db_reg = 4;
		else
			db_reg = 0;
	}
	addr = ioremap_nocache(pci_resource_start(adapter->pdev, db_reg),
				pci_resource_len(adapter->pdev, db_reg));
	if (addr == NULL)
		goto pci_map_err;
	adapter->db = addr;

	if (be_physfn(adapter)) {
		addr = ioremap_nocache(
				pci_resource_start(adapter->pdev, pcicfg_reg),
				pci_resource_len(adapter->pdev, pcicfg_reg));
		if (addr == NULL)
			goto pci_map_err;
		adapter->pcicfg = addr;
	} else
		adapter->pcicfg = adapter->db + SRIOV_VF_PCICFG_OFFSET;

	return 0;
pci_map_err:
	be_unmap_pci_bars(adapter);
	return -ENOMEM;
}


static void be_ctrl_cleanup(struct be_adapter *adapter)
{
	struct be_dma_mem *mem = &adapter->mbox_mem_alloced;

	be_unmap_pci_bars(adapter);

	if (mem->va)
		pci_free_consistent(adapter->pdev, mem->size,
			mem->va, mem->dma);

	mem = &adapter->mc_cmd_mem;
	if (mem->va)
		pci_free_consistent(adapter->pdev, mem->size,
			mem->va, mem->dma);
}

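/* Map the BARs and allocate the DMA memory used for the mailbox and the
 * multicast config command. The mailbox handed to hardware must be
 * 16-byte aligned, so a padded buffer is allocated and an aligned view
 * of it is what gets used.
 */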
static int be_ctrl_init(struct be_adapter *adapter)
{
	struct be_dma_mem *mbox_mem_alloc = &adapter->mbox_mem_alloced;
	struct be_dma_mem *mbox_mem_align = &adapter->mbox_mem;
	struct be_dma_mem *mc_cmd_mem = &adapter->mc_cmd_mem;
	int status;

	status = be_map_pci_bars(adapter);
	if (status)
		goto done;

	mbox_mem_alloc->size = sizeof(struct be_mcc_mailbox) + 16;
	mbox_mem_alloc->va = pci_alloc_consistent(adapter->pdev,
				mbox_mem_alloc->size, &mbox_mem_alloc->dma);
	if (!mbox_mem_alloc->va) {
		status = -ENOMEM;
		goto unmap_pci_bars;
	}

	mbox_mem_align->size = sizeof(struct be_mcc_mailbox);
	mbox_mem_align->va = PTR_ALIGN(mbox_mem_alloc->va, 16);
	mbox_mem_align->dma = PTR_ALIGN(mbox_mem_alloc->dma, 16);
	memset(mbox_mem_align->va, 0, sizeof(struct be_mcc_mailbox));

	mc_cmd_mem->size = sizeof(struct be_cmd_req_mcast_mac_config);
	mc_cmd_mem->va = pci_alloc_consistent(adapter->pdev, mc_cmd_mem->size,
			&mc_cmd_mem->dma);
	if (mc_cmd_mem->va == NULL) {
		status = -ENOMEM;
		goto free_mbox;
	}
	memset(mc_cmd_mem->va, 0, mc_cmd_mem->size);

	mutex_init(&adapter->mbox_lock);
	spin_lock_init(&adapter->mcc_lock);
	spin_lock_init(&adapter->mcc_cq_lock);

	init_completion(&adapter->flash_compl);
	pci_save_state(adapter->pdev);
	return 0;

free_mbox:
	pci_free_consistent(adapter->pdev, mbox_mem_alloc->size,
		mbox_mem_alloc->va, mbox_mem_alloc->dma);

unmap_pci_bars:
	be_unmap_pci_bars(adapter);

done:
	return status;
}

static void be_stats_cleanup(struct be_adapter *adapter)
{
	struct be_dma_mem *cmd = &adapter->stats_cmd;

	if (cmd->va)
		pci_free_consistent(adapter->pdev, cmd->size,
			cmd->va, cmd->dma);
}

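/* Allocate the DMA buffer reused for GET_STATS firmware commands */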
static int be_stats_init(struct be_adapter *adapter)
{
	struct be_dma_mem *cmd = &adapter->stats_cmd;

	cmd->size = sizeof(struct be_cmd_req_get_stats);
	cmd->va = pci_alloc_consistent(adapter->pdev, cmd->size, &cmd->dma);
	if (cmd->va == NULL)
		return -ENOMEM;
	memset(cmd->va, 0, cmd->size);
	return 0;
}

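/* Tear down in roughly the reverse order of be_probe(); the worker is
 * cancelled first so it cannot issue firmware commands while resources
 * are being freed.
 */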
static void __devexit be_remove(struct pci_dev *pdev)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);

	if (!adapter)
		return;

	cancel_delayed_work_sync(&adapter->work);

	unregister_netdev(adapter->netdev);

	be_clear(adapter);

	be_stats_cleanup(adapter);

	be_ctrl_cleanup(adapter);

	be_sriov_disable(adapter);

	be_msix_disable(adapter);

	pci_set_drvdata(pdev, NULL);
	pci_release_regions(pdev);
	pci_disable_device(pdev);

	free_netdev(adapter->netdev);
}

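/* Read firmware version, port configuration and (on the PF only) the
 * permanent MAC address, and derive the VLAN table size from the
 * reported function mode.
 */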
static int be_get_config(struct be_adapter *adapter)
{
	int status;
	u8 mac[ETH_ALEN];

	status = be_cmd_get_fw_ver(adapter, adapter->fw_ver);
	if (status)
		return status;

	status = be_cmd_query_fw_cfg(adapter, &adapter->port_num,
			&adapter->function_mode, &adapter->function_caps);
	if (status)
		return status;

	memset(mac, 0, ETH_ALEN);

	if (be_physfn(adapter)) {
		status = be_cmd_mac_addr_query(adapter, mac,
			MAC_ADDRESS_TYPE_NETWORK, true /*permanent */, 0);

		if (status)
			return status;

		if (!is_valid_ether_addr(mac))
			return -EADDRNOTAVAIL;

		memcpy(adapter->netdev->dev_addr, mac, ETH_ALEN);
		memcpy(adapter->netdev->perm_addr, mac, ETH_ALEN);
	}

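	/* Bit 0x400 of function_mode appears to indicate a multi-channel
	 * (FLEX10-style) configuration in which the VLAN table is shared,
	 * leaving each function a quarter of the entries.
	 */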
	if (adapter->function_mode & 0x400)
		adapter->max_vlans = BE_NUM_VLANS_SUPPORTED/4;
	else
		adapter->max_vlans = BE_NUM_VLANS_SUPPORTED;

	return 0;
}

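/* Derive the chip generation from the PCI device id. Lancer
 * (OC_DEVICE_ID3) additionally requires a valid SLI_INTF register and
 * does not support SR-IOV, so a non-zero num_vfs is rejected there.
 */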
static int be_dev_family_check(struct be_adapter *adapter)
{
	struct pci_dev *pdev = adapter->pdev;
	u32 sli_intf = 0, if_type;

	switch (pdev->device) {
	case BE_DEVICE_ID1:
	case OC_DEVICE_ID1:
		adapter->generation = BE_GEN2;
		break;
	case BE_DEVICE_ID2:
	case OC_DEVICE_ID2:
		adapter->generation = BE_GEN3;
		break;
	case OC_DEVICE_ID3:
		pci_read_config_dword(pdev, SLI_INTF_REG_OFFSET, &sli_intf);
		if_type = (sli_intf & SLI_INTF_IF_TYPE_MASK) >>
			SLI_INTF_IF_TYPE_SHIFT;

		if (((sli_intf & SLI_INTF_VALID_MASK) != SLI_INTF_VALID) ||
			if_type != 0x02) {
			dev_err(&pdev->dev, "SLI_INTF reg val is not valid\n");
			return -EINVAL;
		}
		if (num_vfs > 0) {
			dev_err(&pdev->dev, "VFs not supported\n");
			return -EINVAL;
		}
		adapter->sli_family = ((sli_intf & SLI_INTF_FAMILY_MASK) >>
			SLI_INTF_FAMILY_SHIFT);
		adapter->generation = BE_GEN3;
		break;
	default:
		adapter->generation = 0;
	}
	return 0;
}

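/* Bring up a newly probed adapter: map BARs, sync with firmware POST,
 * reset the function (PF only), create queues and register the netdev.
 * The worker is kicked off last, once the device is fully initialized.
 */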
static int __devinit be_probe(struct pci_dev *pdev,
			const struct pci_device_id *pdev_id)
{
	int status = 0;
	struct be_adapter *adapter;
	struct net_device *netdev;

	status = pci_enable_device(pdev);
	if (status)
		goto do_none;

	status = pci_request_regions(pdev, DRV_NAME);
	if (status)
		goto disable_dev;
	pci_set_master(pdev);

	netdev = alloc_etherdev(sizeof(struct be_adapter));
	if (netdev == NULL) {
		status = -ENOMEM;
		goto rel_reg;
	}
	adapter = netdev_priv(netdev);
	adapter->pdev = pdev;
	pci_set_drvdata(pdev, adapter);

	status = be_dev_family_check(adapter);
	if (status)
		goto free_netdev;

	adapter->netdev = netdev;
	SET_NETDEV_DEV(netdev, &pdev->dev);

	status = pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
	if (!status) {
		netdev->features |= NETIF_F_HIGHDMA;
	} else {
		status = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
		if (status) {
			dev_err(&pdev->dev, "Could not set PCI DMA Mask\n");
			goto free_netdev;
		}
	}

	be_sriov_enable(adapter);

	status = be_ctrl_init(adapter);
	if (status)
		goto free_netdev;

	/* sync up with fw's ready state */
	if (be_physfn(adapter)) {
		status = be_cmd_POST(adapter);
		if (status)
			goto ctrl_clean;
	}

	/* tell fw we're ready to fire cmds */
	status = be_cmd_fw_init(adapter);
	if (status)
		goto ctrl_clean;

	if (be_physfn(adapter)) {
		status = be_cmd_reset_function(adapter);
		if (status)
			goto ctrl_clean;
	}

	status = be_stats_init(adapter);
	if (status)
		goto ctrl_clean;

	status = be_get_config(adapter);
	if (status)
		goto stats_clean;

	be_msix_enable(adapter);

	INIT_DELAYED_WORK(&adapter->work, be_worker);

	status = be_setup(adapter);
	if (status)
		goto msix_disable;

	be_netdev_init(netdev);
	status = register_netdev(netdev);
	if (status != 0)
		goto unsetup;
	netif_carrier_off(netdev);

	dev_info(&pdev->dev, "%s port %d\n", nic_name(pdev), adapter->port_num);
	schedule_delayed_work(&adapter->work, msecs_to_jiffies(100));
	return 0;

unsetup:
	be_clear(adapter);
msix_disable:
	be_msix_disable(adapter);
stats_clean:
	be_stats_cleanup(adapter);
ctrl_clean:
	be_ctrl_cleanup(adapter);
free_netdev:
	be_sriov_disable(adapter);
	free_netdev(netdev);
	pci_set_drvdata(pdev, NULL);
rel_reg:
	pci_release_regions(pdev);
disable_dev:
	pci_disable_device(pdev);
do_none:
	dev_err(&pdev->dev, "%s initialization failed\n", nic_name(pdev));
	return status;
}

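/* PM suspend: arm wake-on-LAN if configured, close the interface and
 * free the queues before entering the requested low-power state.
 */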
static int be_suspend(struct pci_dev *pdev, pm_message_t state)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);
	struct net_device *netdev = adapter->netdev;

	if (adapter->wol)
		be_setup_wol(adapter, true);

	netif_device_detach(netdev);
	if (netif_running(netdev)) {
		rtnl_lock();
		be_close(netdev);
		rtnl_unlock();
	}
	be_cmd_get_flow_control(adapter, &adapter->tx_fc, &adapter->rx_fc);
	be_clear(adapter);

	pci_save_state(pdev);
	pci_disable_device(pdev);
	pci_set_power_state(pdev, pci_choose_state(pdev, state));
	return 0;
}

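/* PM resume: restore PCI state, re-sync with firmware and rebuild the
 * queues that be_suspend() tore down.
 */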
static int be_resume(struct pci_dev *pdev)
{
	int status = 0;
	struct be_adapter *adapter = pci_get_drvdata(pdev);
	struct net_device *netdev = adapter->netdev;

	netif_device_detach(netdev);

	status = pci_enable_device(pdev);
	if (status)
		return status;

	pci_set_power_state(pdev, PCI_D0);
	pci_restore_state(pdev);

	/* tell fw we're ready to fire cmds */
	status = be_cmd_fw_init(adapter);
	if (status)
		return status;

	be_setup(adapter);
	if (netif_running(netdev)) {
		rtnl_lock();
		be_open(netdev);
		rtnl_unlock();
	}
	netif_device_attach(netdev);

	if (adapter->wol)
		be_setup_wol(adapter, false);
	return 0;
}

/*
 * An FLR will stop BE from DMAing any data.
 */
static void be_shutdown(struct pci_dev *pdev)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);
	struct net_device *netdev = adapter->netdev;

	netif_device_detach(netdev);

	be_cmd_reset_function(adapter);

	if (adapter->wol)
		be_setup_wol(adapter, true);

	pci_disable_device(pdev);
}

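/* EEH (PCI error recovery) callbacks: detach and free resources when an
 * error is detected, re-enable and re-POST the device on slot reset,
 * then rebuild state and reattach on resume.
 */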
static pci_ers_result_t be_eeh_err_detected(struct pci_dev *pdev,
				pci_channel_state_t state)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);
	struct net_device *netdev = adapter->netdev;

	dev_err(&adapter->pdev->dev, "EEH error detected\n");

	adapter->eeh_err = true;

	netif_device_detach(netdev);

	if (netif_running(netdev)) {
		rtnl_lock();
		be_close(netdev);
		rtnl_unlock();
	}
	be_clear(adapter);

	if (state == pci_channel_io_perm_failure)
		return PCI_ERS_RESULT_DISCONNECT;

	pci_disable_device(pdev);

	return PCI_ERS_RESULT_NEED_RESET;
}

static pci_ers_result_t be_eeh_reset(struct pci_dev *pdev)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);
	int status;

	dev_info(&adapter->pdev->dev, "EEH reset\n");
	adapter->eeh_err = false;

	status = pci_enable_device(pdev);
	if (status)
		return PCI_ERS_RESULT_DISCONNECT;

	pci_set_master(pdev);
	pci_set_power_state(pdev, PCI_D0);
	pci_restore_state(pdev);

	/* Check if card is ok and fw is ready */
	status = be_cmd_POST(adapter);
	if (status)
		return PCI_ERS_RESULT_DISCONNECT;

	return PCI_ERS_RESULT_RECOVERED;
}

static void be_eeh_resume(struct pci_dev *pdev)
{
	int status = 0;
	struct be_adapter *adapter = pci_get_drvdata(pdev);
	struct net_device *netdev = adapter->netdev;

	dev_info(&adapter->pdev->dev, "EEH resume\n");

	pci_save_state(pdev);

	/* tell fw we're ready to fire cmds */
	status = be_cmd_fw_init(adapter);
	if (status)
		goto err;

	status = be_setup(adapter);
	if (status)
		goto err;

	if (netif_running(netdev)) {
		status = be_open(netdev);
		if (status)
			goto err;
	}
	netif_device_attach(netdev);
	return;
err:
	dev_err(&adapter->pdev->dev, "EEH resume failed\n");
}

static struct pci_error_handlers be_eeh_handlers = {
	.error_detected = be_eeh_err_detected,
	.slot_reset = be_eeh_reset,
	.resume = be_eeh_resume,
};

static struct pci_driver be_driver = {
	.name = DRV_NAME,
	.id_table = be_dev_ids,
	.probe = be_probe,
	.remove = be_remove,
	.suspend = be_suspend,
	.resume = be_resume,
	.shutdown = be_shutdown,
	.err_handler = &be_eeh_handlers
};

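/* Validate module parameters before registering the PCI driver;
 * out-of-range values are clamped to safe defaults instead of failing
 * the load. E.g. (assuming the module is loaded under DRV_NAME,
 * i.e. be2net): "modprobe be2net rx_frag_size=4096 num_vfs=4".
 */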
static int __init be_init_module(void)
{
	if (rx_frag_size != 8192 && rx_frag_size != 4096 &&
	    rx_frag_size != 2048) {
		printk(KERN_WARNING DRV_NAME
			" : Module param rx_frag_size must be 2048/4096/8192."
			" Using 2048\n");
		rx_frag_size = 2048;
	}

	if (num_vfs > 32) {
		printk(KERN_WARNING DRV_NAME
			" : Module param num_vfs must not be greater than 32."
			" Using 32\n");
		num_vfs = 32;
	}

	return pci_register_driver(&be_driver);
}
module_init(be_init_module);

static void __exit be_exit_module(void)
{
	pci_unregister_driver(&be_driver);
}
module_exit(be_exit_module);