/*
 * Copyright (C) 2005 - 2010 ServerEngines
 * All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version 2
 * as published by the Free Software Foundation. The full GNU General
 * Public License is included in this distribution in the file called COPYING.
 *
 * Contact Information:
 * linux-drivers@serverengines.com
 *
 * ServerEngines
 * 209 N. Fair Oaks Ave
 * Sunnyvale, CA 94085
 */

#include "be.h"
#include "be_cmds.h"
#include <asm/div64.h>

MODULE_VERSION(DRV_VER);
MODULE_DEVICE_TABLE(pci, be_dev_ids);
MODULE_DESCRIPTION(DRV_DESC " " DRV_VER);
MODULE_AUTHOR("ServerEngines Corporation");
MODULE_LICENSE("GPL");

static unsigned int rx_frag_size = 2048;
static unsigned int num_vfs;
module_param(rx_frag_size, uint, S_IRUGO);
module_param(num_vfs, uint, S_IRUGO);
MODULE_PARM_DESC(rx_frag_size, "Size of a fragment that holds rcvd data.");
MODULE_PARM_DESC(num_vfs, "Number of PCI VFs to initialize");

static bool multi_rxq = true;
module_param(multi_rxq, bool, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(multi_rxq, "Multi Rx Queue support. Enabled by default");

static DEFINE_PCI_DEVICE_TABLE(be_dev_ids) = {
	{ PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID1) },
	{ PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID2) },
	{ PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID1) },
	{ PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID2) },
	{ 0 }
};
MODULE_DEVICE_TABLE(pci, be_dev_ids);
/* UE Status Low CSR */
static char *ue_status_low_desc[] = {
	"CEV",
	"CTX",
	"DBUF",
	"ERX",
	"Host",
	"MPU",
	"NDMA",
	"PTC ",
	"RDMA ",
	"RXF ",
	"RXIPS ",
	"RXULP0 ",
	"RXULP1 ",
	"RXULP2 ",
	"TIM ",
	"TPOST ",
	"TPRE ",
	"TXIPS ",
	"TXULP0 ",
	"TXULP1 ",
	"UC ",
	"WDMA ",
	"TXULP2 ",
	"HOST1 ",
	"P0_OB_LINK ",
	"P1_OB_LINK ",
	"HOST_GPIO ",
	"MBOX ",
	"AXGMAC0",
	"AXGMAC1",
	"JTAG",
	"MPU_INTPEND"
};
/* UE Status High CSR */
static char *ue_status_hi_desc[] = {
	"LPCMEMHOST",
	"MGMT_MAC",
	"PCS0ONLINE",
	"MPU_IRAM",
	"PCS1ONLINE",
	"PCTL0",
	"PCTL1",
	"PMEM",
	"RR",
	"TXPB",
	"RXPP",
	"XAUI",
	"TXP",
	"ARM",
	"IPC",
	"HOST2",
	"HOST3",
	"HOST4",
	"HOST5",
	"HOST6",
	"HOST7",
	"HOST8",
	"HOST9",
	"NETC",
	"Unknown",
	"Unknown",
	"Unknown",
	"Unknown",
	"Unknown",
	"Unknown",
	"Unknown",
	"Unknown"
};

static inline bool be_multi_rxq(struct be_adapter *adapter)
{
	return (adapter->num_rx_qs > 1);
}

static void be_queue_free(struct be_adapter *adapter, struct be_queue_info *q)
{
	struct be_dma_mem *mem = &q->dma_mem;
	if (mem->va)
		pci_free_consistent(adapter->pdev, mem->size,
			mem->va, mem->dma);
}

static int be_queue_alloc(struct be_adapter *adapter, struct be_queue_info *q,
		u16 len, u16 entry_size)
{
	struct be_dma_mem *mem = &q->dma_mem;

	memset(q, 0, sizeof(*q));
	q->len = len;
	q->entry_size = entry_size;
	mem->size = len * entry_size;
	mem->va = pci_alloc_consistent(adapter->pdev, mem->size, &mem->dma);
	if (!mem->va)
		return -1;
	memset(mem->va, 0, mem->size);
	return 0;
}

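/* Toggle the HOSTINTR bit in the membar control register to enable or
 * disable host-side interrupt delivery. Does nothing if the bit already
 * matches the requested state or if an EEH error has been flagged.
 */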
static void be_intr_set(struct be_adapter *adapter, bool enable)
{
	u8 __iomem *addr = adapter->pcicfg + PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET;
	u32 reg = ioread32(addr);
	u32 enabled = reg & MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;

	if (adapter->eeh_err)
		return;

	if (!enabled && enable)
		reg |= MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
	else if (enabled && !enable)
		reg &= ~MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
	else
		return;

	iowrite32(reg, addr);
}

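/* Doorbell helpers: each composes a doorbell word from the ring id and
 * the number of newly posted entries and writes it to the corresponding
 * doorbell offset. The wmb() makes sure descriptor updates are globally
 * visible before the hardware is notified.
 */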
static void be_rxq_notify(struct be_adapter *adapter, u16 qid, u16 posted)
{
	u32 val = 0;
	val |= qid & DB_RQ_RING_ID_MASK;
	val |= posted << DB_RQ_NUM_POSTED_SHIFT;

	wmb();
	iowrite32(val, adapter->db + DB_RQ_OFFSET);
}

static void be_txq_notify(struct be_adapter *adapter, u16 qid, u16 posted)
{
	u32 val = 0;
	val |= qid & DB_TXULP_RING_ID_MASK;
	val |= (posted & DB_TXULP_NUM_POSTED_MASK) << DB_TXULP_NUM_POSTED_SHIFT;

	wmb();
	iowrite32(val, adapter->db + DB_TXULP1_OFFSET);
}

static void be_eq_notify(struct be_adapter *adapter, u16 qid,
		bool arm, bool clear_int, u16 num_popped)
{
	u32 val = 0;
	val |= qid & DB_EQ_RING_ID_MASK;

	if (adapter->eeh_err)
		return;

	if (arm)
		val |= 1 << DB_EQ_REARM_SHIFT;
	if (clear_int)
		val |= 1 << DB_EQ_CLR_SHIFT;
	val |= 1 << DB_EQ_EVNT_SHIFT;
	val |= num_popped << DB_EQ_NUM_POPPED_SHIFT;
	iowrite32(val, adapter->db + DB_EQ_OFFSET);
}

void be_cq_notify(struct be_adapter *adapter, u16 qid, bool arm, u16 num_popped)
{
	u32 val = 0;
	val |= qid & DB_CQ_RING_ID_MASK;

	if (adapter->eeh_err)
		return;

	if (arm)
		val |= 1 << DB_CQ_REARM_SHIFT;
	val |= num_popped << DB_CQ_NUM_POPPED_SHIFT;
	iowrite32(val, adapter->db + DB_CQ_OFFSET);
}

static int be_mac_addr_set(struct net_device *netdev, void *p)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct sockaddr *addr = p;
	int status = 0;

	if (!is_valid_ether_addr(addr->sa_data))
		return -EADDRNOTAVAIL;

	/* MAC addr configuration will be done in hardware for VFs
	 * by their corresponding PFs. Just copy to netdev addr here
	 */
	if (!be_physfn(adapter))
		goto netdev_addr;

	status = be_cmd_pmac_del(adapter, adapter->if_handle, adapter->pmac_id);
	if (status)
		return status;

	status = be_cmd_pmac_add(adapter, (u8 *)addr->sa_data,
			adapter->if_handle, &adapter->pmac_id);
netdev_addr:
	if (!status)
		memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);

	return status;
}

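/* Fold the hardware stats (fetched earlier via the stats command) into
 * the netdev stats: per-RX-queue counters are summed and the port-level
 * error counters are mapped onto the standard rx_* error fields.
 */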
void netdev_stats_update(struct be_adapter *adapter)
{
	struct be_hw_stats *hw_stats = hw_stats_from_cmd(adapter->stats_cmd.va);
	struct be_rxf_stats *rxf_stats = &hw_stats->rxf;
	struct be_port_rxf_stats *port_stats =
			&rxf_stats->port[adapter->port_num];
	struct net_device_stats *dev_stats = &adapter->netdev->stats;
	struct be_erx_stats *erx_stats = &hw_stats->erx;
	struct be_rx_obj *rxo;
	int i;

	memset(dev_stats, 0, sizeof(*dev_stats));
	for_all_rx_queues(adapter, rxo, i) {
		dev_stats->rx_packets += rx_stats(rxo)->rx_pkts;
		dev_stats->rx_bytes += rx_stats(rxo)->rx_bytes;
		dev_stats->multicast += rx_stats(rxo)->rx_mcast_pkts;
		/* no space in linux buffers: best possible approximation */
		dev_stats->rx_dropped +=
			erx_stats->rx_drops_no_fragments[rxo->q.id];
	}

	dev_stats->tx_packets = tx_stats(adapter)->be_tx_pkts;
	dev_stats->tx_bytes = tx_stats(adapter)->be_tx_bytes;

	/* bad pkts received */
	dev_stats->rx_errors = port_stats->rx_crc_errors +
		port_stats->rx_alignment_symbol_errors +
		port_stats->rx_in_range_errors +
		port_stats->rx_out_range_errors +
		port_stats->rx_frame_too_long +
		port_stats->rx_dropped_too_small +
		port_stats->rx_dropped_too_short +
		port_stats->rx_dropped_header_too_small +
		port_stats->rx_dropped_tcp_length +
		port_stats->rx_dropped_runt +
		port_stats->rx_tcp_checksum_errs +
		port_stats->rx_ip_checksum_errs +
		port_stats->rx_udp_checksum_errs;

	/* detailed rx errors */
	dev_stats->rx_length_errors = port_stats->rx_in_range_errors +
		port_stats->rx_out_range_errors +
		port_stats->rx_frame_too_long;

	dev_stats->rx_crc_errors = port_stats->rx_crc_errors;

	/* frame alignment errors */
	dev_stats->rx_frame_errors = port_stats->rx_alignment_symbol_errors;

	/* receiver fifo overrun */
	/* drops_no_pbuf is not per i/f, it's per BE card */
	dev_stats->rx_fifo_errors = port_stats->rx_fifo_overflow +
		port_stats->rx_input_fifo_overflow +
		rxf_stats->rx_drops_no_pbuf;
}

void be_link_status_update(struct be_adapter *adapter, bool link_up)
{
	struct net_device *netdev = adapter->netdev;

	/* If link came up or went down */
	if (adapter->link_up != link_up) {
		adapter->link_speed = -1;
		if (link_up) {
			netif_start_queue(netdev);
			netif_carrier_on(netdev);
			printk(KERN_INFO "%s: Link up\n", netdev->name);
		} else {
			netif_stop_queue(netdev);
			netif_carrier_off(netdev);
			printk(KERN_INFO "%s: Link down\n", netdev->name);
		}
		adapter->link_up = link_up;
	}
}

/* Update the EQ delay in BE based on the RX frags consumed / sec */
static void be_rx_eqd_update(struct be_adapter *adapter, struct be_rx_obj *rxo)
{
	struct be_eq_obj *rx_eq = &rxo->rx_eq;
	struct be_rx_stats *stats = &rxo->stats;
	ulong now = jiffies;
	u32 eqd;

	if (!rx_eq->enable_aic)
		return;

	/* Wrapped around */
	if (time_before(now, stats->rx_fps_jiffies)) {
		stats->rx_fps_jiffies = now;
		return;
	}

	/* Update once a second */
	if ((now - stats->rx_fps_jiffies) < HZ)
		return;

	stats->rx_fps = (stats->rx_frags - stats->prev_rx_frags) /
			((now - stats->rx_fps_jiffies) / HZ);

	stats->rx_fps_jiffies = now;
	stats->prev_rx_frags = stats->rx_frags;
	eqd = stats->rx_fps / 110000;
	eqd = eqd << 3;
	if (eqd > rx_eq->max_eqd)
		eqd = rx_eq->max_eqd;
	if (eqd < rx_eq->min_eqd)
		eqd = rx_eq->min_eqd;
	if (eqd < 10)
		eqd = 0;
	if (eqd != rx_eq->cur_eqd)
		be_cmd_modify_eqd(adapter, rx_eq->q.id, eqd);

	rx_eq->cur_eqd = eqd;
}

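/* Convert a byte count accumulated over 'ticks' jiffies to Mbits/sec */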
static u32 be_calc_rate(u64 bytes, unsigned long ticks)
{
	u64 rate = bytes;

	do_div(rate, ticks / HZ);
	rate <<= 3;			/* bytes/sec -> bits/sec */
	do_div(rate, 1000000ul);	/* Mbits/sec */

	return rate;
}

static void be_tx_rate_update(struct be_adapter *adapter)
{
	struct be_tx_stats *stats = tx_stats(adapter);
	ulong now = jiffies;

	/* Wrapped around? */
	if (time_before(now, stats->be_tx_jiffies)) {
		stats->be_tx_jiffies = now;
		return;
	}

	/* Update tx rate once in two seconds */
	if ((now - stats->be_tx_jiffies) > 2 * HZ) {
		stats->be_tx_rate = be_calc_rate(stats->be_tx_bytes
						  - stats->be_tx_bytes_prev,
						 now - stats->be_tx_jiffies);
		stats->be_tx_jiffies = now;
		stats->be_tx_bytes_prev = stats->be_tx_bytes;
	}
}

static void be_tx_stats_update(struct be_adapter *adapter,
			u32 wrb_cnt, u32 copied, u32 gso_segs, bool stopped)
{
	struct be_tx_stats *stats = tx_stats(adapter);
	stats->be_tx_reqs++;
	stats->be_tx_wrbs += wrb_cnt;
	stats->be_tx_bytes += copied;
	stats->be_tx_pkts += (gso_segs ? gso_segs : 1);
	if (stopped)
		stats->be_tx_stops++;
}

/* Determine number of WRB entries needed to xmit data in an skb */
static u32 wrb_cnt_for_skb(struct sk_buff *skb, bool *dummy)
{
	int cnt = (skb->len > skb->data_len);

	cnt += skb_shinfo(skb)->nr_frags;

	/* to account for hdr wrb */
	cnt++;
	if (cnt & 1) {
		/* add a dummy to make it an even num */
		cnt++;
		*dummy = true;
	} else
		*dummy = false;
	BUG_ON(cnt > BE_MAX_TX_FRAG_COUNT);
	return cnt;
}

static inline void wrb_fill(struct be_eth_wrb *wrb, u64 addr, int len)
{
	wrb->frag_pa_hi = upper_32_bits(addr);
	wrb->frag_pa_lo = addr & 0xFFFFFFFF;
	wrb->frag_len = len & ETH_WRB_FRAG_LEN_MASK;
}

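/* Fill the header WRB that precedes the data WRBs of a transmit: LSO and
 * checksum-offload flags, VLAN tag, total WRB count and frame length.
 */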
static void wrb_fill_hdr(struct be_eth_hdr_wrb *hdr, struct sk_buff *skb,
		bool vlan, u32 wrb_cnt, u32 len)
{
	memset(hdr, 0, sizeof(*hdr));

	AMAP_SET_BITS(struct amap_eth_hdr_wrb, crc, hdr, 1);

	if (skb_is_gso(skb)) {
		AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso, hdr, 1);
		AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso_mss,
			hdr, skb_shinfo(skb)->gso_size);
		if (skb_is_gso_v6(skb))
			AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso6, hdr, 1);
	} else if (skb->ip_summed == CHECKSUM_PARTIAL) {
		if (is_tcp_pkt(skb))
			AMAP_SET_BITS(struct amap_eth_hdr_wrb, tcpcs, hdr, 1);
		else if (is_udp_pkt(skb))
			AMAP_SET_BITS(struct amap_eth_hdr_wrb, udpcs, hdr, 1);
	}

	if (vlan && vlan_tx_tag_present(skb)) {
		AMAP_SET_BITS(struct amap_eth_hdr_wrb, vlan, hdr, 1);
		AMAP_SET_BITS(struct amap_eth_hdr_wrb, vlan_tag,
			hdr, vlan_tx_tag_get(skb));
	}

	AMAP_SET_BITS(struct amap_eth_hdr_wrb, event, hdr, 1);
	AMAP_SET_BITS(struct amap_eth_hdr_wrb, complete, hdr, 1);
	AMAP_SET_BITS(struct amap_eth_hdr_wrb, num_wrb, hdr, wrb_cnt);
	AMAP_SET_BITS(struct amap_eth_hdr_wrb, len, hdr, len);
}

static void unmap_tx_frag(struct pci_dev *pdev, struct be_eth_wrb *wrb,
		bool unmap_single)
{
	dma_addr_t dma;

	be_dws_le_to_cpu(wrb, sizeof(*wrb));

	dma = (u64)wrb->frag_pa_hi << 32 | (u64)wrb->frag_pa_lo;
	if (wrb->frag_len) {
		if (unmap_single)
			pci_unmap_single(pdev, dma, wrb->frag_len,
				PCI_DMA_TODEVICE);
		else
			pci_unmap_page(pdev, dma, wrb->frag_len,
				PCI_DMA_TODEVICE);
	}
}

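/* DMA-map the skb head and frags and fill one data WRB per mapping,
 * plus a trailing dummy WRB if needed to keep the WRB count even.
 * Returns the number of bytes queued, or 0 after unwinding all mappings
 * if a DMA mapping error is hit.
 */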
static int make_tx_wrbs(struct be_adapter *adapter,
		struct sk_buff *skb, u32 wrb_cnt, bool dummy_wrb)
{
	dma_addr_t busaddr;
	int i, copied = 0;
	struct pci_dev *pdev = adapter->pdev;
	struct sk_buff *first_skb = skb;
	struct be_queue_info *txq = &adapter->tx_obj.q;
	struct be_eth_wrb *wrb;
	struct be_eth_hdr_wrb *hdr;
	bool map_single = false;
	u16 map_head;

	hdr = queue_head_node(txq);
	queue_head_inc(txq);
	map_head = txq->head;

	if (skb->len > skb->data_len) {
		int len = skb_headlen(skb);
		busaddr = pci_map_single(pdev, skb->data, len,
					 PCI_DMA_TODEVICE);
		if (pci_dma_mapping_error(pdev, busaddr))
			goto dma_err;
		map_single = true;
		wrb = queue_head_node(txq);
		wrb_fill(wrb, busaddr, len);
		be_dws_cpu_to_le(wrb, sizeof(*wrb));
		queue_head_inc(txq);
		copied += len;
	}

	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		struct skb_frag_struct *frag =
			&skb_shinfo(skb)->frags[i];
		busaddr = pci_map_page(pdev, frag->page,
				       frag->page_offset,
				       frag->size, PCI_DMA_TODEVICE);
		if (pci_dma_mapping_error(pdev, busaddr))
			goto dma_err;
		wrb = queue_head_node(txq);
		wrb_fill(wrb, busaddr, frag->size);
		be_dws_cpu_to_le(wrb, sizeof(*wrb));
		queue_head_inc(txq);
		copied += frag->size;
	}

	if (dummy_wrb) {
		wrb = queue_head_node(txq);
		wrb_fill(wrb, 0, 0);
		be_dws_cpu_to_le(wrb, sizeof(*wrb));
		queue_head_inc(txq);
	}

	wrb_fill_hdr(hdr, first_skb, adapter->vlan_grp ? true : false,
		wrb_cnt, copied);
	be_dws_cpu_to_le(hdr, sizeof(*hdr));

	return copied;
dma_err:
	txq->head = map_head;
	while (copied) {
		wrb = queue_head_node(txq);
		unmap_tx_frag(pdev, wrb, map_single);
		map_single = false;
		copied -= wrb->frag_len;
		queue_head_inc(txq);
	}
	return 0;
}

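/* ndo_start_xmit handler: build the WRBs for this skb, stop the queue
 * before ringing the doorbell if another max-sized request might not
 * fit, then notify hardware via the TX doorbell.
 */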
static netdev_tx_t be_xmit(struct sk_buff *skb,
			struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_tx_obj *tx_obj = &adapter->tx_obj;
	struct be_queue_info *txq = &tx_obj->q;
	u32 wrb_cnt = 0, copied = 0;
	u32 start = txq->head;
	bool dummy_wrb, stopped = false;

	wrb_cnt = wrb_cnt_for_skb(skb, &dummy_wrb);

	copied = make_tx_wrbs(adapter, skb, wrb_cnt, dummy_wrb);
	if (copied) {
		/* record the sent skb in the sent_skb table */
		BUG_ON(tx_obj->sent_skb_list[start]);
		tx_obj->sent_skb_list[start] = skb;

		/* Ensure txq has space for the next skb; Else stop the queue
		 * *BEFORE* ringing the tx doorbell, so that we serialize the
		 * tx compls of the current transmit which'll wake up the queue
		 */
		atomic_add(wrb_cnt, &txq->used);
		if ((BE_MAX_TX_FRAG_COUNT + atomic_read(&txq->used)) >=
								txq->len) {
			netif_stop_queue(netdev);
			stopped = true;
		}

		be_txq_notify(adapter, txq->id, wrb_cnt);

		be_tx_stats_update(adapter, wrb_cnt, copied,
				skb_shinfo(skb)->gso_segs, stopped);
	} else {
		txq->head = start;
		dev_kfree_skb_any(skb);
	}
	return NETDEV_TX_OK;
}

static int be_change_mtu(struct net_device *netdev, int new_mtu)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	if (new_mtu < BE_MIN_MTU ||
			new_mtu > (BE_MAX_JUMBO_FRAME_SIZE -
					(ETH_HLEN + ETH_FCS_LEN))) {
		dev_info(&adapter->pdev->dev,
			"MTU must be between %d and %d bytes\n",
			BE_MIN_MTU,
			(BE_MAX_JUMBO_FRAME_SIZE - (ETH_HLEN + ETH_FCS_LEN)));
		return -EINVAL;
	}
	dev_info(&adapter->pdev->dev, "MTU changed from %d to %d bytes\n",
			netdev->mtu, new_mtu);
	netdev->mtu = new_mtu;
	return 0;
}

/*
 * A max of 64 (BE_NUM_VLANS_SUPPORTED) vlans can be configured in BE.
 * If the user configures more, place BE in vlan promiscuous mode.
 */
static int be_vid_config(struct be_adapter *adapter, bool vf, u32 vf_num)
{
	u16 vtag[BE_NUM_VLANS_SUPPORTED];
	u16 ntags = 0, i;
	int status = 0;
	u32 if_handle;

	if (vf) {
		if_handle = adapter->vf_cfg[vf_num].vf_if_handle;
		vtag[0] = cpu_to_le16(adapter->vf_cfg[vf_num].vf_vlan_tag);
		status = be_cmd_vlan_config(adapter, if_handle, vtag, 1, 1, 0);
	}

	if (adapter->vlans_added <= adapter->max_vlans) {
		/* Construct VLAN Table to give to HW */
		for (i = 0; i < VLAN_GROUP_ARRAY_LEN; i++) {
			if (adapter->vlan_tag[i]) {
				vtag[ntags] = cpu_to_le16(i);
				ntags++;
			}
		}
		status = be_cmd_vlan_config(adapter, adapter->if_handle,
				vtag, ntags, 1, 0);
	} else {
		status = be_cmd_vlan_config(adapter, adapter->if_handle,
				NULL, 0, 1, 1);
	}

	return status;
}

static void be_vlan_register(struct net_device *netdev, struct vlan_group *grp)
{
	struct be_adapter *adapter = netdev_priv(netdev);

	adapter->vlan_grp = grp;
}

static void be_vlan_add_vid(struct net_device *netdev, u16 vid)
{
	struct be_adapter *adapter = netdev_priv(netdev);

	adapter->vlans_added++;
	if (!be_physfn(adapter))
		return;

	adapter->vlan_tag[vid] = 1;
	if (adapter->vlans_added <= (adapter->max_vlans + 1))
		be_vid_config(adapter, false, 0);
}

static void be_vlan_rem_vid(struct net_device *netdev, u16 vid)
{
	struct be_adapter *adapter = netdev_priv(netdev);

	adapter->vlans_added--;
	vlan_group_set_device(adapter->vlan_grp, vid, NULL);

	if (!be_physfn(adapter))
		return;

	adapter->vlan_tag[vid] = 0;
	if (adapter->vlans_added <= adapter->max_vlans)
		be_vid_config(adapter, false, 0);
}

static void be_set_multicast_list(struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);

	if (netdev->flags & IFF_PROMISC) {
		be_cmd_promiscuous_config(adapter, adapter->port_num, 1);
		adapter->promiscuous = true;
		goto done;
	}

	/* BE was previously in promiscuous mode; disable it */
	if (adapter->promiscuous) {
		adapter->promiscuous = false;
		be_cmd_promiscuous_config(adapter, adapter->port_num, 0);
	}

	/* Enable multicast promisc if num configured exceeds what we support */
	if (netdev->flags & IFF_ALLMULTI ||
	    netdev_mc_count(netdev) > BE_MAX_MC) {
		be_cmd_multicast_set(adapter, adapter->if_handle, NULL,
				&adapter->mc_cmd_mem);
		goto done;
	}

	be_cmd_multicast_set(adapter, adapter->if_handle, netdev,
		&adapter->mc_cmd_mem);
done:
	return;
}

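/* SR-IOV ndo callbacks: the PF configures MAC, VLAN and TX rate on
 * behalf of its VFs through mailbox commands and caches the settings
 * in vf_cfg[].
 */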
static int be_set_vf_mac(struct net_device *netdev, int vf, u8 *mac)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	int status;

	if (!adapter->sriov_enabled)
		return -EPERM;

	if (!is_valid_ether_addr(mac) || (vf >= num_vfs))
		return -EINVAL;

	if (adapter->vf_cfg[vf].vf_pmac_id != BE_INVALID_PMAC_ID)
		status = be_cmd_pmac_del(adapter,
				adapter->vf_cfg[vf].vf_if_handle,
				adapter->vf_cfg[vf].vf_pmac_id);

	status = be_cmd_pmac_add(adapter, mac,
			adapter->vf_cfg[vf].vf_if_handle,
			&adapter->vf_cfg[vf].vf_pmac_id);

	if (status)
		dev_err(&adapter->pdev->dev, "MAC %pM set on VF %d Failed\n",
			mac, vf);
	else
		memcpy(adapter->vf_cfg[vf].vf_mac_addr, mac, ETH_ALEN);

	return status;
}

static int be_get_vf_config(struct net_device *netdev, int vf,
			struct ifla_vf_info *vi)
{
	struct be_adapter *adapter = netdev_priv(netdev);

	if (!adapter->sriov_enabled)
		return -EPERM;

	if (vf >= num_vfs)
		return -EINVAL;

	vi->vf = vf;
	vi->tx_rate = adapter->vf_cfg[vf].vf_tx_rate;
	vi->vlan = adapter->vf_cfg[vf].vf_vlan_tag;
	vi->qos = 0;
	memcpy(&vi->mac, adapter->vf_cfg[vf].vf_mac_addr, ETH_ALEN);

	return 0;
}

static int be_set_vf_vlan(struct net_device *netdev,
			int vf, u16 vlan, u8 qos)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	int status = 0;

	if (!adapter->sriov_enabled)
		return -EPERM;

	if ((vf >= num_vfs) || (vlan > 4095))
		return -EINVAL;

	if (vlan) {
		adapter->vf_cfg[vf].vf_vlan_tag = vlan;
		adapter->vlans_added++;
	} else {
		adapter->vf_cfg[vf].vf_vlan_tag = 0;
		adapter->vlans_added--;
	}

	status = be_vid_config(adapter, true, vf);

	if (status)
		dev_info(&adapter->pdev->dev,
				"VLAN %d config on VF %d failed\n", vlan, vf);
	return status;
}

static int be_set_vf_tx_rate(struct net_device *netdev,
			int vf, int rate)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	int status = 0;

	if (!adapter->sriov_enabled)
		return -EPERM;

	if ((vf >= num_vfs) || (rate < 0))
		return -EINVAL;

	if (rate > 10000)
		rate = 10000;

	adapter->vf_cfg[vf].vf_tx_rate = rate;
	status = be_cmd_set_qos(adapter, rate / 10, vf);

	if (status)
		dev_info(&adapter->pdev->dev,
				"tx rate %d on VF %d failed\n", rate, vf);
	return status;
}

static void be_rx_rate_update(struct be_rx_obj *rxo)
{
	struct be_rx_stats *stats = &rxo->stats;
	ulong now = jiffies;

	/* Wrapped around */
	if (time_before(now, stats->rx_jiffies)) {
		stats->rx_jiffies = now;
		return;
	}

	/* Update the rate once in two seconds */
	if ((now - stats->rx_jiffies) < 2 * HZ)
		return;

	stats->rx_rate = be_calc_rate(stats->rx_bytes - stats->rx_bytes_prev,
				now - stats->rx_jiffies);
	stats->rx_jiffies = now;
	stats->rx_bytes_prev = stats->rx_bytes;
}

static void be_rx_stats_update(struct be_rx_obj *rxo,
		u32 pktsize, u16 numfrags, u8 pkt_type)
{
	struct be_rx_stats *stats = &rxo->stats;

	stats->rx_compl++;
	stats->rx_frags += numfrags;
	stats->rx_bytes += pktsize;
	stats->rx_pkts++;
	if (pkt_type == BE_MULTICAST_PACKET)
		stats->rx_mcast_pkts++;
}

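/* Returns true if the stack must verify the checksum itself; false
 * (CHECKSUM_UNNECESSARY) only when rx-csum offload is enabled and the
 * completion reports valid IP and L4 checksums for a TCP/UDP frame.
 */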
static inline bool do_pkt_csum(struct be_eth_rx_compl *rxcp, bool cso)
{
	u8 l4_cksm, ip_version, ipcksm, tcpf = 0, udpf = 0, ipv6_chk;

	l4_cksm = AMAP_GET_BITS(struct amap_eth_rx_compl, l4_cksm, rxcp);
	ipcksm = AMAP_GET_BITS(struct amap_eth_rx_compl, ipcksm, rxcp);
	ip_version = AMAP_GET_BITS(struct amap_eth_rx_compl, ip_version, rxcp);
	if (ip_version) {
		tcpf = AMAP_GET_BITS(struct amap_eth_rx_compl, tcpf, rxcp);
		udpf = AMAP_GET_BITS(struct amap_eth_rx_compl, udpf, rxcp);
	}
	ipv6_chk = (ip_version && (tcpf || udpf));

	return ((l4_cksm && ipv6_chk && ipcksm) && cso) ? false : true;
}

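/* Look up the page-info slot for a completed RX frag; the backing page
 * is DMA-unmapped once its last fragment is reaped.
 */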
static struct be_rx_page_info *
get_rx_page_info(struct be_adapter *adapter,
		struct be_rx_obj *rxo,
		u16 frag_idx)
{
	struct be_rx_page_info *rx_page_info;
	struct be_queue_info *rxq = &rxo->q;

	rx_page_info = &rxo->page_info_tbl[frag_idx];
	BUG_ON(!rx_page_info->page);

	if (rx_page_info->last_page_user) {
		pci_unmap_page(adapter->pdev, dma_unmap_addr(rx_page_info, bus),
			adapter->big_page_size, PCI_DMA_FROMDEVICE);
		rx_page_info->last_page_user = false;
	}

	atomic_dec(&rxq->used);
	return rx_page_info;
}

/* Throw away the data in the Rx completion */
static void be_rx_compl_discard(struct be_adapter *adapter,
		struct be_rx_obj *rxo,
		struct be_eth_rx_compl *rxcp)
{
	struct be_queue_info *rxq = &rxo->q;
	struct be_rx_page_info *page_info;
	u16 rxq_idx, i, num_rcvd;

	rxq_idx = AMAP_GET_BITS(struct amap_eth_rx_compl, fragndx, rxcp);
	num_rcvd = AMAP_GET_BITS(struct amap_eth_rx_compl, numfrags, rxcp);

	for (i = 0; i < num_rcvd; i++) {
		page_info = get_rx_page_info(adapter, rxo, rxq_idx);
		put_page(page_info->page);
		memset(page_info, 0, sizeof(*page_info));
		index_inc(&rxq_idx, rxq->len);
	}
}

/*
 * skb_fill_rx_data forms a complete skb for an ether frame
 * indicated by rxcp.
 */
static void skb_fill_rx_data(struct be_adapter *adapter, struct be_rx_obj *rxo,
			struct sk_buff *skb, struct be_eth_rx_compl *rxcp,
			u16 num_rcvd)
{
	struct be_queue_info *rxq = &rxo->q;
	struct be_rx_page_info *page_info;
	u16 rxq_idx, i, j;
	u32 pktsize, hdr_len, curr_frag_len, size;
	u8 *start;
	u8 pkt_type;

	rxq_idx = AMAP_GET_BITS(struct amap_eth_rx_compl, fragndx, rxcp);
	pktsize = AMAP_GET_BITS(struct amap_eth_rx_compl, pktsize, rxcp);
	pkt_type = AMAP_GET_BITS(struct amap_eth_rx_compl, cast_enc, rxcp);

	page_info = get_rx_page_info(adapter, rxo, rxq_idx);

	start = page_address(page_info->page) + page_info->page_offset;
	prefetch(start);

	/* Copy data in the first descriptor of this completion */
	curr_frag_len = min(pktsize, rx_frag_size);

	/* Copy the header portion into skb_data */
	hdr_len = min((u32)BE_HDR_LEN, curr_frag_len);
	memcpy(skb->data, start, hdr_len);
	skb->len = curr_frag_len;
	if (curr_frag_len <= BE_HDR_LEN) { /* tiny packet */
		/* Complete packet has now been moved to data */
		put_page(page_info->page);
		skb->data_len = 0;
		skb->tail += curr_frag_len;
	} else {
		skb_shinfo(skb)->nr_frags = 1;
		skb_shinfo(skb)->frags[0].page = page_info->page;
		skb_shinfo(skb)->frags[0].page_offset =
			page_info->page_offset + hdr_len;
		skb_shinfo(skb)->frags[0].size = curr_frag_len - hdr_len;
		skb->data_len = curr_frag_len - hdr_len;
		skb->tail += hdr_len;
	}
	page_info->page = NULL;

	if (pktsize <= rx_frag_size) {
		BUG_ON(num_rcvd != 1);
		goto done;
	}

	/* More frags present for this completion */
	size = pktsize;
	for (i = 1, j = 0; i < num_rcvd; i++) {
		size -= curr_frag_len;
		index_inc(&rxq_idx, rxq->len);
		page_info = get_rx_page_info(adapter, rxo, rxq_idx);

		curr_frag_len = min(size, rx_frag_size);

		/* Coalesce all frags from the same physical page in one slot */
		if (page_info->page_offset == 0) {
			/* Fresh page */
			j++;
			skb_shinfo(skb)->frags[j].page = page_info->page;
			skb_shinfo(skb)->frags[j].page_offset =
				page_info->page_offset;
			skb_shinfo(skb)->frags[j].size = 0;
			skb_shinfo(skb)->nr_frags++;
		} else {
			put_page(page_info->page);
		}

		skb_shinfo(skb)->frags[j].size += curr_frag_len;
		skb->len += curr_frag_len;
		skb->data_len += curr_frag_len;

		page_info->page = NULL;
	}
	BUG_ON(j > MAX_SKB_FRAGS);

done:
	be_rx_stats_update(rxo, pktsize, num_rcvd, pkt_type);
}

/* Process the RX completion indicated by rxcp when GRO is disabled */
static void be_rx_compl_process(struct be_adapter *adapter,
			struct be_rx_obj *rxo,
			struct be_eth_rx_compl *rxcp)
{
	struct sk_buff *skb;
	u32 vlanf, vid;
	u16 num_rcvd;
	u8 vtm;

	num_rcvd = AMAP_GET_BITS(struct amap_eth_rx_compl, numfrags, rxcp);
	/* Is it a flush compl that has no data */
	if (unlikely(num_rcvd == 0))
		return;

	skb = netdev_alloc_skb_ip_align(adapter->netdev, BE_HDR_LEN);
	if (unlikely(!skb)) {
		if (net_ratelimit())
			dev_warn(&adapter->pdev->dev, "skb alloc failed\n");
		be_rx_compl_discard(adapter, rxo, rxcp);
		return;
	}

	skb_fill_rx_data(adapter, rxo, skb, rxcp, num_rcvd);

	if (do_pkt_csum(rxcp, adapter->rx_csum))
		skb_checksum_none_assert(skb);
	else
		skb->ip_summed = CHECKSUM_UNNECESSARY;

	skb->truesize = skb->len + sizeof(struct sk_buff);
	skb->protocol = eth_type_trans(skb, adapter->netdev);

	vlanf = AMAP_GET_BITS(struct amap_eth_rx_compl, vtp, rxcp);
	vtm = AMAP_GET_BITS(struct amap_eth_rx_compl, vtm, rxcp);

	/* vlanf could be wrongly set in some cards.
	 * ignore if vtm is not set */
	if ((adapter->function_mode & 0x400) && !vtm)
		vlanf = 0;

	if (unlikely(vlanf)) {
		if (!adapter->vlan_grp || adapter->vlans_added == 0) {
			kfree_skb(skb);
			return;
		}
		vid = AMAP_GET_BITS(struct amap_eth_rx_compl, vlan_tag, rxcp);
		vid = swab16(vid);
		vlan_hwaccel_receive_skb(skb, adapter->vlan_grp, vid);
	} else {
		netif_receive_skb(skb);
	}
}

/* Process the RX completion indicated by rxcp when GRO is enabled */
static void be_rx_compl_process_gro(struct be_adapter *adapter,
		struct be_rx_obj *rxo,
		struct be_eth_rx_compl *rxcp)
{
	struct be_rx_page_info *page_info;
	struct sk_buff *skb = NULL;
	struct be_queue_info *rxq = &rxo->q;
	struct be_eq_obj *eq_obj = &rxo->rx_eq;
	u32 num_rcvd, pkt_size, remaining, vlanf, curr_frag_len;
	u16 i, rxq_idx = 0, vid, j;
	u8 vtm;
	u8 pkt_type;

	num_rcvd = AMAP_GET_BITS(struct amap_eth_rx_compl, numfrags, rxcp);
	/* Is it a flush compl that has no data */
	if (unlikely(num_rcvd == 0))
		return;

	pkt_size = AMAP_GET_BITS(struct amap_eth_rx_compl, pktsize, rxcp);
	vlanf = AMAP_GET_BITS(struct amap_eth_rx_compl, vtp, rxcp);
	rxq_idx = AMAP_GET_BITS(struct amap_eth_rx_compl, fragndx, rxcp);
	vtm = AMAP_GET_BITS(struct amap_eth_rx_compl, vtm, rxcp);
	pkt_type = AMAP_GET_BITS(struct amap_eth_rx_compl, cast_enc, rxcp);

	/* vlanf could be wrongly set in some cards.
	 * ignore if vtm is not set */
	if ((adapter->function_mode & 0x400) && !vtm)
		vlanf = 0;

	skb = napi_get_frags(&eq_obj->napi);
	if (!skb) {
		be_rx_compl_discard(adapter, rxo, rxcp);
		return;
	}

	remaining = pkt_size;
	for (i = 0, j = -1; i < num_rcvd; i++) {
		page_info = get_rx_page_info(adapter, rxo, rxq_idx);

		curr_frag_len = min(remaining, rx_frag_size);

		/* Coalesce all frags from the same physical page in one slot */
		if (i == 0 || page_info->page_offset == 0) {
			/* First frag or Fresh page */
			j++;
			skb_shinfo(skb)->frags[j].page = page_info->page;
			skb_shinfo(skb)->frags[j].page_offset =
				page_info->page_offset;
			skb_shinfo(skb)->frags[j].size = 0;
		} else {
			put_page(page_info->page);
		}
		skb_shinfo(skb)->frags[j].size += curr_frag_len;

		remaining -= curr_frag_len;
		index_inc(&rxq_idx, rxq->len);
		memset(page_info, 0, sizeof(*page_info));
	}
	BUG_ON(j > MAX_SKB_FRAGS);

	skb_shinfo(skb)->nr_frags = j + 1;
	skb->len = pkt_size;
	skb->data_len = pkt_size;
	skb->truesize += pkt_size;
	skb->ip_summed = CHECKSUM_UNNECESSARY;

	if (likely(!vlanf)) {
		napi_gro_frags(&eq_obj->napi);
	} else {
		vid = AMAP_GET_BITS(struct amap_eth_rx_compl, vlan_tag, rxcp);
		vid = swab16(vid);

		if (!adapter->vlan_grp || adapter->vlans_added == 0)
			return;

		vlan_gro_frags(&eq_obj->napi, adapter->vlan_grp, vid);
	}

	be_rx_stats_update(rxo, pkt_size, num_rcvd, pkt_type);
}

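/* Return the next valid RX completion or NULL if the ring is empty.
 * The rmb() prevents the entry's fields from being read before the
 * valid bit that was just tested.
 */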
static struct be_eth_rx_compl *be_rx_compl_get(struct be_rx_obj *rxo)
{
	struct be_eth_rx_compl *rxcp = queue_tail_node(&rxo->cq);

	if (rxcp->dw[offsetof(struct amap_eth_rx_compl, valid) / 32] == 0)
		return NULL;

	rmb();
	be_dws_le_to_cpu(rxcp, sizeof(*rxcp));

	queue_tail_inc(&rxo->cq);
	return rxcp;
}

/* To reset the valid bit, we need to reset the whole word as
 * when walking the queue the valid entries are little-endian
 * and invalid entries are host endian
 */
static inline void be_rx_compl_reset(struct be_eth_rx_compl *rxcp)
{
	rxcp->dw[offsetof(struct amap_eth_rx_compl, valid) / 32] = 0;
}

static inline struct page *be_alloc_pages(u32 size)
{
	gfp_t alloc_flags = GFP_ATOMIC;
	u32 order = get_order(size);
	if (order > 0)
		alloc_flags |= __GFP_COMP;
	return alloc_pages(alloc_flags, order);
}

/*
 * Allocate a page, split it to fragments of size rx_frag_size and post as
 * receive buffers to BE
 */
static void be_post_rx_frags(struct be_rx_obj *rxo)
{
	struct be_adapter *adapter = rxo->adapter;
	struct be_rx_page_info *page_info_tbl = rxo->page_info_tbl;
	struct be_rx_page_info *page_info = NULL, *prev_page_info = NULL;
	struct be_queue_info *rxq = &rxo->q;
	struct page *pagep = NULL;
	struct be_eth_rx_d *rxd;
	u64 page_dmaaddr = 0, frag_dmaaddr;
	u32 posted, page_offset = 0;

	page_info = &rxo->page_info_tbl[rxq->head];
	for (posted = 0; posted < MAX_RX_POST && !page_info->page; posted++) {
		if (!pagep) {
			pagep = be_alloc_pages(adapter->big_page_size);
			if (unlikely(!pagep)) {
				rxo->stats.rx_post_fail++;
				break;
			}
			page_dmaaddr = pci_map_page(adapter->pdev, pagep, 0,
						adapter->big_page_size,
						PCI_DMA_FROMDEVICE);
			page_info->page_offset = 0;
		} else {
			get_page(pagep);
			page_info->page_offset = page_offset + rx_frag_size;
		}
		page_offset = page_info->page_offset;
		page_info->page = pagep;
		dma_unmap_addr_set(page_info, bus, page_dmaaddr);
		frag_dmaaddr = page_dmaaddr + page_info->page_offset;

		rxd = queue_head_node(rxq);
		rxd->fragpa_lo = cpu_to_le32(frag_dmaaddr & 0xFFFFFFFF);
		rxd->fragpa_hi = cpu_to_le32(upper_32_bits(frag_dmaaddr));

		/* Any space left in the current big page for another frag? */
		if ((page_offset + rx_frag_size + rx_frag_size) >
					adapter->big_page_size) {
			pagep = NULL;
			page_info->last_page_user = true;
		}

		prev_page_info = page_info;
		queue_head_inc(rxq);
		page_info = &page_info_tbl[rxq->head];
	}
	if (pagep)
		prev_page_info->last_page_user = true;

	if (posted) {
		atomic_add(posted, &rxq->used);
		be_rxq_notify(adapter, rxq->id, posted);
	} else if (atomic_read(&rxq->used) == 0) {
		/* Let be_worker replenish when memory is available */
		rxo->rx_post_starved = true;
	}
}

static struct be_eth_tx_compl *be_tx_compl_get(struct be_queue_info *tx_cq)
{
	struct be_eth_tx_compl *txcp = queue_tail_node(tx_cq);

	if (txcp->dw[offsetof(struct amap_eth_tx_compl, valid) / 32] == 0)
		return NULL;

	rmb();
	be_dws_le_to_cpu(txcp, sizeof(*txcp));

	txcp->dw[offsetof(struct amap_eth_tx_compl, valid) / 32] = 0;

	queue_tail_inc(tx_cq);
	return txcp;
}

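/* Reap one transmitted skb: walk its WRBs from the queue tail up to
 * last_index, unmapping the DMA buffer behind each data WRB, then free
 * the skb and return the WRB slots to the ring.
 */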
static void be_tx_compl_process(struct be_adapter *adapter, u16 last_index)
{
	struct be_queue_info *txq = &adapter->tx_obj.q;
	struct be_eth_wrb *wrb;
	struct sk_buff **sent_skbs = adapter->tx_obj.sent_skb_list;
	struct sk_buff *sent_skb;
	u16 cur_index, num_wrbs = 1; /* account for hdr wrb */
	bool unmap_skb_hdr = true;

	sent_skb = sent_skbs[txq->tail];
	BUG_ON(!sent_skb);
	sent_skbs[txq->tail] = NULL;

	/* skip header wrb */
	queue_tail_inc(txq);

	do {
		cur_index = txq->tail;
		wrb = queue_tail_node(txq);
		unmap_tx_frag(adapter->pdev, wrb, (unmap_skb_hdr &&
					skb_headlen(sent_skb)));
		unmap_skb_hdr = false;

		num_wrbs++;
		queue_tail_inc(txq);
	} while (cur_index != last_index);

	atomic_sub(num_wrbs, &txq->used);

	kfree_skb(sent_skb);
}

static inline struct be_eq_entry *event_get(struct be_eq_obj *eq_obj)
{
	struct be_eq_entry *eqe = queue_tail_node(&eq_obj->q);

	if (!eqe->evt)
		return NULL;

	rmb();
	eqe->evt = le32_to_cpu(eqe->evt);
	queue_tail_inc(&eq_obj->q);
	return eqe;
}

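/* Drain all pending entries from an event queue, ack them to hardware
 * with the EQ re-armed, and kick NAPI if any events were found.
 */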
static int event_handle(struct be_adapter *adapter,
			struct be_eq_obj *eq_obj)
{
	struct be_eq_entry *eqe;
	u16 num = 0;

	while ((eqe = event_get(eq_obj)) != NULL) {
		eqe->evt = 0;
		num++;
	}

	/* Deal with any spurious interrupts that come
	 * without events
	 */
	be_eq_notify(adapter, eq_obj->q.id, true, true, num);
	if (num)
		napi_schedule(&eq_obj->napi);

	return num;
}

1301/* Just read and notify events without processing them.
1302 * Used at the time of destroying event queues */
1303static void be_eq_clean(struct be_adapter *adapter,
1304 struct be_eq_obj *eq_obj)
1305{
1306 struct be_eq_entry *eqe;
1307 u16 num = 0;
1308
1309 while ((eqe = event_get(eq_obj)) != NULL) {
1310 eqe->evt = 0;
1311 num++;
1312 }
1313
1314 if (num)
1315 be_eq_notify(adapter, eq_obj->q.id, false, true, num);
1316}
1317
Sathya Perla3abcded2010-10-03 22:12:27 -07001318static void be_rx_q_clean(struct be_adapter *adapter, struct be_rx_obj *rxo)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001319{
1320 struct be_rx_page_info *page_info;
Sathya Perla3abcded2010-10-03 22:12:27 -07001321 struct be_queue_info *rxq = &rxo->q;
1322 struct be_queue_info *rx_cq = &rxo->cq;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001323 struct be_eth_rx_compl *rxcp;
1324 u16 tail;
1325
1326 /* First cleanup pending rx completions */
Sathya Perla3abcded2010-10-03 22:12:27 -07001327 while ((rxcp = be_rx_compl_get(rxo)) != NULL) {
1328 be_rx_compl_discard(adapter, rxo, rxcp);
Sathya Perlaa7a0ef32009-06-10 02:23:28 +00001329 be_rx_compl_reset(rxcp);
Sathya Perla8788fdc2009-07-27 22:52:03 +00001330 be_cq_notify(adapter, rx_cq->id, true, 1);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001331 }
1332
 1333	/* Then free posted rx buffers that were not used */
1334 tail = (rxq->head + rxq->len - atomic_read(&rxq->used)) % rxq->len;
Sathya Perlacdab23b2009-08-10 03:43:23 +00001335 for (; atomic_read(&rxq->used) > 0; index_inc(&tail, rxq->len)) {
Sathya Perla3abcded2010-10-03 22:12:27 -07001336 page_info = get_rx_page_info(adapter, rxo, tail);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001337 put_page(page_info->page);
1338 memset(page_info, 0, sizeof(*page_info));
1339 }
1340 BUG_ON(atomic_read(&rxq->used));
1341}
1342
Sathya Perlaa8e91792009-08-10 03:42:43 +00001343static void be_tx_compl_clean(struct be_adapter *adapter)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001344{
Sathya Perlaa8e91792009-08-10 03:42:43 +00001345 struct be_queue_info *tx_cq = &adapter->tx_obj.cq;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001346 struct be_queue_info *txq = &adapter->tx_obj.q;
Sathya Perlaa8e91792009-08-10 03:42:43 +00001347 struct be_eth_tx_compl *txcp;
1348 u16 end_idx, cmpl = 0, timeo = 0;
Sathya Perlab03388d2010-02-18 00:37:17 +00001349 struct sk_buff **sent_skbs = adapter->tx_obj.sent_skb_list;
1350 struct sk_buff *sent_skb;
1351 bool dummy_wrb;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001352
Sathya Perlaa8e91792009-08-10 03:42:43 +00001353 /* Wait for a max of 200ms for all the tx-completions to arrive. */
1354 do {
1355 while ((txcp = be_tx_compl_get(tx_cq))) {
1356 end_idx = AMAP_GET_BITS(struct amap_eth_tx_compl,
1357 wrb_index, txcp);
1358 be_tx_compl_process(adapter, end_idx);
1359 cmpl++;
1360 }
1361 if (cmpl) {
1362 be_cq_notify(adapter, tx_cq->id, false, cmpl);
1363 cmpl = 0;
1364 }
1365
1366 if (atomic_read(&txq->used) == 0 || ++timeo > 200)
1367 break;
1368
1369 mdelay(1);
1370 } while (true);
1371
1372 if (atomic_read(&txq->used))
1373 dev_err(&adapter->pdev->dev, "%d pending tx-completions\n",
1374 atomic_read(&txq->used));
Sathya Perlab03388d2010-02-18 00:37:17 +00001375
1376 /* free posted tx for which compls will never arrive */
1377 while (atomic_read(&txq->used)) {
1378 sent_skb = sent_skbs[txq->tail];
1379 end_idx = txq->tail;
1380 index_adv(&end_idx,
1381 wrb_cnt_for_skb(sent_skb, &dummy_wrb) - 1, txq->len);
1382 be_tx_compl_process(adapter, end_idx);
1383 }
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001384}
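/*
 * Timing note (derived from the loop above): up to 200 passes of
 * mdelay(1) give the 200ms bound mentioned in the comment; anything
 * still posted after that is reclaimed by walking the ring with
 * wrb_cnt_for_skb(), so no skb is leaked on ifdown.
 */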
1385
Sathya Perla5fb379e2009-06-18 00:02:59 +00001386static void be_mcc_queues_destroy(struct be_adapter *adapter)
1387{
1388 struct be_queue_info *q;
Sathya Perla5fb379e2009-06-18 00:02:59 +00001389
Sathya Perla8788fdc2009-07-27 22:52:03 +00001390 q = &adapter->mcc_obj.q;
Sathya Perla5fb379e2009-06-18 00:02:59 +00001391 if (q->created)
Sathya Perla8788fdc2009-07-27 22:52:03 +00001392 be_cmd_q_destroy(adapter, q, QTYPE_MCCQ);
Sathya Perla5fb379e2009-06-18 00:02:59 +00001393 be_queue_free(adapter, q);
1394
Sathya Perla8788fdc2009-07-27 22:52:03 +00001395 q = &adapter->mcc_obj.cq;
Sathya Perla5fb379e2009-06-18 00:02:59 +00001396 if (q->created)
Sathya Perla8788fdc2009-07-27 22:52:03 +00001397 be_cmd_q_destroy(adapter, q, QTYPE_CQ);
Sathya Perla5fb379e2009-06-18 00:02:59 +00001398 be_queue_free(adapter, q);
1399}
1400
 1401/* Must be called only after TX qs are created, as MCC shares the TX EQ */
1402static int be_mcc_queues_create(struct be_adapter *adapter)
1403{
1404 struct be_queue_info *q, *cq;
Sathya Perla5fb379e2009-06-18 00:02:59 +00001405
1406 /* Alloc MCC compl queue */
Sathya Perla8788fdc2009-07-27 22:52:03 +00001407 cq = &adapter->mcc_obj.cq;
Sathya Perla5fb379e2009-06-18 00:02:59 +00001408 if (be_queue_alloc(adapter, cq, MCC_CQ_LEN,
Sathya Perlaefd2e402009-07-27 22:53:10 +00001409 sizeof(struct be_mcc_compl)))
Sathya Perla5fb379e2009-06-18 00:02:59 +00001410 goto err;
1411
1412 /* Ask BE to create MCC compl queue; share TX's eq */
Sathya Perla8788fdc2009-07-27 22:52:03 +00001413 if (be_cmd_cq_create(adapter, cq, &adapter->tx_eq.q, false, true, 0))
Sathya Perla5fb379e2009-06-18 00:02:59 +00001414 goto mcc_cq_free;
1415
1416 /* Alloc MCC queue */
Sathya Perla8788fdc2009-07-27 22:52:03 +00001417 q = &adapter->mcc_obj.q;
Sathya Perla5fb379e2009-06-18 00:02:59 +00001418 if (be_queue_alloc(adapter, q, MCC_Q_LEN, sizeof(struct be_mcc_wrb)))
1419 goto mcc_cq_destroy;
1420
1421 /* Ask BE to create MCC queue */
Sathya Perla8788fdc2009-07-27 22:52:03 +00001422 if (be_cmd_mccq_create(adapter, q, cq))
Sathya Perla5fb379e2009-06-18 00:02:59 +00001423 goto mcc_q_free;
1424
1425 return 0;
1426
1427mcc_q_free:
1428 be_queue_free(adapter, q);
1429mcc_cq_destroy:
Sathya Perla8788fdc2009-07-27 22:52:03 +00001430 be_cmd_q_destroy(adapter, cq, QTYPE_CQ);
Sathya Perla5fb379e2009-06-18 00:02:59 +00001431mcc_cq_free:
1432 be_queue_free(adapter, cq);
1433err:
1434 return -1;
1435}
1436
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001437static void be_tx_queues_destroy(struct be_adapter *adapter)
1438{
1439 struct be_queue_info *q;
1440
1441 q = &adapter->tx_obj.q;
Sathya Perlaa8e91792009-08-10 03:42:43 +00001442 if (q->created)
Sathya Perla8788fdc2009-07-27 22:52:03 +00001443 be_cmd_q_destroy(adapter, q, QTYPE_TXQ);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001444 be_queue_free(adapter, q);
1445
1446 q = &adapter->tx_obj.cq;
1447 if (q->created)
Sathya Perla8788fdc2009-07-27 22:52:03 +00001448 be_cmd_q_destroy(adapter, q, QTYPE_CQ);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001449 be_queue_free(adapter, q);
1450
Sathya Perla859b1e42009-08-10 03:43:51 +00001451 /* Clear any residual events */
1452 be_eq_clean(adapter, &adapter->tx_eq);
1453
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001454 q = &adapter->tx_eq.q;
1455 if (q->created)
Sathya Perla8788fdc2009-07-27 22:52:03 +00001456 be_cmd_q_destroy(adapter, q, QTYPE_EQ);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001457 be_queue_free(adapter, q);
1458}
1459
1460static int be_tx_queues_create(struct be_adapter *adapter)
1461{
1462 struct be_queue_info *eq, *q, *cq;
1463
1464 adapter->tx_eq.max_eqd = 0;
1465 adapter->tx_eq.min_eqd = 0;
1466 adapter->tx_eq.cur_eqd = 96;
1467 adapter->tx_eq.enable_aic = false;
1468 /* Alloc Tx Event queue */
1469 eq = &adapter->tx_eq.q;
1470 if (be_queue_alloc(adapter, eq, EVNT_Q_LEN, sizeof(struct be_eq_entry)))
1471 return -1;
1472
1473 /* Ask BE to create Tx Event queue */
Sathya Perla8788fdc2009-07-27 22:52:03 +00001474 if (be_cmd_eq_create(adapter, eq, adapter->tx_eq.cur_eqd))
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001475 goto tx_eq_free;
Sarveshwar Bandiba343c72010-03-31 02:56:12 +00001476 adapter->base_eq_id = adapter->tx_eq.q.id;
1477
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001478 /* Alloc TX eth compl queue */
1479 cq = &adapter->tx_obj.cq;
1480 if (be_queue_alloc(adapter, cq, TX_CQ_LEN,
1481 sizeof(struct be_eth_tx_compl)))
1482 goto tx_eq_destroy;
1483
1484 /* Ask BE to create Tx eth compl queue */
Sathya Perla8788fdc2009-07-27 22:52:03 +00001485 if (be_cmd_cq_create(adapter, cq, eq, false, false, 3))
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001486 goto tx_cq_free;
1487
1488 /* Alloc TX eth queue */
1489 q = &adapter->tx_obj.q;
1490 if (be_queue_alloc(adapter, q, TX_Q_LEN, sizeof(struct be_eth_wrb)))
1491 goto tx_cq_destroy;
1492
1493 /* Ask BE to create Tx eth queue */
Sathya Perla8788fdc2009-07-27 22:52:03 +00001494 if (be_cmd_txq_create(adapter, q, cq))
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001495 goto tx_q_free;
1496 return 0;
1497
1498tx_q_free:
1499 be_queue_free(adapter, q);
1500tx_cq_destroy:
Sathya Perla8788fdc2009-07-27 22:52:03 +00001501 be_cmd_q_destroy(adapter, cq, QTYPE_CQ);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001502tx_cq_free:
1503 be_queue_free(adapter, cq);
1504tx_eq_destroy:
Sathya Perla8788fdc2009-07-27 22:52:03 +00001505 be_cmd_q_destroy(adapter, eq, QTYPE_EQ);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001506tx_eq_free:
1507 be_queue_free(adapter, eq);
1508 return -1;
1509}
1510
1511static void be_rx_queues_destroy(struct be_adapter *adapter)
1512{
1513 struct be_queue_info *q;
Sathya Perla3abcded2010-10-03 22:12:27 -07001514 struct be_rx_obj *rxo;
1515 int i;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001516
Sathya Perla3abcded2010-10-03 22:12:27 -07001517 for_all_rx_queues(adapter, rxo, i) {
1518 q = &rxo->q;
1519 if (q->created) {
1520 be_cmd_q_destroy(adapter, q, QTYPE_RXQ);
1521 /* After the rxq is invalidated, wait for a grace time
1522 * of 1ms for all dma to end and the flush compl to
1523 * arrive
1524 */
1525 mdelay(1);
1526 be_rx_q_clean(adapter, rxo);
1527 }
1528 be_queue_free(adapter, q);
Sathya Perla89420422010-02-17 01:35:26 +00001529
Sathya Perla3abcded2010-10-03 22:12:27 -07001530 q = &rxo->cq;
1531 if (q->created)
1532 be_cmd_q_destroy(adapter, q, QTYPE_CQ);
1533 be_queue_free(adapter, q);
1534
1535 /* Clear any residual events */
1536 q = &rxo->rx_eq.q;
1537 if (q->created) {
1538 be_eq_clean(adapter, &rxo->rx_eq);
1539 be_cmd_q_destroy(adapter, q, QTYPE_EQ);
1540 }
1541 be_queue_free(adapter, q);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001542 }
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001543}
1544
1545static int be_rx_queues_create(struct be_adapter *adapter)
1546{
1547 struct be_queue_info *eq, *q, *cq;
Sathya Perla3abcded2010-10-03 22:12:27 -07001548 struct be_rx_obj *rxo;
1549 int rc, i;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001550
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001551 adapter->big_page_size = (1 << get_order(rx_frag_size)) * PAGE_SIZE;
Sathya Perla3abcded2010-10-03 22:12:27 -07001552 for_all_rx_queues(adapter, rxo, i) {
1553 rxo->adapter = adapter;
1554 rxo->rx_eq.max_eqd = BE_MAX_EQD;
1555 rxo->rx_eq.enable_aic = true;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001556
Sathya Perla3abcded2010-10-03 22:12:27 -07001557 /* EQ */
1558 eq = &rxo->rx_eq.q;
1559 rc = be_queue_alloc(adapter, eq, EVNT_Q_LEN,
1560 sizeof(struct be_eq_entry));
1561 if (rc)
1562 goto err;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001563
Sathya Perla3abcded2010-10-03 22:12:27 -07001564 rc = be_cmd_eq_create(adapter, eq, rxo->rx_eq.cur_eqd);
1565 if (rc)
1566 goto err;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001567
Sathya Perla3abcded2010-10-03 22:12:27 -07001568 /* CQ */
1569 cq = &rxo->cq;
1570 rc = be_queue_alloc(adapter, cq, RX_CQ_LEN,
1571 sizeof(struct be_eth_rx_compl));
1572 if (rc)
1573 goto err;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001574
Sathya Perla3abcded2010-10-03 22:12:27 -07001575 rc = be_cmd_cq_create(adapter, cq, eq, false, false, 3);
1576 if (rc)
1577 goto err;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001578
Sathya Perla3abcded2010-10-03 22:12:27 -07001579 /* Rx Q */
1580 q = &rxo->q;
1581 rc = be_queue_alloc(adapter, q, RX_Q_LEN,
1582 sizeof(struct be_eth_rx_d));
1583 if (rc)
1584 goto err;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001585
Sathya Perla3abcded2010-10-03 22:12:27 -07001586 rc = be_cmd_rxq_create(adapter, q, cq->id, rx_frag_size,
1587 BE_MAX_JUMBO_FRAME_SIZE, adapter->if_handle,
1588 (i > 0) ? 1 : 0/* rss enable */, &rxo->rss_id);
1589 if (rc)
1590 goto err;
1591 }
1592
1593 if (be_multi_rxq(adapter)) {
1594 u8 rsstable[MAX_RSS_QS];
1595
1596 for_all_rss_queues(adapter, rxo, i)
1597 rsstable[i] = rxo->rss_id;
1598
1599 rc = be_cmd_rss_config(adapter, rsstable,
1600 adapter->num_rx_qs - 1);
1601 if (rc)
1602 goto err;
1603 }
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001604
1605 return 0;
Sathya Perla3abcded2010-10-03 22:12:27 -07001606err:
1607 be_rx_queues_destroy(adapter);
1608 return -1;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001609}
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001610
Sathya Perlab628bde2009-08-17 00:58:26 +00001611/* There are 8 evt ids per func. Returns the evt id's bit number */
1612static inline int be_evt_bit_get(struct be_adapter *adapter, u32 eq_id)
1613{
Sarveshwar Bandiba343c72010-03-31 02:56:12 +00001614 return eq_id - adapter->base_eq_id;
Sathya Perlab628bde2009-08-17 00:58:26 +00001615}
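/*
 * Worked example (illustrative): the TX EQ is created first, so
 * base_eq_id == tx_eq.q.id. With base_eq_id == 8, an RX EQ with id 10
 * maps to bit 2, and be_intx() below tests it as
 *
 *	isr & (1 << be_evt_bit_get(adapter, 10))
 */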
1616
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001617static irqreturn_t be_intx(int irq, void *dev)
1618{
1619 struct be_adapter *adapter = dev;
Sathya Perla3abcded2010-10-03 22:12:27 -07001620 struct be_rx_obj *rxo;
1621 int isr, i;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001622
Sathya Perla8788fdc2009-07-27 22:52:03 +00001623 isr = ioread32(adapter->csr + CEV_ISR0_OFFSET +
Sathya Perla55bdeed2010-02-02 07:48:40 -08001624			(adapter->tx_eq.q.id / 8) * CEV_ISR_SIZE);
Sathya Perlac001c212009-07-01 01:06:07 +00001625 if (!isr)
Sathya Perla8788fdc2009-07-27 22:52:03 +00001626 return IRQ_NONE;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001627
Sathya Perla3abcded2010-10-03 22:12:27 -07001628 if ((1 << be_evt_bit_get(adapter, adapter->tx_eq.q.id) & isr))
1629 event_handle(adapter, &adapter->tx_eq);
1630
1631 for_all_rx_queues(adapter, rxo, i) {
1632 if ((1 << be_evt_bit_get(adapter, rxo->rx_eq.q.id) & isr))
1633 event_handle(adapter, &rxo->rx_eq);
1634 }
Sathya Perlac001c212009-07-01 01:06:07 +00001635
Sathya Perla8788fdc2009-07-27 22:52:03 +00001636 return IRQ_HANDLED;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001637}
1638
1639static irqreturn_t be_msix_rx(int irq, void *dev)
1640{
Sathya Perla3abcded2010-10-03 22:12:27 -07001641 struct be_rx_obj *rxo = dev;
1642 struct be_adapter *adapter = rxo->adapter;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001643
Sathya Perla3abcded2010-10-03 22:12:27 -07001644 event_handle(adapter, &rxo->rx_eq);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001645
1646 return IRQ_HANDLED;
1647}
1648
Sathya Perla5fb379e2009-06-18 00:02:59 +00001649static irqreturn_t be_msix_tx_mcc(int irq, void *dev)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001650{
1651 struct be_adapter *adapter = dev;
1652
Sathya Perla8788fdc2009-07-27 22:52:03 +00001653 event_handle(adapter, &adapter->tx_eq);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001654
1655 return IRQ_HANDLED;
1656}
1657
Sathya Perla3abcded2010-10-03 22:12:27 -07001658static inline bool do_gro(struct be_adapter *adapter, struct be_rx_obj *rxo,
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001659 struct be_eth_rx_compl *rxcp)
1660{
1661 int err = AMAP_GET_BITS(struct amap_eth_rx_compl, err, rxcp);
1662 int tcp_frame = AMAP_GET_BITS(struct amap_eth_rx_compl, tcpf, rxcp);
1663
1664 if (err)
Sathya Perla3abcded2010-10-03 22:12:27 -07001665 rxo->stats.rxcp_err++;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001666
Ajit Khaparde5be93b92009-07-21 12:36:19 -07001667	return tcp_frame && !err;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001668}
1669
1670int be_poll_rx(struct napi_struct *napi, int budget)
1671{
1672 struct be_eq_obj *rx_eq = container_of(napi, struct be_eq_obj, napi);
Sathya Perla3abcded2010-10-03 22:12:27 -07001673 struct be_rx_obj *rxo = container_of(rx_eq, struct be_rx_obj, rx_eq);
1674 struct be_adapter *adapter = rxo->adapter;
1675 struct be_queue_info *rx_cq = &rxo->cq;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001676 struct be_eth_rx_compl *rxcp;
1677 u32 work_done;
1678
Sathya Perla3abcded2010-10-03 22:12:27 -07001679 rxo->stats.rx_polls++;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001680 for (work_done = 0; work_done < budget; work_done++) {
Sathya Perla3abcded2010-10-03 22:12:27 -07001681 rxcp = be_rx_compl_get(rxo);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001682 if (!rxcp)
1683 break;
1684
Sathya Perla3abcded2010-10-03 22:12:27 -07001685 if (do_gro(adapter, rxo, rxcp))
1686 be_rx_compl_process_gro(adapter, rxo, rxcp);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001687 else
Sathya Perla3abcded2010-10-03 22:12:27 -07001688 be_rx_compl_process(adapter, rxo, rxcp);
Sathya Perlaa7a0ef32009-06-10 02:23:28 +00001689
1690 be_rx_compl_reset(rxcp);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001691 }
1692
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001693 /* Refill the queue */
Sathya Perla3abcded2010-10-03 22:12:27 -07001694 if (atomic_read(&rxo->q.used) < RX_FRAGS_REFILL_WM)
1695 be_post_rx_frags(rxo);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001696
1697 /* All consumed */
1698 if (work_done < budget) {
1699 napi_complete(napi);
Sathya Perla8788fdc2009-07-27 22:52:03 +00001700 be_cq_notify(adapter, rx_cq->id, true, work_done);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001701 } else {
1702 /* More to be consumed; continue with interrupts disabled */
Sathya Perla8788fdc2009-07-27 22:52:03 +00001703 be_cq_notify(adapter, rx_cq->id, false, work_done);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001704 }
1705 return work_done;
1706}
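/*
 * NAPI contract, restated with an example: budget = 64 and 10
 * completions means napi_complete() runs and the CQ is re-armed, so
 * interrupt-driven operation resumes; 64 completions means the CQ is
 * notified unarmed and the core calls be_poll_rx() again in polling
 * mode.
 */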
1707
Sathya Perlaf31e50a2010-03-02 03:56:39 -08001708/* As TX and MCC share the same EQ, check for both TX and MCC completions.
1709 * For TX/MCC we don't honour budget; consume everything
1710 */
1711static int be_poll_tx_mcc(struct napi_struct *napi, int budget)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001712{
Sathya Perlaf31e50a2010-03-02 03:56:39 -08001713 struct be_eq_obj *tx_eq = container_of(napi, struct be_eq_obj, napi);
1714 struct be_adapter *adapter =
1715 container_of(tx_eq, struct be_adapter, tx_eq);
Sathya Perla5fb379e2009-06-18 00:02:59 +00001716 struct be_queue_info *txq = &adapter->tx_obj.q;
1717 struct be_queue_info *tx_cq = &adapter->tx_obj.cq;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001718 struct be_eth_tx_compl *txcp;
Sathya Perlaf31e50a2010-03-02 03:56:39 -08001719 int tx_compl = 0, mcc_compl, status = 0;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001720 u16 end_idx;
1721
Sathya Perla5fb379e2009-06-18 00:02:59 +00001722 while ((txcp = be_tx_compl_get(tx_cq))) {
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001723 end_idx = AMAP_GET_BITS(struct amap_eth_tx_compl,
Sathya Perlaf31e50a2010-03-02 03:56:39 -08001724 wrb_index, txcp);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001725 be_tx_compl_process(adapter, end_idx);
Sathya Perlaf31e50a2010-03-02 03:56:39 -08001726 tx_compl++;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001727 }
1728
Sathya Perlaf31e50a2010-03-02 03:56:39 -08001729 mcc_compl = be_process_mcc(adapter, &status);
1730
1731 napi_complete(napi);
1732
1733 if (mcc_compl) {
1734 struct be_mcc_obj *mcc_obj = &adapter->mcc_obj;
1735 be_cq_notify(adapter, mcc_obj->cq.id, true, mcc_compl);
1736 }
1737
1738 if (tx_compl) {
1739 be_cq_notify(adapter, adapter->tx_obj.cq.id, true, tx_compl);
Sathya Perla5fb379e2009-06-18 00:02:59 +00001740
1741 /* As Tx wrbs have been freed up, wake up netdev queue if
1742 * it was stopped due to lack of tx wrbs.
1743 */
1744 if (netif_queue_stopped(adapter->netdev) &&
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001745 atomic_read(&txq->used) < txq->len / 2) {
Sathya Perla5fb379e2009-06-18 00:02:59 +00001746 netif_wake_queue(adapter->netdev);
1747 }
1748
Sathya Perla3abcded2010-10-03 22:12:27 -07001749 tx_stats(adapter)->be_tx_events++;
1750 tx_stats(adapter)->be_tx_compl += tx_compl;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001751 }
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001752
1753 return 1;
1754}
1755
Ajit Khaparded053de92010-09-03 06:23:30 +00001756void be_detect_dump_ue(struct be_adapter *adapter)
Ajit Khaparde7c185272010-07-29 06:16:33 +00001757{
1758 u32 ue_status_lo, ue_status_hi, ue_status_lo_mask, ue_status_hi_mask;
1759 u32 i;
1760
1761 pci_read_config_dword(adapter->pdev,
1762 PCICFG_UE_STATUS_LOW, &ue_status_lo);
1763 pci_read_config_dword(adapter->pdev,
1764 PCICFG_UE_STATUS_HIGH, &ue_status_hi);
1765 pci_read_config_dword(adapter->pdev,
1766 PCICFG_UE_STATUS_LOW_MASK, &ue_status_lo_mask);
1767 pci_read_config_dword(adapter->pdev,
1768 PCICFG_UE_STATUS_HI_MASK, &ue_status_hi_mask);
1769
1770 ue_status_lo = (ue_status_lo & (~ue_status_lo_mask));
1771 ue_status_hi = (ue_status_hi & (~ue_status_hi_mask));
1772
Ajit Khaparded053de92010-09-03 06:23:30 +00001773 if (ue_status_lo || ue_status_hi) {
1774 adapter->ue_detected = true;
1775 dev_err(&adapter->pdev->dev, "UE Detected!!\n");
1776 }
1777
Ajit Khaparde7c185272010-07-29 06:16:33 +00001778 if (ue_status_lo) {
1779 for (i = 0; ue_status_lo; ue_status_lo >>= 1, i++) {
1780 if (ue_status_lo & 1)
1781 dev_err(&adapter->pdev->dev,
1782 "UE: %s bit set\n", ue_status_low_desc[i]);
1783 }
1784 }
1785 if (ue_status_hi) {
1786 for (i = 0; ue_status_hi; ue_status_hi >>= 1, i++) {
1787 if (ue_status_hi & 1)
1788 dev_err(&adapter->pdev->dev,
1789 "UE: %s bit set\n", ue_status_hi_desc[i]);
1790 }
1791 }
1793}
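/*
 * Worked example (illustrative): a masked ue_status_lo of 0x12 has
 * bits 1 and 4 set; the loop above shifts the word right five times
 * and prints the descriptor strings at indexes 1 and 4 of the
 * low-status table.
 */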
1794
Sathya Perlaea1dae12009-03-19 23:56:20 -07001795static void be_worker(struct work_struct *work)
1796{
1797 struct be_adapter *adapter =
1798 container_of(work, struct be_adapter, work.work);
Sathya Perla3abcded2010-10-03 22:12:27 -07001799 struct be_rx_obj *rxo;
1800 int i;
Sathya Perlaea1dae12009-03-19 23:56:20 -07001801
Ajit Khaparde0fc48c32010-07-29 06:18:58 +00001802 if (!adapter->stats_ioctl_sent)
Sathya Perla3abcded2010-10-03 22:12:27 -07001803 be_cmd_get_stats(adapter, &adapter->stats_cmd);
Sathya Perlaea1dae12009-03-19 23:56:20 -07001804
Sathya Perla4097f662009-03-24 16:40:13 -07001805 be_tx_rate_update(adapter);
Sathya Perla4097f662009-03-24 16:40:13 -07001806
Sathya Perla3abcded2010-10-03 22:12:27 -07001807 for_all_rx_queues(adapter, rxo, i) {
1808 be_rx_rate_update(rxo);
1809 be_rx_eqd_update(adapter, rxo);
1810
1811 if (rxo->rx_post_starved) {
1812 rxo->rx_post_starved = false;
1813 be_post_rx_frags(rxo);
1814 }
Sathya Perlaea1dae12009-03-19 23:56:20 -07001815 }
Sathya Perla3abcded2010-10-03 22:12:27 -07001816
Ajit Khaparded053de92010-09-03 06:23:30 +00001817 if (!adapter->ue_detected)
1818 be_detect_dump_ue(adapter);
Sathya Perlaea1dae12009-03-19 23:56:20 -07001819
1820 schedule_delayed_work(&adapter->work, msecs_to_jiffies(1000));
1821}
1822
Sathya Perla8d56ff12009-11-22 22:02:26 +00001823static void be_msix_disable(struct be_adapter *adapter)
1824{
1825 if (adapter->msix_enabled) {
1826 pci_disable_msix(adapter->pdev);
1827 adapter->msix_enabled = false;
1828 }
1829}
1830
Sathya Perla3abcded2010-10-03 22:12:27 -07001831static int be_num_rxqs_get(struct be_adapter *adapter)
1832{
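	/* The 0x400 below is assumed to be the multi-channel (FLEX10)
	 * function_mode bit.
	 */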
1833 if (multi_rxq && (adapter->function_caps & BE_FUNCTION_CAPS_RSS) &&
1834 !adapter->sriov_enabled && !(adapter->function_mode & 0x400)) {
1835 return 1 + MAX_RSS_QS; /* one default non-RSS queue */
1836 } else {
1837 dev_warn(&adapter->pdev->dev,
1838 "No support for multiple RX queues\n");
1839 return 1;
1840 }
1841}
1842
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001843static void be_msix_enable(struct be_adapter *adapter)
1844{
Sathya Perla3abcded2010-10-03 22:12:27 -07001845#define BE_MIN_MSIX_VECTORS (1 + 1) /* Rx + Tx */
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001846 int i, status;
1847
Sathya Perla3abcded2010-10-03 22:12:27 -07001848 adapter->num_rx_qs = be_num_rxqs_get(adapter);
1849
1850 for (i = 0; i < (adapter->num_rx_qs + 1); i++)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001851 adapter->msix_entries[i].entry = i;
1852
1853 status = pci_enable_msix(adapter->pdev, adapter->msix_entries,
Sathya Perla3abcded2010-10-03 22:12:27 -07001854 adapter->num_rx_qs + 1);
1855 if (status == 0) {
1856 goto done;
1857 } else if (status >= BE_MIN_MSIX_VECTORS) {
1858 if (pci_enable_msix(adapter->pdev, adapter->msix_entries,
1859 status) == 0) {
1860 adapter->num_rx_qs = status - 1;
1861 dev_warn(&adapter->pdev->dev,
1862 "Could alloc only %d MSIx vectors. "
1863 "Using %d RX Qs\n", status, adapter->num_rx_qs);
1864 goto done;
1865 }
1866 }
1867 return;
1868done:
1869 adapter->msix_enabled = true;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001870}
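/*
 * Fallback semantics used above (old-style pci_enable_msix(): 0 on
 * success, a positive count when fewer vectors are available): asking
 * for num_rx_qs + 1 == 5 on a device that grants only 3 makes the
 * first call return 3; the retry with 3 succeeds and num_rx_qs drops
 * to 2. A count below BE_MIN_MSIX_VECTORS leaves MSI-X off and the
 * driver falls back to INTx.
 */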
1871
Sarveshwar Bandiba343c72010-03-31 02:56:12 +00001872static void be_sriov_enable(struct be_adapter *adapter)
1873{
Sarveshwar Bandi344dbf12010-07-09 01:43:55 +00001874 be_check_sriov_fn_type(adapter);
Ajit Khaparde6dedec82010-07-29 06:15:32 +00001875#ifdef CONFIG_PCI_IOV
Sarveshwar Bandiba343c72010-03-31 02:56:12 +00001876 if (be_physfn(adapter) && num_vfs) {
Ajit Khaparde6dedec82010-07-29 06:15:32 +00001877 int status;
1878
Sarveshwar Bandiba343c72010-03-31 02:56:12 +00001879 status = pci_enable_sriov(adapter->pdev, num_vfs);
1880 adapter->sriov_enabled = status ? false : true;
1881 }
1882#endif
Sarveshwar Bandiba343c72010-03-31 02:56:12 +00001883}
1884
1885static void be_sriov_disable(struct be_adapter *adapter)
1886{
1887#ifdef CONFIG_PCI_IOV
1888 if (adapter->sriov_enabled) {
1889 pci_disable_sriov(adapter->pdev);
1890 adapter->sriov_enabled = false;
1891 }
1892#endif
1893}
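/*
 * Usage sketch (assuming the module is loaded as be2net): VFs are
 * carved out once at probe via the num_vfs module parameter, e.g.
 *
 *	modprobe be2net num_vfs=4
 *
 * and released here on PF driver unload.
 */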
1894
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001895static inline int be_msix_vec_get(struct be_adapter *adapter, u32 eq_id)
1896{
Sathya Perlab628bde2009-08-17 00:58:26 +00001897 return adapter->msix_entries[
1898 be_evt_bit_get(adapter, eq_id)].vector;
1899}
1900
1901static int be_request_irq(struct be_adapter *adapter,
1902 struct be_eq_obj *eq_obj,
Sathya Perla3abcded2010-10-03 22:12:27 -07001903 void *handler, char *desc, void *context)
Sathya Perlab628bde2009-08-17 00:58:26 +00001904{
1905 struct net_device *netdev = adapter->netdev;
1906 int vec;
1907
1908 sprintf(eq_obj->desc, "%s-%s", netdev->name, desc);
1909 vec = be_msix_vec_get(adapter, eq_obj->q.id);
Sathya Perla3abcded2010-10-03 22:12:27 -07001910 return request_irq(vec, handler, 0, eq_obj->desc, context);
Sathya Perlab628bde2009-08-17 00:58:26 +00001911}
1912
Sathya Perla3abcded2010-10-03 22:12:27 -07001913static void be_free_irq(struct be_adapter *adapter, struct be_eq_obj *eq_obj,
1914 void *context)
Sathya Perlab628bde2009-08-17 00:58:26 +00001915{
1916 int vec = be_msix_vec_get(adapter, eq_obj->q.id);
Sathya Perla3abcded2010-10-03 22:12:27 -07001917 free_irq(vec, context);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001918}
1919
1920static int be_msix_register(struct be_adapter *adapter)
1921{
Sathya Perla3abcded2010-10-03 22:12:27 -07001922 struct be_rx_obj *rxo;
1923 int status, i;
1924 char qname[10];
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001925
Sathya Perla3abcded2010-10-03 22:12:27 -07001926 status = be_request_irq(adapter, &adapter->tx_eq, be_msix_tx_mcc, "tx",
1927 adapter);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001928 if (status)
1929 goto err;
1930
Sathya Perla3abcded2010-10-03 22:12:27 -07001931 for_all_rx_queues(adapter, rxo, i) {
1932 sprintf(qname, "rxq%d", i);
1933 status = be_request_irq(adapter, &rxo->rx_eq, be_msix_rx,
1934 qname, rxo);
1935 if (status)
1936 goto err_msix;
1937 }
Sathya Perlab628bde2009-08-17 00:58:26 +00001938
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001939 return 0;
Sathya Perlab628bde2009-08-17 00:58:26 +00001940
Sathya Perla3abcded2010-10-03 22:12:27 -07001941err_msix:
1942 be_free_irq(adapter, &adapter->tx_eq, adapter);
1943
1944 for (i--, rxo = &adapter->rx_obj[i]; i >= 0; i--, rxo--)
1945 be_free_irq(adapter, &rxo->rx_eq, rxo);
1946
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001947err:
1948 dev_warn(&adapter->pdev->dev,
1949 "MSIX Request IRQ failed - err %d\n", status);
1950 pci_disable_msix(adapter->pdev);
1951 adapter->msix_enabled = false;
1952 return status;
1953}
1954
1955static int be_irq_register(struct be_adapter *adapter)
1956{
1957 struct net_device *netdev = adapter->netdev;
1958 int status;
1959
1960 if (adapter->msix_enabled) {
1961 status = be_msix_register(adapter);
1962 if (status == 0)
1963 goto done;
Sarveshwar Bandiba343c72010-03-31 02:56:12 +00001964 /* INTx is not supported for VF */
1965 if (!be_physfn(adapter))
1966 return status;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001967 }
1968
1969 /* INTx */
1970 netdev->irq = adapter->pdev->irq;
1971 status = request_irq(netdev->irq, be_intx, IRQF_SHARED, netdev->name,
1972 adapter);
1973 if (status) {
1974 dev_err(&adapter->pdev->dev,
1975 "INTx request IRQ failed - err %d\n", status);
1976 return status;
1977 }
1978done:
1979 adapter->isr_registered = true;
1980 return 0;
1981}
1982
1983static void be_irq_unregister(struct be_adapter *adapter)
1984{
1985 struct net_device *netdev = adapter->netdev;
Sathya Perla3abcded2010-10-03 22:12:27 -07001986 struct be_rx_obj *rxo;
1987 int i;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001988
1989 if (!adapter->isr_registered)
1990 return;
1991
1992 /* INTx */
1993 if (!adapter->msix_enabled) {
1994 free_irq(netdev->irq, adapter);
1995 goto done;
1996 }
1997
1998 /* MSIx */
Sathya Perla3abcded2010-10-03 22:12:27 -07001999 be_free_irq(adapter, &adapter->tx_eq, adapter);
2000
2001 for_all_rx_queues(adapter, rxo, i)
2002 be_free_irq(adapter, &rxo->rx_eq, rxo);
2003
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002004done:
2005 adapter->isr_registered = false;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002006}
2007
Sathya Perla889cd4b2010-05-30 23:33:45 +00002008static int be_close(struct net_device *netdev)
2009{
2010 struct be_adapter *adapter = netdev_priv(netdev);
Sathya Perla3abcded2010-10-03 22:12:27 -07002011 struct be_rx_obj *rxo;
Sathya Perla889cd4b2010-05-30 23:33:45 +00002012 struct be_eq_obj *tx_eq = &adapter->tx_eq;
Sathya Perla3abcded2010-10-03 22:12:27 -07002013 int vec, i;
Sathya Perla889cd4b2010-05-30 23:33:45 +00002014
2015 cancel_delayed_work_sync(&adapter->work);
2016
2017 be_async_mcc_disable(adapter);
2018
2019 netif_stop_queue(netdev);
2020 netif_carrier_off(netdev);
2021 adapter->link_up = false;
2022
2023 be_intr_set(adapter, false);
2024
2025 if (adapter->msix_enabled) {
2026 vec = be_msix_vec_get(adapter, tx_eq->q.id);
2027 synchronize_irq(vec);
Sathya Perla3abcded2010-10-03 22:12:27 -07002028
2029 for_all_rx_queues(adapter, rxo, i) {
2030 vec = be_msix_vec_get(adapter, rxo->rx_eq.q.id);
2031 synchronize_irq(vec);
2032 }
Sathya Perla889cd4b2010-05-30 23:33:45 +00002033 } else {
2034 synchronize_irq(netdev->irq);
2035 }
2036 be_irq_unregister(adapter);
2037
Sathya Perla3abcded2010-10-03 22:12:27 -07002038 for_all_rx_queues(adapter, rxo, i)
2039 napi_disable(&rxo->rx_eq.napi);
2040
Sathya Perla889cd4b2010-05-30 23:33:45 +00002041 napi_disable(&tx_eq->napi);
2042
2043 /* Wait for all pending tx completions to arrive so that
2044 * all tx skbs are freed.
2045 */
2046 be_tx_compl_clean(adapter);
2047
2048 return 0;
2049}
2050
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002051static int be_open(struct net_device *netdev)
2052{
2053 struct be_adapter *adapter = netdev_priv(netdev);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002054 struct be_eq_obj *tx_eq = &adapter->tx_eq;
Sathya Perla3abcded2010-10-03 22:12:27 -07002055 struct be_rx_obj *rxo;
Sathya Perlaa8f447bd2009-06-18 00:10:27 +00002056 bool link_up;
Sathya Perla3abcded2010-10-03 22:12:27 -07002057 int status, i;
Sarveshwar Bandi0388f252009-10-28 04:15:20 -07002058 u8 mac_speed;
2059 u16 link_speed;
Sathya Perla5fb379e2009-06-18 00:02:59 +00002060
Sathya Perla3abcded2010-10-03 22:12:27 -07002061 for_all_rx_queues(adapter, rxo, i) {
2062 be_post_rx_frags(rxo);
2063 napi_enable(&rxo->rx_eq.napi);
2064 }
Sathya Perla5fb379e2009-06-18 00:02:59 +00002065 napi_enable(&tx_eq->napi);
2066
2067 be_irq_register(adapter);
2068
Sathya Perla8788fdc2009-07-27 22:52:03 +00002069 be_intr_set(adapter, true);
Sathya Perla5fb379e2009-06-18 00:02:59 +00002070
2071 /* The evt queues are created in unarmed state; arm them */
Sathya Perla3abcded2010-10-03 22:12:27 -07002072 for_all_rx_queues(adapter, rxo, i) {
2073 be_eq_notify(adapter, rxo->rx_eq.q.id, true, false, 0);
2074 be_cq_notify(adapter, rxo->cq.id, true, 0);
2075 }
Sathya Perla8788fdc2009-07-27 22:52:03 +00002076 be_eq_notify(adapter, tx_eq->q.id, true, false, 0);
Sathya Perla5fb379e2009-06-18 00:02:59 +00002077
Sathya Perla7a1e9b22010-02-17 01:35:11 +00002078 /* Now that interrupts are on we can process async mcc */
2079 be_async_mcc_enable(adapter);
2080
Sathya Perla889cd4b2010-05-30 23:33:45 +00002081 schedule_delayed_work(&adapter->work, msecs_to_jiffies(100));
2082
Sarveshwar Bandi0388f252009-10-28 04:15:20 -07002083 status = be_cmd_link_status_query(adapter, &link_up, &mac_speed,
2084 &link_speed);
Sathya Perlaa8f447bd2009-06-18 00:10:27 +00002085 if (status)
Sathya Perla889cd4b2010-05-30 23:33:45 +00002086 goto err;
Sathya Perlaa8f447bd2009-06-18 00:10:27 +00002087 be_link_status_update(adapter, link_up);
Sathya Perla5fb379e2009-06-18 00:02:59 +00002088
Sarveshwar Bandiba343c72010-03-31 02:56:12 +00002089 if (be_physfn(adapter)) {
Ajit Khaparde1da87b72010-07-23 01:51:22 +00002090 status = be_vid_config(adapter, false, 0);
Sathya Perla889cd4b2010-05-30 23:33:45 +00002091 if (status)
2092 goto err;
2093
Sarveshwar Bandiba343c72010-03-31 02:56:12 +00002094 status = be_cmd_set_flow_control(adapter,
2095 adapter->tx_fc, adapter->rx_fc);
2096 if (status)
Sathya Perla889cd4b2010-05-30 23:33:45 +00002097 goto err;
Sarveshwar Bandiba343c72010-03-31 02:56:12 +00002098 }
Ajit Khaparde4f2aa892009-11-06 02:07:32 +00002099
Sathya Perla889cd4b2010-05-30 23:33:45 +00002100 return 0;
2101err:
2102 be_close(adapter->netdev);
2103 return -EIO;
Sathya Perla5fb379e2009-06-18 00:02:59 +00002104}
2105
Ajit Khaparde71d8d1b2009-12-03 06:16:59 +00002106static int be_setup_wol(struct be_adapter *adapter, bool enable)
2107{
2108 struct be_dma_mem cmd;
2109 int status = 0;
2110 u8 mac[ETH_ALEN];
2111
2112 memset(mac, 0, ETH_ALEN);
2113
2114 cmd.size = sizeof(struct be_cmd_req_acpi_wol_magic_config);
2115 cmd.va = pci_alloc_consistent(adapter->pdev, cmd.size, &cmd.dma);
2116 if (cmd.va == NULL)
2117 return -1;
2118 memset(cmd.va, 0, cmd.size);
2119
2120 if (enable) {
2121 status = pci_write_config_dword(adapter->pdev,
2122 PCICFG_PM_CONTROL_OFFSET, PCICFG_PM_CONTROL_MASK);
2123 if (status) {
2124 dev_err(&adapter->pdev->dev,
Frans Pop2381a552010-03-24 07:57:36 +00002125				"Could not enable Wake-on-LAN\n");
Ajit Khaparde71d8d1b2009-12-03 06:16:59 +00002126 pci_free_consistent(adapter->pdev, cmd.size, cmd.va,
2127 cmd.dma);
2128 return status;
2129 }
2130 status = be_cmd_enable_magic_wol(adapter,
2131 adapter->netdev->dev_addr, &cmd);
2132 pci_enable_wake(adapter->pdev, PCI_D3hot, 1);
2133 pci_enable_wake(adapter->pdev, PCI_D3cold, 1);
2134 } else {
2135 status = be_cmd_enable_magic_wol(adapter, mac, &cmd);
2136 pci_enable_wake(adapter->pdev, PCI_D3hot, 0);
2137 pci_enable_wake(adapter->pdev, PCI_D3cold, 0);
2138 }
2139
2140 pci_free_consistent(adapter->pdev, cmd.size, cmd.va, cmd.dma);
2141 return status;
2142}
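/*
 * Usage sketch (assuming the usual ethtool WoL plumbing in this
 * driver): a user arms magic-packet wake with
 *
 *	ethtool -s eth0 wol g
 *
 * after which the suspend path is expected to call
 * be_setup_wol(adapter, true) to program the current MAC as the
 * wake-up filter.
 */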
2143
Ajit Khaparde6d87f5c2010-08-25 00:32:33 +00002144/*
2145 * Generate a seed MAC address from the PF MAC Address using jhash.
 2146 * MAC addresses for VFs are assigned incrementally starting from the seed.
2147 * These addresses are programmed in the ASIC by the PF and the VF driver
2148 * queries for the MAC address during its probe.
2149 */
2150static inline int be_vf_eth_addr_config(struct be_adapter *adapter)
2151{
2152 u32 vf = 0;
Sathya Perla3abcded2010-10-03 22:12:27 -07002153 int status = 0;
Ajit Khaparde6d87f5c2010-08-25 00:32:33 +00002154 u8 mac[ETH_ALEN];
2155
2156 be_vf_eth_addr_generate(adapter, mac);
2157
2158 for (vf = 0; vf < num_vfs; vf++) {
2159 status = be_cmd_pmac_add(adapter, mac,
2160 adapter->vf_cfg[vf].vf_if_handle,
2161 &adapter->vf_cfg[vf].vf_pmac_id);
2162 if (status)
2163 dev_err(&adapter->pdev->dev,
2164 "Mac address add failed for VF %d\n", vf);
2165 else
2166 memcpy(adapter->vf_cfg[vf].vf_mac_addr, mac, ETH_ALEN);
2167
2168 mac[5] += 1;
2169 }
2170 return status;
2171}
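/*
 * Sketch of the seed generation (an assumption, not necessarily the
 * driver's actual implementation; be_vf_eth_addr_generate() is defined
 * elsewhere in this file): a jhash() from <linux/jhash.h> of the PF
 * MAC, folded into a locally administered address, could look like
 *
 *	u32 h = jhash(adapter->netdev->dev_addr, ETH_ALEN, 0);
 *
 *	mac[0] = 0x02;		// locally administered, unicast
 *	mac[1] = 0x00;
 *	mac[2] = 0x00;
 *	mac[3] = (h >> 16) & 0xff;
 *	mac[4] = (h >> 8) & 0xff;
 *	mac[5] = h & 0xff;
 *
 * Each VF then gets mac[5] incremented by one, as in the loop above.
 */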
2172
2173static inline void be_vf_eth_addr_rem(struct be_adapter *adapter)
2174{
2175 u32 vf;
2176
2177 for (vf = 0; vf < num_vfs; vf++) {
2178 if (adapter->vf_cfg[vf].vf_pmac_id != BE_INVALID_PMAC_ID)
2179 be_cmd_pmac_del(adapter,
2180 adapter->vf_cfg[vf].vf_if_handle,
2181 adapter->vf_cfg[vf].vf_pmac_id);
2182 }
2183}
2184
Sathya Perla5fb379e2009-06-18 00:02:59 +00002185static int be_setup(struct be_adapter *adapter)
2186{
Sathya Perla5fb379e2009-06-18 00:02:59 +00002187 struct net_device *netdev = adapter->netdev;
Sarveshwar Bandiba343c72010-03-31 02:56:12 +00002188 u32 cap_flags, en_flags, vf = 0;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002189 int status;
Sarveshwar Bandiba343c72010-03-31 02:56:12 +00002190 u8 mac[ETH_ALEN];
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002191
Sarveshwar Bandiba343c72010-03-31 02:56:12 +00002192 cap_flags = en_flags = BE_IF_FLAGS_UNTAGGED | BE_IF_FLAGS_BROADCAST;
2193
2194 if (be_physfn(adapter)) {
2195 cap_flags |= BE_IF_FLAGS_MCAST_PROMISCUOUS |
2196 BE_IF_FLAGS_PROMISCUOUS |
2197 BE_IF_FLAGS_PASS_L3L4_ERRORS;
2198 en_flags |= BE_IF_FLAGS_PASS_L3L4_ERRORS;
Sathya Perla3abcded2010-10-03 22:12:27 -07002199
2200 if (be_multi_rxq(adapter)) {
2201 cap_flags |= BE_IF_FLAGS_RSS;
2202 en_flags |= BE_IF_FLAGS_RSS;
2203 }
Sarveshwar Bandiba343c72010-03-31 02:56:12 +00002204 }
Sathya Perla73d540f2009-10-14 20:20:42 +00002205
2206 status = be_cmd_if_create(adapter, cap_flags, en_flags,
2207 netdev->dev_addr, false/* pmac_invalid */,
Sarveshwar Bandiba343c72010-03-31 02:56:12 +00002208 &adapter->if_handle, &adapter->pmac_id, 0);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002209 if (status != 0)
2210 goto do_none;
2211
Sarveshwar Bandiba343c72010-03-31 02:56:12 +00002212 if (be_physfn(adapter)) {
2213 while (vf < num_vfs) {
2214 cap_flags = en_flags = BE_IF_FLAGS_UNTAGGED
2215 | BE_IF_FLAGS_BROADCAST;
2216 status = be_cmd_if_create(adapter, cap_flags, en_flags,
Ajit Khaparde64600ea2010-07-23 01:50:34 +00002217 mac, true,
2218 &adapter->vf_cfg[vf].vf_if_handle,
Sarveshwar Bandiba343c72010-03-31 02:56:12 +00002219 NULL, vf+1);
2220 if (status) {
2221 dev_err(&adapter->pdev->dev,
2222 "Interface Create failed for VF %d\n", vf);
2223 goto if_destroy;
2224 }
Ajit Khaparde64600ea2010-07-23 01:50:34 +00002225 adapter->vf_cfg[vf].vf_pmac_id = BE_INVALID_PMAC_ID;
Sarveshwar Bandiba343c72010-03-31 02:56:12 +00002226 vf++;
Sarveshwar Bandi84e5b9f2010-05-27 16:28:15 -07002227 }
Sarveshwar Bandiba343c72010-03-31 02:56:12 +00002228	} else {
2229 status = be_cmd_mac_addr_query(adapter, mac,
2230 MAC_ADDRESS_TYPE_NETWORK, false, adapter->if_handle);
2231 if (!status) {
2232 memcpy(adapter->netdev->dev_addr, mac, ETH_ALEN);
2233 memcpy(adapter->netdev->perm_addr, mac, ETH_ALEN);
2234 }
2235 }
2236
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002237 status = be_tx_queues_create(adapter);
2238 if (status != 0)
2239 goto if_destroy;
2240
2241 status = be_rx_queues_create(adapter);
2242 if (status != 0)
2243 goto tx_qs_destroy;
2244
Sathya Perla5fb379e2009-06-18 00:02:59 +00002245 status = be_mcc_queues_create(adapter);
2246 if (status != 0)
2247 goto rx_qs_destroy;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002248
Ajit Khaparde6d87f5c2010-08-25 00:32:33 +00002249 if (be_physfn(adapter)) {
2250 status = be_vf_eth_addr_config(adapter);
2251 if (status)
2252 goto mcc_q_destroy;
2253 }
2254
Ajit Khaparde0dffc832009-11-29 17:57:46 +00002255 adapter->link_speed = -1;
2256
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002257 return 0;
2258
Ajit Khaparde6d87f5c2010-08-25 00:32:33 +00002259mcc_q_destroy:
2260 if (be_physfn(adapter))
2261 be_vf_eth_addr_rem(adapter);
2262 be_mcc_queues_destroy(adapter);
Sathya Perla5fb379e2009-06-18 00:02:59 +00002263rx_qs_destroy:
2264 be_rx_queues_destroy(adapter);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002265tx_qs_destroy:
2266 be_tx_queues_destroy(adapter);
2267if_destroy:
Sarveshwar Bandiba343c72010-03-31 02:56:12 +00002268 for (vf = 0; vf < num_vfs; vf++)
Ajit Khaparde64600ea2010-07-23 01:50:34 +00002269 if (adapter->vf_cfg[vf].vf_if_handle)
2270 be_cmd_if_destroy(adapter,
2271 adapter->vf_cfg[vf].vf_if_handle);
Sathya Perla8788fdc2009-07-27 22:52:03 +00002272 be_cmd_if_destroy(adapter, adapter->if_handle);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002273do_none:
2274 return status;
2275}
2276
Sathya Perla5fb379e2009-06-18 00:02:59 +00002277static int be_clear(struct be_adapter *adapter)
2278{
Ajit Khaparde6d87f5c2010-08-25 00:32:33 +00002279 if (be_physfn(adapter))
2280 be_vf_eth_addr_rem(adapter);
2281
Sathya Perla1a8887d2009-08-17 00:58:41 +00002282 be_mcc_queues_destroy(adapter);
Sathya Perla5fb379e2009-06-18 00:02:59 +00002283 be_rx_queues_destroy(adapter);
2284 be_tx_queues_destroy(adapter);
2285
Sathya Perla8788fdc2009-07-27 22:52:03 +00002286 be_cmd_if_destroy(adapter, adapter->if_handle);
Sathya Perla5fb379e2009-06-18 00:02:59 +00002287
Sathya Perla2243e2e2009-11-22 22:02:03 +00002288 /* tell fw we're done with firing cmds */
2289 be_cmd_fw_clean(adapter);
Sathya Perla5fb379e2009-06-18 00:02:59 +00002290 return 0;
2291}
2292
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002293
Ajit Khaparde84517482009-09-04 03:12:16 +00002294#define FW_FILE_HDR_SIGN "ServerEngines Corp. "
2295char flash_cookie[2][16] = {"*** SE FLAS",
2296 "H DIRECTORY *** "};
Sarveshwar Bandifa9a6fe2009-11-20 14:23:47 -08002297
2298static bool be_flash_redboot(struct be_adapter *adapter,
Ajit Khaparde3f0d4562010-02-09 01:30:35 +00002299 const u8 *p, u32 img_start, int image_size,
2300 int hdr_size)
Sarveshwar Bandifa9a6fe2009-11-20 14:23:47 -08002301{
2302 u32 crc_offset;
2303 u8 flashed_crc[4];
2304 int status;
Ajit Khaparde3f0d4562010-02-09 01:30:35 +00002305
2306 crc_offset = hdr_size + img_start + image_size - 4;
2307
Sarveshwar Bandifa9a6fe2009-11-20 14:23:47 -08002308 p += crc_offset;
Ajit Khaparde3f0d4562010-02-09 01:30:35 +00002309
2310 status = be_cmd_get_flash_crc(adapter, flashed_crc,
Ajit Khapardef510fc62010-03-31 01:47:45 +00002311 (image_size - 4));
Sarveshwar Bandifa9a6fe2009-11-20 14:23:47 -08002312 if (status) {
2313 dev_err(&adapter->pdev->dev,
2314 "could not get crc from flash, not flashing redboot\n");
2315 return false;
2316 }
2317
 2318	/* update redboot only if crc does not match */
 2319	return memcmp(flashed_crc, p, 4) != 0;
Sarveshwar Bandifa9a6fe2009-11-20 14:23:47 -08002323}
2324
Ajit Khaparde3f0d4562010-02-09 01:30:35 +00002325static int be_flash_data(struct be_adapter *adapter,
Ajit Khaparde84517482009-09-04 03:12:16 +00002326 const struct firmware *fw,
Ajit Khaparde3f0d4562010-02-09 01:30:35 +00002327 struct be_dma_mem *flash_cmd, int num_of_images)
Ajit Khaparde84517482009-09-04 03:12:16 +00002329{
Ajit Khaparde3f0d4562010-02-09 01:30:35 +00002330 int status = 0, i, filehdr_size = 0;
2331 u32 total_bytes = 0, flash_op;
Ajit Khaparde84517482009-09-04 03:12:16 +00002332 int num_bytes;
2333 const u8 *p = fw->data;
2334 struct be_cmd_write_flashrom *req = flash_cmd->va;
Ajit Khaparde3f0d4562010-02-09 01:30:35 +00002335 struct flash_comp *pflashcomp;
Sarveshwar Bandi9fe96932010-03-02 22:37:28 +00002336 int num_comp;
Ajit Khaparde84517482009-09-04 03:12:16 +00002337
Sarveshwar Bandi9fe96932010-03-02 22:37:28 +00002338 struct flash_comp gen3_flash_types[9] = {
Ajit Khaparde3f0d4562010-02-09 01:30:35 +00002339 { FLASH_iSCSI_PRIMARY_IMAGE_START_g3, IMG_TYPE_ISCSI_ACTIVE,
2340 FLASH_IMAGE_MAX_SIZE_g3},
2341 { FLASH_REDBOOT_START_g3, IMG_TYPE_REDBOOT,
2342 FLASH_REDBOOT_IMAGE_MAX_SIZE_g3},
2343 { FLASH_iSCSI_BIOS_START_g3, IMG_TYPE_BIOS,
2344 FLASH_BIOS_IMAGE_MAX_SIZE_g3},
2345 { FLASH_PXE_BIOS_START_g3, IMG_TYPE_PXE_BIOS,
2346 FLASH_BIOS_IMAGE_MAX_SIZE_g3},
2347 { FLASH_FCoE_BIOS_START_g3, IMG_TYPE_FCOE_BIOS,
2348 FLASH_BIOS_IMAGE_MAX_SIZE_g3},
2349 { FLASH_iSCSI_BACKUP_IMAGE_START_g3, IMG_TYPE_ISCSI_BACKUP,
2350 FLASH_IMAGE_MAX_SIZE_g3},
2351 { FLASH_FCoE_PRIMARY_IMAGE_START_g3, IMG_TYPE_FCOE_FW_ACTIVE,
2352 FLASH_IMAGE_MAX_SIZE_g3},
2353 { FLASH_FCoE_BACKUP_IMAGE_START_g3, IMG_TYPE_FCOE_FW_BACKUP,
Sarveshwar Bandi9fe96932010-03-02 22:37:28 +00002354 FLASH_IMAGE_MAX_SIZE_g3},
2355 { FLASH_NCSI_START_g3, IMG_TYPE_NCSI_FW,
2356 FLASH_NCSI_IMAGE_MAX_SIZE_g3}
Ajit Khaparde3f0d4562010-02-09 01:30:35 +00002357 };
2358 struct flash_comp gen2_flash_types[8] = {
2359 { FLASH_iSCSI_PRIMARY_IMAGE_START_g2, IMG_TYPE_ISCSI_ACTIVE,
2360 FLASH_IMAGE_MAX_SIZE_g2},
2361 { FLASH_REDBOOT_START_g2, IMG_TYPE_REDBOOT,
2362 FLASH_REDBOOT_IMAGE_MAX_SIZE_g2},
2363 { FLASH_iSCSI_BIOS_START_g2, IMG_TYPE_BIOS,
2364 FLASH_BIOS_IMAGE_MAX_SIZE_g2},
2365 { FLASH_PXE_BIOS_START_g2, IMG_TYPE_PXE_BIOS,
2366 FLASH_BIOS_IMAGE_MAX_SIZE_g2},
2367 { FLASH_FCoE_BIOS_START_g2, IMG_TYPE_FCOE_BIOS,
2368 FLASH_BIOS_IMAGE_MAX_SIZE_g2},
2369 { FLASH_iSCSI_BACKUP_IMAGE_START_g2, IMG_TYPE_ISCSI_BACKUP,
2370 FLASH_IMAGE_MAX_SIZE_g2},
2371 { FLASH_FCoE_PRIMARY_IMAGE_START_g2, IMG_TYPE_FCOE_FW_ACTIVE,
2372 FLASH_IMAGE_MAX_SIZE_g2},
2373 { FLASH_FCoE_BACKUP_IMAGE_START_g2, IMG_TYPE_FCOE_FW_BACKUP,
2374 FLASH_IMAGE_MAX_SIZE_g2}
2375 };
2376
2377 if (adapter->generation == BE_GEN3) {
2378 pflashcomp = gen3_flash_types;
2379 filehdr_size = sizeof(struct flash_file_hdr_g3);
Sarveshwar Bandi9fe96932010-03-02 22:37:28 +00002380 num_comp = 9;
Ajit Khaparde3f0d4562010-02-09 01:30:35 +00002381 } else {
2382 pflashcomp = gen2_flash_types;
2383 filehdr_size = sizeof(struct flash_file_hdr_g2);
Sarveshwar Bandi9fe96932010-03-02 22:37:28 +00002384 num_comp = 8;
Ajit Khaparde84517482009-09-04 03:12:16 +00002385 }
Sarveshwar Bandi9fe96932010-03-02 22:37:28 +00002386 for (i = 0; i < num_comp; i++) {
2387 if ((pflashcomp[i].optype == IMG_TYPE_NCSI_FW) &&
2388 memcmp(adapter->fw_ver, "3.102.148.0", 11) < 0)
2389 continue;
Ajit Khaparde3f0d4562010-02-09 01:30:35 +00002390 if ((pflashcomp[i].optype == IMG_TYPE_REDBOOT) &&
2391 (!be_flash_redboot(adapter, fw->data,
2392 pflashcomp[i].offset, pflashcomp[i].size,
2393 filehdr_size)))
2394 continue;
2395 p = fw->data;
2396 p += filehdr_size + pflashcomp[i].offset
2397 + (num_of_images * sizeof(struct image_hdr));
2398 if (p + pflashcomp[i].size > fw->data + fw->size)
Ajit Khaparde84517482009-09-04 03:12:16 +00002399 return -1;
Ajit Khaparde3f0d4562010-02-09 01:30:35 +00002400 total_bytes = pflashcomp[i].size;
2401 while (total_bytes) {
2402 if (total_bytes > 32*1024)
2403 num_bytes = 32*1024;
2404 else
2405 num_bytes = total_bytes;
2406 total_bytes -= num_bytes;
Ajit Khaparde84517482009-09-04 03:12:16 +00002407
Ajit Khaparde3f0d4562010-02-09 01:30:35 +00002408 if (!total_bytes)
2409 flash_op = FLASHROM_OPER_FLASH;
2410 else
2411 flash_op = FLASHROM_OPER_SAVE;
2412 memcpy(req->params.data_buf, p, num_bytes);
2413 p += num_bytes;
2414 status = be_cmd_write_flashrom(adapter, flash_cmd,
2415 pflashcomp[i].optype, flash_op, num_bytes);
2416 if (status) {
2417 dev_err(&adapter->pdev->dev,
2418 "cmd to write to flash rom failed.\n");
2419 return -1;
2420 }
2421 yield();
Ajit Khaparde84517482009-09-04 03:12:16 +00002422 }
Ajit Khaparde84517482009-09-04 03:12:16 +00002423 }
Ajit Khaparde84517482009-09-04 03:12:16 +00002424 return 0;
2425}
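/*
 * Chunking worked example: a 100KB component goes out as three 32KB
 * FLASHROM_OPER_SAVE writes followed by one final 4KB write with
 * FLASHROM_OPER_FLASH, which commits the accumulated image.
 */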
2426
Ajit Khaparde3f0d4562010-02-09 01:30:35 +00002427static int get_ufigen_type(struct flash_file_hdr_g2 *fhdr)
2428{
2429 if (fhdr == NULL)
2430 return 0;
2431 if (fhdr->build[0] == '3')
2432 return BE_GEN3;
2433 else if (fhdr->build[0] == '2')
2434 return BE_GEN2;
2435 else
2436 return 0;
2437}
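/*
 * Example: a BE3 UFI carries a version string beginning with '3' in
 * fhdr->build, so build[0] == '3' yields BE_GEN3; a leading character
 * other than '2' or '3' yields 0, and the compatibility check in
 * be_load_fw() below then rejects the file.
 */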
2438
Ajit Khaparde84517482009-09-04 03:12:16 +00002439int be_load_fw(struct be_adapter *adapter, u8 *func)
2440{
2441 char fw_file[ETHTOOL_FLASH_MAX_FILENAME];
2442 const struct firmware *fw;
Ajit Khaparde3f0d4562010-02-09 01:30:35 +00002443 struct flash_file_hdr_g2 *fhdr;
2444 struct flash_file_hdr_g3 *fhdr3;
2445 struct image_hdr *img_hdr_ptr = NULL;
Ajit Khaparde84517482009-09-04 03:12:16 +00002446 struct be_dma_mem flash_cmd;
Ajit Khaparde8b93b712010-03-31 01:57:10 +00002447 int status, i = 0, num_imgs = 0;
Ajit Khaparde84517482009-09-04 03:12:16 +00002448 const u8 *p;
Ajit Khaparde84517482009-09-04 03:12:16 +00002449
Ajit Khaparde84517482009-09-04 03:12:16 +00002450 strcpy(fw_file, func);
2451
2452 status = request_firmware(&fw, fw_file, &adapter->pdev->dev);
2453 if (status)
2454 goto fw_exit;
2455
2456 p = fw->data;
Ajit Khaparde3f0d4562010-02-09 01:30:35 +00002457 fhdr = (struct flash_file_hdr_g2 *) p;
Ajit Khaparde84517482009-09-04 03:12:16 +00002458 dev_info(&adapter->pdev->dev, "Flashing firmware file %s\n", fw_file);
2459
Ajit Khaparde84517482009-09-04 03:12:16 +00002460 flash_cmd.size = sizeof(struct be_cmd_write_flashrom) + 32*1024;
2461 flash_cmd.va = pci_alloc_consistent(adapter->pdev, flash_cmd.size,
2462 &flash_cmd.dma);
2463 if (!flash_cmd.va) {
2464 status = -ENOMEM;
2465 dev_err(&adapter->pdev->dev,
2466 "Memory allocation failure while flashing\n");
2467 goto fw_exit;
2468 }
2469
Ajit Khaparde3f0d4562010-02-09 01:30:35 +00002470 if ((adapter->generation == BE_GEN3) &&
2471 (get_ufigen_type(fhdr) == BE_GEN3)) {
2472 fhdr3 = (struct flash_file_hdr_g3 *) fw->data;
Ajit Khaparde8b93b712010-03-31 01:57:10 +00002473 num_imgs = le32_to_cpu(fhdr3->num_imgs);
2474 for (i = 0; i < num_imgs; i++) {
Ajit Khaparde3f0d4562010-02-09 01:30:35 +00002475 img_hdr_ptr = (struct image_hdr *) (fw->data +
2476 (sizeof(struct flash_file_hdr_g3) +
Ajit Khaparde8b93b712010-03-31 01:57:10 +00002477 i * sizeof(struct image_hdr)));
2478 if (le32_to_cpu(img_hdr_ptr->imageid) == 1)
2479 status = be_flash_data(adapter, fw, &flash_cmd,
2480 num_imgs);
Ajit Khaparde3f0d4562010-02-09 01:30:35 +00002481 }
2482 } else if ((adapter->generation == BE_GEN2) &&
2483 (get_ufigen_type(fhdr) == BE_GEN2)) {
2484 status = be_flash_data(adapter, fw, &flash_cmd, 0);
2485 } else {
2486 dev_err(&adapter->pdev->dev,
2487 "UFI and Interface are not compatible for flashing\n");
2488 status = -1;
Ajit Khaparde84517482009-09-04 03:12:16 +00002489 }
2490
2491 pci_free_consistent(adapter->pdev, flash_cmd.size, flash_cmd.va,
2492 flash_cmd.dma);
2493 if (status) {
2494 dev_err(&adapter->pdev->dev, "Firmware load error\n");
2495 goto fw_exit;
2496 }
2497
André Goddard Rosaaf901ca2009-11-14 13:09:05 -02002498 dev_info(&adapter->pdev->dev, "Firmware flashed successfully\n");
Ajit Khaparde84517482009-09-04 03:12:16 +00002499
2500fw_exit:
2501 release_firmware(fw);
2502 return status;
2503}
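/*
 * Usage sketch (assuming this is wired up as the ethtool flash op, as
 * the ETHTOOL_FLASH_MAX_FILENAME bound suggests): firmware is flashed
 * from user space with
 *
 *	ethtool -f eth0 <ufi-file>
 *
 * which reaches this function via request_firmware() on that name.
 */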
2504
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002505static struct net_device_ops be_netdev_ops = {
2506 .ndo_open = be_open,
2507 .ndo_stop = be_close,
2508 .ndo_start_xmit = be_xmit,
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002509 .ndo_set_rx_mode = be_set_multicast_list,
2510 .ndo_set_mac_address = be_mac_addr_set,
2511 .ndo_change_mtu = be_change_mtu,
2512 .ndo_validate_addr = eth_validate_addr,
2513 .ndo_vlan_rx_register = be_vlan_register,
2514 .ndo_vlan_rx_add_vid = be_vlan_add_vid,
2515 .ndo_vlan_rx_kill_vid = be_vlan_rem_vid,
Ajit Khaparde64600ea2010-07-23 01:50:34 +00002516 .ndo_set_vf_mac = be_set_vf_mac,
Ajit Khaparde1da87b72010-07-23 01:51:22 +00002517 .ndo_set_vf_vlan = be_set_vf_vlan,
Ajit Khapardee1d18732010-07-23 01:52:13 +00002518 .ndo_set_vf_tx_rate = be_set_vf_tx_rate,
Ajit Khaparde64600ea2010-07-23 01:50:34 +00002519 .ndo_get_vf_config = be_get_vf_config
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002520};
2521
2522static void be_netdev_init(struct net_device *netdev)
2523{
2524 struct be_adapter *adapter = netdev_priv(netdev);
Sathya Perla3abcded2010-10-03 22:12:27 -07002525 struct be_rx_obj *rxo;
2526 int i;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002527
2528 netdev->features |= NETIF_F_SG | NETIF_F_HW_VLAN_RX | NETIF_F_TSO |
Ajit Khaparde583e3f32009-10-05 02:22:19 +00002529 NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_FILTER | NETIF_F_HW_CSUM |
Ajit Khaparde49e4b8472010-06-14 04:56:07 +00002530 NETIF_F_GRO | NETIF_F_TSO6;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002531
Ajit Khaparde51c59872009-11-29 17:54:54 +00002532 netdev->vlan_features |= NETIF_F_SG | NETIF_F_TSO | NETIF_F_HW_CSUM;
2533
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002534 netdev->flags |= IFF_MULTICAST;
2535
Ajit Khaparde728a9972009-04-13 15:41:22 -07002536 adapter->rx_csum = true;
2537
Ajit Khaparde9e90c962009-11-06 02:06:59 +00002538 /* Default settings for Rx and Tx flow control */
2539 adapter->rx_fc = true;
2540 adapter->tx_fc = true;
2541
Ajit Khapardec190e3c2009-09-04 03:12:29 +00002542 netif_set_gso_max_size(netdev, 65535);
2543
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002544 BE_SET_NETDEV_OPS(netdev, &be_netdev_ops);
2545
2546 SET_ETHTOOL_OPS(netdev, &be_ethtool_ops);
2547
Sathya Perla3abcded2010-10-03 22:12:27 -07002548 for_all_rx_queues(adapter, rxo, i)
2549 netif_napi_add(netdev, &rxo->rx_eq.napi, be_poll_rx,
2550 BE_NAPI_WEIGHT);
2551
Sathya Perla5fb379e2009-06-18 00:02:59 +00002552 netif_napi_add(netdev, &adapter->tx_eq.napi, be_poll_tx_mcc,
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002553 BE_NAPI_WEIGHT);
2554
2555 netif_carrier_off(netdev);
2556 netif_stop_queue(netdev);
2557}
2558
2559static void be_unmap_pci_bars(struct be_adapter *adapter)
2560{
Sathya Perla8788fdc2009-07-27 22:52:03 +00002561 if (adapter->csr)
2562 iounmap(adapter->csr);
2563 if (adapter->db)
2564 iounmap(adapter->db);
Sarveshwar Bandiba343c72010-03-31 02:56:12 +00002565 if (adapter->pcicfg && be_physfn(adapter))
Sathya Perla8788fdc2009-07-27 22:52:03 +00002566 iounmap(adapter->pcicfg);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002567}
2568
2569static int be_map_pci_bars(struct be_adapter *adapter)
2570{
2571 u8 __iomem *addr;
Sarveshwar Bandiba343c72010-03-31 02:56:12 +00002572 int pcicfg_reg, db_reg;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002573
Sarveshwar Bandiba343c72010-03-31 02:56:12 +00002574 if (be_physfn(adapter)) {
2575 addr = ioremap_nocache(pci_resource_start(adapter->pdev, 2),
2576 pci_resource_len(adapter->pdev, 2));
2577 if (addr == NULL)
2578 return -ENOMEM;
2579 adapter->csr = addr;
2580 }
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002581
Sarveshwar Bandiba343c72010-03-31 02:56:12 +00002582 if (adapter->generation == BE_GEN2) {
2583 pcicfg_reg = 1;
2584 db_reg = 4;
2585 } else {
2586 pcicfg_reg = 0;
2587 if (be_physfn(adapter))
2588 db_reg = 4;
2589 else
2590 db_reg = 0;
2591 }
2592 addr = ioremap_nocache(pci_resource_start(adapter->pdev, db_reg),
2593 pci_resource_len(adapter->pdev, db_reg));
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002594 if (addr == NULL)
2595 goto pci_map_err;
Sathya Perla8788fdc2009-07-27 22:52:03 +00002596 adapter->db = addr;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002597
Sarveshwar Bandiba343c72010-03-31 02:56:12 +00002598 if (be_physfn(adapter)) {
2599 addr = ioremap_nocache(
2600 pci_resource_start(adapter->pdev, pcicfg_reg),
2601 pci_resource_len(adapter->pdev, pcicfg_reg));
2602 if (addr == NULL)
2603 goto pci_map_err;
2604 adapter->pcicfg = addr;
2605 } else
2606 adapter->pcicfg = adapter->db + SRIOV_VF_PCICFG_OFFSET;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002607
2608 return 0;
2609pci_map_err:
2610 be_unmap_pci_bars(adapter);
2611 return -ENOMEM;
2612}
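/*
 * BAR layout implied by the code above:
 *
 *	BE2 (gen2) PF: CSR = BAR2, pcicfg = BAR1, doorbells = BAR4
 *	BE3 (gen3) PF: CSR = BAR2, pcicfg = BAR0, doorbells = BAR4
 *	BE3 (gen3) VF: doorbells = BAR0, pcicfg at
 *	               SRIOV_VF_PCICFG_OFFSET into the doorbell mapping
 */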
2613
2614
2615static void be_ctrl_cleanup(struct be_adapter *adapter)
2616{
Sathya Perla8788fdc2009-07-27 22:52:03 +00002617 struct be_dma_mem *mem = &adapter->mbox_mem_alloced;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002618
2619 be_unmap_pci_bars(adapter);
2620
2621 if (mem->va)
2622 pci_free_consistent(adapter->pdev, mem->size,
2623 mem->va, mem->dma);
Sathya Perlae7b909a2009-11-22 22:01:10 +00002624
2625 mem = &adapter->mc_cmd_mem;
2626 if (mem->va)
2627 pci_free_consistent(adapter->pdev, mem->size,
2628 mem->va, mem->dma);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002629}
2630
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002631static int be_ctrl_init(struct be_adapter *adapter)
2632{
Sathya Perla8788fdc2009-07-27 22:52:03 +00002633 struct be_dma_mem *mbox_mem_alloc = &adapter->mbox_mem_alloced;
2634 struct be_dma_mem *mbox_mem_align = &adapter->mbox_mem;
Sathya Perlae7b909a2009-11-22 22:01:10 +00002635 struct be_dma_mem *mc_cmd_mem = &adapter->mc_cmd_mem;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002636 int status;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002637
2638 status = be_map_pci_bars(adapter);
2639 if (status)
Sathya Perlae7b909a2009-11-22 22:01:10 +00002640 goto done;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002641
2642 mbox_mem_alloc->size = sizeof(struct be_mcc_mailbox) + 16;
2643 mbox_mem_alloc->va = pci_alloc_consistent(adapter->pdev,
2644 mbox_mem_alloc->size, &mbox_mem_alloc->dma);
2645 if (!mbox_mem_alloc->va) {
Sathya Perlae7b909a2009-11-22 22:01:10 +00002646 status = -ENOMEM;
2647 goto unmap_pci_bars;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002648 }
Sathya Perlae7b909a2009-11-22 22:01:10 +00002649
	mbox_mem_align->size = sizeof(struct be_mcc_mailbox);
	mbox_mem_align->va = PTR_ALIGN(mbox_mem_alloc->va, 16);
	mbox_mem_align->dma = PTR_ALIGN(mbox_mem_alloc->dma, 16);
	memset(mbox_mem_align->va, 0, sizeof(struct be_mcc_mailbox));

	mc_cmd_mem->size = sizeof(struct be_cmd_req_mcast_mac_config);
	mc_cmd_mem->va = pci_alloc_consistent(adapter->pdev, mc_cmd_mem->size,
			&mc_cmd_mem->dma);
	if (mc_cmd_mem->va == NULL) {
		status = -ENOMEM;
		goto free_mbox;
	}
	memset(mc_cmd_mem->va, 0, mc_cmd_mem->size);

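	/* These locks appear to serialize, respectively, mailbox access,
	 * MCC command posting and MCC completion processing.
	 */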
	spin_lock_init(&adapter->mbox_lock);
	spin_lock_init(&adapter->mcc_lock);
	spin_lock_init(&adapter->mcc_cq_lock);

	init_completion(&adapter->flash_compl);
	pci_save_state(adapter->pdev);
	return 0;

free_mbox:
	pci_free_consistent(adapter->pdev, mbox_mem_alloc->size,
		mbox_mem_alloc->va, mbox_mem_alloc->dma);

unmap_pci_bars:
	be_unmap_pci_bars(adapter);

done:
	return status;
}

static void be_stats_cleanup(struct be_adapter *adapter)
{
	struct be_dma_mem *cmd = &adapter->stats_cmd;

	if (cmd->va)
		pci_free_consistent(adapter->pdev, cmd->size,
			cmd->va, cmd->dma);
}

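/* Allocate the DMA-coherent buffer used for the GET_STATS firmware
 * command.
 */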
static int be_stats_init(struct be_adapter *adapter)
{
	struct be_dma_mem *cmd = &adapter->stats_cmd;

	cmd->size = sizeof(struct be_cmd_req_get_stats);
	cmd->va = pci_alloc_consistent(adapter->pdev, cmd->size, &cmd->dma);
	if (cmd->va == NULL)
		return -1;
	memset(cmd->va, 0, cmd->size);
	return 0;
}

static void __devexit be_remove(struct pci_dev *pdev)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);

	if (!adapter)
		return;

	unregister_netdev(adapter->netdev);

	be_clear(adapter);

	be_stats_cleanup(adapter);

	be_ctrl_cleanup(adapter);

	be_sriov_disable(adapter);

	be_msix_disable(adapter);

	pci_set_drvdata(pdev, NULL);
	pci_release_regions(pdev);
	pci_disable_device(pdev);

	free_netdev(adapter->netdev);
}

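/* Pull boot-time configuration from the firmware: version, port number,
 * function mode/capabilities and, on the PF, the permanent MAC address.
 */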
static int be_get_config(struct be_adapter *adapter)
{
	int status;
	u8 mac[ETH_ALEN];

	status = be_cmd_get_fw_ver(adapter, adapter->fw_ver);
	if (status)
		return status;

	status = be_cmd_query_fw_cfg(adapter, &adapter->port_num,
			&adapter->function_mode, &adapter->function_caps);
	if (status)
		return status;

	memset(mac, 0, ETH_ALEN);

	if (be_physfn(adapter)) {
		status = be_cmd_mac_addr_query(adapter, mac,
			MAC_ADDRESS_TYPE_NETWORK, true /* permanent */, 0);

		if (status)
			return status;

		if (!is_valid_ether_addr(mac))
			return -EADDRNOTAVAIL;

		memcpy(adapter->netdev->dev_addr, mac, ETH_ALEN);
		memcpy(adapter->netdev->perm_addr, mac, ETH_ALEN);
	}

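	/* 0x400 in function_mode appears to be the multi-channel
	 * (FLEX10) bit; in that mode only a quarter of the usual VLAN
	 * count is available to this function.
	 */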
	if (adapter->function_mode & 0x400)
		adapter->max_vlans = BE_NUM_VLANS_SUPPORTED/4;
	else
		adapter->max_vlans = BE_NUM_VLANS_SUPPORTED;

	return 0;
}

static int __devinit be_probe(struct pci_dev *pdev,
			const struct pci_device_id *pdev_id)
{
	int status = 0;
	struct be_adapter *adapter;
	struct net_device *netdev;

	status = pci_enable_device(pdev);
	if (status)
		goto do_none;

	status = pci_request_regions(pdev, DRV_NAME);
	if (status)
		goto disable_dev;
	pci_set_master(pdev);

	netdev = alloc_etherdev(sizeof(struct be_adapter));
	if (netdev == NULL) {
		status = -ENOMEM;
		goto rel_reg;
	}
	adapter = netdev_priv(netdev);

	switch (pdev->device) {
	case BE_DEVICE_ID1:
	case OC_DEVICE_ID1:
		adapter->generation = BE_GEN2;
		break;
	case BE_DEVICE_ID2:
	case OC_DEVICE_ID2:
		adapter->generation = BE_GEN3;
		break;
	default:
		adapter->generation = 0;
	}

	adapter->pdev = pdev;
	pci_set_drvdata(pdev, adapter);
	adapter->netdev = netdev;
	SET_NETDEV_DEV(netdev, &pdev->dev);

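	/* Prefer a 64-bit DMA mask (and advertise NETIF_F_HIGHDMA);
	 * fall back to a 32-bit mask if the platform can't provide it.
	 */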
	status = pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
	if (!status) {
		netdev->features |= NETIF_F_HIGHDMA;
	} else {
		status = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
		if (status) {
			dev_err(&pdev->dev, "Could not set PCI DMA Mask\n");
			goto free_netdev;
		}
	}

	be_sriov_enable(adapter);

	status = be_ctrl_init(adapter);
	if (status)
		goto free_netdev;

	/* sync up with fw's ready state */
	if (be_physfn(adapter)) {
		status = be_cmd_POST(adapter);
		if (status)
			goto ctrl_clean;
	}

	/* tell fw we're ready to fire cmds */
	status = be_cmd_fw_init(adapter);
	if (status)
		goto ctrl_clean;

	if (be_physfn(adapter)) {
		status = be_cmd_reset_function(adapter);
		if (status)
			goto ctrl_clean;
	}

	status = be_stats_init(adapter);
	if (status)
		goto ctrl_clean;

	status = be_get_config(adapter);
	if (status)
		goto stats_clean;

	be_msix_enable(adapter);

	INIT_DELAYED_WORK(&adapter->work, be_worker);

	status = be_setup(adapter);
	if (status)
		goto msix_disable;

	be_netdev_init(netdev);
	status = register_netdev(netdev);
	if (status != 0)
		goto unsetup;

	dev_info(&pdev->dev, "%s port %d\n", nic_name(pdev), adapter->port_num);
	return 0;

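	/* Error unwind: each label below undoes the corresponding setup
	 * step, in reverse order of initialization.
	 */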
unsetup:
	be_clear(adapter);
msix_disable:
	be_msix_disable(adapter);
stats_clean:
	be_stats_cleanup(adapter);
ctrl_clean:
	be_ctrl_cleanup(adapter);
free_netdev:
	be_sriov_disable(adapter);
	free_netdev(adapter->netdev);
	pci_set_drvdata(pdev, NULL);
rel_reg:
	pci_release_regions(pdev);
disable_dev:
	pci_disable_device(pdev);
do_none:
	dev_err(&pdev->dev, "%s initialization failed\n", nic_name(pdev));
	return status;
}

static int be_suspend(struct pci_dev *pdev, pm_message_t state)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);
	struct net_device *netdev = adapter->netdev;

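	/* If the user enabled wake-on-LAN, arm it before tearing the
	 * interface down.
	 */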
	if (adapter->wol)
		be_setup_wol(adapter, true);

	netif_device_detach(netdev);
	if (netif_running(netdev)) {
		rtnl_lock();
		be_close(netdev);
		rtnl_unlock();
	}
	be_cmd_get_flow_control(adapter, &adapter->tx_fc, &adapter->rx_fc);
	be_clear(adapter);

	pci_save_state(pdev);
	pci_disable_device(pdev);
	pci_set_power_state(pdev, pci_choose_state(pdev, state));
	return 0;
}

static int be_resume(struct pci_dev *pdev)
{
	int status = 0;
	struct be_adapter *adapter = pci_get_drvdata(pdev);
	struct net_device *netdev = adapter->netdev;

	netif_device_detach(netdev);

	status = pci_enable_device(pdev);
	if (status)
		return status;

	pci_set_power_state(pdev, 0);
	pci_restore_state(pdev);

	/* tell fw we're ready to fire cmds */
	status = be_cmd_fw_init(adapter);
	if (status)
		return status;

	be_setup(adapter);
	if (netif_running(netdev)) {
		rtnl_lock();
		be_open(netdev);
		rtnl_unlock();
	}
	netif_device_attach(netdev);

	if (adapter->wol)
		be_setup_wol(adapter, false);
	return 0;
}

/*
 * An FLR will stop BE from DMAing any data.
 */
static void be_shutdown(struct pci_dev *pdev)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);
	struct net_device *netdev = adapter->netdev;

	netif_device_detach(netdev);

	be_cmd_reset_function(adapter);

	if (adapter->wol)
		be_setup_wol(adapter, true);

	pci_disable_device(pdev);
}

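/* EEH (PCI error recovery) flow: error_detected tears the device down,
 * slot_reset re-enables it and waits for the firmware to POST, and
 * resume re-initializes the rings and reattaches the netdev.
 */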
static pci_ers_result_t be_eeh_err_detected(struct pci_dev *pdev,
				pci_channel_state_t state)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);
	struct net_device *netdev = adapter->netdev;

	dev_err(&adapter->pdev->dev, "EEH error detected\n");

	adapter->eeh_err = true;

	netif_device_detach(netdev);

	if (netif_running(netdev)) {
		rtnl_lock();
		be_close(netdev);
		rtnl_unlock();
	}
	be_clear(adapter);

	if (state == pci_channel_io_perm_failure)
		return PCI_ERS_RESULT_DISCONNECT;

	pci_disable_device(pdev);

	return PCI_ERS_RESULT_NEED_RESET;
}

static pci_ers_result_t be_eeh_reset(struct pci_dev *pdev)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);
	int status;

	dev_info(&adapter->pdev->dev, "EEH reset\n");
	adapter->eeh_err = false;

	status = pci_enable_device(pdev);
	if (status)
		return PCI_ERS_RESULT_DISCONNECT;

	pci_set_master(pdev);
	pci_set_power_state(pdev, 0);
	pci_restore_state(pdev);

	/* Check if card is ok and fw is ready */
	status = be_cmd_POST(adapter);
	if (status)
		return PCI_ERS_RESULT_DISCONNECT;

	return PCI_ERS_RESULT_RECOVERED;
}

static void be_eeh_resume(struct pci_dev *pdev)
{
	int status = 0;
	struct be_adapter *adapter = pci_get_drvdata(pdev);
	struct net_device *netdev = adapter->netdev;

	dev_info(&adapter->pdev->dev, "EEH resume\n");

	pci_save_state(pdev);

	/* tell fw we're ready to fire cmds */
	status = be_cmd_fw_init(adapter);
	if (status)
		goto err;

	status = be_setup(adapter);
	if (status)
		goto err;

	if (netif_running(netdev)) {
		status = be_open(netdev);
		if (status)
			goto err;
	}
	netif_device_attach(netdev);
	return;
err:
	dev_err(&adapter->pdev->dev, "EEH resume failed\n");
}

static struct pci_error_handlers be_eeh_handlers = {
	.error_detected = be_eeh_err_detected,
	.slot_reset = be_eeh_reset,
	.resume = be_eeh_resume,
};

static struct pci_driver be_driver = {
	.name = DRV_NAME,
	.id_table = be_dev_ids,
	.probe = be_probe,
	.remove = be_remove,
	.suspend = be_suspend,
	.resume = be_resume,
	.shutdown = be_shutdown,
	.err_handler = &be_eeh_handlers
};

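/* Module parameters are validated at load time; for example (assuming
 * the usual "be2net" module name for this driver):
 *
 *   modprobe be2net rx_frag_size=4096 num_vfs=4
 *
 * Out-of-range values are clamped below with a warning.
 */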
static int __init be_init_module(void)
{
	if (rx_frag_size != 8192 && rx_frag_size != 4096 &&
	    rx_frag_size != 2048) {
		printk(KERN_WARNING DRV_NAME
			" : Module param rx_frag_size must be 2048/4096/8192."
			" Using 2048\n");
		rx_frag_size = 2048;
	}

	if (num_vfs > 32) {
		printk(KERN_WARNING DRV_NAME
			" : Module param num_vfs must not be greater than 32."
			" Using 32\n");
		num_vfs = 32;
	}

	return pci_register_driver(&be_driver);
}
module_init(be_init_module);

static void __exit be_exit_module(void)
{
	pci_unregister_driver(&be_driver);
}
module_exit(be_exit_module);