/*
 * Copyright (C) 2005 - 2010 ServerEngines
 * All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version 2
 * as published by the Free Software Foundation. The full GNU General
 * Public License is included in this distribution in the file called COPYING.
 *
 * Contact Information:
 * linux-drivers@serverengines.com
 *
 * ServerEngines
 * 209 N. Fair Oaks Ave
 * Sunnyvale, CA 94085
 */

#include "be.h"
#include "be_cmds.h"
#include <asm/div64.h>

MODULE_VERSION(DRV_VER);
MODULE_DEVICE_TABLE(pci, be_dev_ids);
MODULE_DESCRIPTION(DRV_DESC " " DRV_VER);
MODULE_AUTHOR("ServerEngines Corporation");
MODULE_LICENSE("GPL");

static unsigned int rx_frag_size = 2048;
static unsigned int num_vfs;
module_param(rx_frag_size, uint, S_IRUGO);
module_param(num_vfs, uint, S_IRUGO);
MODULE_PARM_DESC(rx_frag_size, "Size of a fragment that holds rcvd data.");
MODULE_PARM_DESC(num_vfs, "Number of PCI VFs to initialize");

static bool multi_rxq = true;
module_param(multi_rxq, bool, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(multi_rxq, "Multi Rx Queue support. Enabled by default");

static DEFINE_PCI_DEVICE_TABLE(be_dev_ids) = {
	{ PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID1) },
	{ PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID2) },
	{ PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID1) },
	{ PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID2) },
	{ PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID3)},
	{ 0 }
};
MODULE_DEVICE_TABLE(pci, be_dev_ids);
/* UE Status Low CSR */
static char *ue_status_low_desc[] = {
	"CEV",
	"CTX",
	"DBUF",
	"ERX",
	"Host",
	"MPU",
	"NDMA",
	"PTC ",
	"RDMA ",
	"RXF ",
	"RXIPS ",
	"RXULP0 ",
	"RXULP1 ",
	"RXULP2 ",
	"TIM ",
	"TPOST ",
	"TPRE ",
	"TXIPS ",
	"TXULP0 ",
	"TXULP1 ",
	"UC ",
	"WDMA ",
	"TXULP2 ",
	"HOST1 ",
	"P0_OB_LINK ",
	"P1_OB_LINK ",
	"HOST_GPIO ",
	"MBOX ",
	"AXGMAC0",
	"AXGMAC1",
	"JTAG",
	"MPU_INTPEND"
};
/* UE Status High CSR */
static char *ue_status_hi_desc[] = {
	"LPCMEMHOST",
	"MGMT_MAC",
	"PCS0ONLINE",
	"MPU_IRAM",
	"PCS1ONLINE",
	"PCTL0",
	"PCTL1",
	"PMEM",
	"RR",
	"TXPB",
	"RXPP",
	"XAUI",
	"TXP",
	"ARM",
	"IPC",
	"HOST2",
	"HOST3",
	"HOST4",
	"HOST5",
	"HOST6",
	"HOST7",
	"HOST8",
	"HOST9",
	"NETC",
	"Unknown",
	"Unknown",
	"Unknown",
	"Unknown",
	"Unknown",
	"Unknown",
	"Unknown",
	"Unknown"
};

static inline bool be_multi_rxq(struct be_adapter *adapter)
{
	return (adapter->num_rx_qs > 1);
}

static void be_queue_free(struct be_adapter *adapter, struct be_queue_info *q)
{
	struct be_dma_mem *mem = &q->dma_mem;
	if (mem->va)
		dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
				  mem->dma);
}

static int be_queue_alloc(struct be_adapter *adapter, struct be_queue_info *q,
		u16 len, u16 entry_size)
{
	struct be_dma_mem *mem = &q->dma_mem;

	memset(q, 0, sizeof(*q));
	q->len = len;
	q->entry_size = entry_size;
	mem->size = len * entry_size;
	mem->va = dma_alloc_coherent(&adapter->pdev->dev, mem->size, &mem->dma,
				     GFP_KERNEL);
	if (!mem->va)
		return -1;
	memset(mem->va, 0, mem->size);
	return 0;
}

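/* Toggle the host-interrupt enable bit in the membar control register;
 * bails out early under an EEH error or when no change is needed.
 */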
static void be_intr_set(struct be_adapter *adapter, bool enable)
{
	u8 __iomem *addr = adapter->pcicfg + PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET;
	u32 reg = ioread32(addr);
	u32 enabled = reg & MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;

	if (adapter->eeh_err)
		return;

	if (!enabled && enable)
		reg |= MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
	else if (enabled && !enable)
		reg &= ~MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
	else
		return;

	iowrite32(reg, addr);
}

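/* Ring the RX queue doorbell: the queue id and the count of newly posted
 * buffers are packed into a single 32-bit write. The wmb() orders the
 * descriptor updates before the hardware is notified.
 */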
static void be_rxq_notify(struct be_adapter *adapter, u16 qid, u16 posted)
{
	u32 val = 0;
	val |= qid & DB_RQ_RING_ID_MASK;
	val |= posted << DB_RQ_NUM_POSTED_SHIFT;

	wmb();
	iowrite32(val, adapter->db + DB_RQ_OFFSET);
}

static void be_txq_notify(struct be_adapter *adapter, u16 qid, u16 posted)
{
	u32 val = 0;
	val |= qid & DB_TXULP_RING_ID_MASK;
	val |= (posted & DB_TXULP_NUM_POSTED_MASK) << DB_TXULP_NUM_POSTED_SHIFT;

	wmb();
	iowrite32(val, adapter->db + DB_TXULP1_OFFSET);
}

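/* Ring the event-queue doorbell: optionally re-arms the EQ, clears the
 * interrupt and acks num_popped processed entries in one write.
 */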
static void be_eq_notify(struct be_adapter *adapter, u16 qid,
		bool arm, bool clear_int, u16 num_popped)
{
	u32 val = 0;
	val |= qid & DB_EQ_RING_ID_MASK;
	val |= ((qid & DB_EQ_RING_ID_EXT_MASK) <<
			DB_EQ_RING_ID_EXT_MASK_SHIFT);

	if (adapter->eeh_err)
		return;

	if (arm)
		val |= 1 << DB_EQ_REARM_SHIFT;
	if (clear_int)
		val |= 1 << DB_EQ_CLR_SHIFT;
	val |= 1 << DB_EQ_EVNT_SHIFT;
	val |= num_popped << DB_EQ_NUM_POPPED_SHIFT;
	iowrite32(val, adapter->db + DB_EQ_OFFSET);
}

void be_cq_notify(struct be_adapter *adapter, u16 qid, bool arm, u16 num_popped)
{
	u32 val = 0;
	val |= qid & DB_CQ_RING_ID_MASK;
	val |= ((qid & DB_CQ_RING_ID_EXT_MASK) <<
			DB_CQ_RING_ID_EXT_MASK_SHIFT);

	if (adapter->eeh_err)
		return;

	if (arm)
		val |= 1 << DB_CQ_REARM_SHIFT;
	val |= num_popped << DB_CQ_NUM_POPPED_SHIFT;
	iowrite32(val, adapter->db + DB_CQ_OFFSET);
}

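/* Set a new unicast MAC: the old pmac entry is deleted and a new one is
 * programmed in its place; on any failure netdev->dev_addr is left unchanged.
 */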
static int be_mac_addr_set(struct net_device *netdev, void *p)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct sockaddr *addr = p;
	int status = 0;

	if (!is_valid_ether_addr(addr->sa_data))
		return -EADDRNOTAVAIL;

	/* MAC addr configuration will be done in hardware for VFs
	 * by their corresponding PFs. Just copy to netdev addr here
	 */
	if (!be_physfn(adapter))
		goto netdev_addr;

	status = be_cmd_pmac_del(adapter, adapter->if_handle,
				adapter->pmac_id, 0);
	if (status)
		return status;

	status = be_cmd_pmac_add(adapter, (u8 *)addr->sa_data,
			adapter->if_handle, &adapter->pmac_id, 0);
netdev_addr:
	if (!status)
		memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);

	return status;
}

void netdev_stats_update(struct be_adapter *adapter)
{
	struct be_hw_stats *hw_stats = hw_stats_from_cmd(adapter->stats_cmd.va);
	struct be_rxf_stats *rxf_stats = &hw_stats->rxf;
	struct be_port_rxf_stats *port_stats =
			&rxf_stats->port[adapter->port_num];
	struct net_device_stats *dev_stats = &adapter->netdev->stats;
	struct be_erx_stats *erx_stats = &hw_stats->erx;
	struct be_rx_obj *rxo;
	int i;

	memset(dev_stats, 0, sizeof(*dev_stats));
	for_all_rx_queues(adapter, rxo, i) {
		dev_stats->rx_packets += rx_stats(rxo)->rx_pkts;
		dev_stats->rx_bytes += rx_stats(rxo)->rx_bytes;
		dev_stats->multicast += rx_stats(rxo)->rx_mcast_pkts;
		/* no space in linux buffers: best possible approximation */
		dev_stats->rx_dropped +=
			erx_stats->rx_drops_no_fragments[rxo->q.id];
	}

	dev_stats->tx_packets = tx_stats(adapter)->be_tx_pkts;
	dev_stats->tx_bytes = tx_stats(adapter)->be_tx_bytes;

	/* bad pkts received */
	dev_stats->rx_errors = port_stats->rx_crc_errors +
		port_stats->rx_alignment_symbol_errors +
		port_stats->rx_in_range_errors +
		port_stats->rx_out_range_errors +
		port_stats->rx_frame_too_long +
		port_stats->rx_dropped_too_small +
		port_stats->rx_dropped_too_short +
		port_stats->rx_dropped_header_too_small +
		port_stats->rx_dropped_tcp_length +
		port_stats->rx_dropped_runt +
		port_stats->rx_tcp_checksum_errs +
		port_stats->rx_ip_checksum_errs +
		port_stats->rx_udp_checksum_errs;

	/* detailed rx errors */
	dev_stats->rx_length_errors = port_stats->rx_in_range_errors +
		port_stats->rx_out_range_errors +
		port_stats->rx_frame_too_long;

	dev_stats->rx_crc_errors = port_stats->rx_crc_errors;

	/* frame alignment errors */
	dev_stats->rx_frame_errors = port_stats->rx_alignment_symbol_errors;

	/* receiver fifo overrun */
	/* drops_no_pbuf is not per i/f, it's per BE card */
	dev_stats->rx_fifo_errors = port_stats->rx_fifo_overflow +
				port_stats->rx_input_fifo_overflow +
				rxf_stats->rx_drops_no_pbuf;
}

void be_link_status_update(struct be_adapter *adapter, bool link_up)
{
	struct net_device *netdev = adapter->netdev;

	/* If link came up or went down */
	if (adapter->link_up != link_up) {
		adapter->link_speed = -1;
		if (link_up) {
			netif_carrier_on(netdev);
			printk(KERN_INFO "%s: Link up\n", netdev->name);
		} else {
			netif_carrier_off(netdev);
			printk(KERN_INFO "%s: Link down\n", netdev->name);
		}
		adapter->link_up = link_up;
	}
}

/* Update the EQ delay in BE based on the RX frags consumed / sec */
static void be_rx_eqd_update(struct be_adapter *adapter, struct be_rx_obj *rxo)
{
	struct be_eq_obj *rx_eq = &rxo->rx_eq;
	struct be_rx_stats *stats = &rxo->stats;
	ulong now = jiffies;
	u32 eqd;

	if (!rx_eq->enable_aic)
		return;

	/* Wrapped around */
	if (time_before(now, stats->rx_fps_jiffies)) {
		stats->rx_fps_jiffies = now;
		return;
	}

	/* Update once a second */
	if ((now - stats->rx_fps_jiffies) < HZ)
		return;

	stats->rx_fps = (stats->rx_frags - stats->prev_rx_frags) /
			((now - stats->rx_fps_jiffies) / HZ);

	stats->rx_fps_jiffies = now;
	stats->prev_rx_frags = stats->rx_frags;
	eqd = stats->rx_fps / 110000;
	eqd = eqd << 3;
	if (eqd > rx_eq->max_eqd)
		eqd = rx_eq->max_eqd;
	if (eqd < rx_eq->min_eqd)
		eqd = rx_eq->min_eqd;
	if (eqd < 10)
		eqd = 0;
	if (eqd != rx_eq->cur_eqd)
		be_cmd_modify_eqd(adapter, rx_eq->q.id, eqd);

	rx_eq->cur_eqd = eqd;
}

static u32 be_calc_rate(u64 bytes, unsigned long ticks)
{
	u64 rate = bytes;

	do_div(rate, ticks / HZ);
	rate <<= 3;			/* bytes/sec -> bits/sec */
	do_div(rate, 1000000ul);	/* MB/Sec */

	return rate;
}

static void be_tx_rate_update(struct be_adapter *adapter)
{
	struct be_tx_stats *stats = tx_stats(adapter);
	ulong now = jiffies;

	/* Wrapped around? */
	if (time_before(now, stats->be_tx_jiffies)) {
		stats->be_tx_jiffies = now;
		return;
	}

	/* Update tx rate once in two seconds */
	if ((now - stats->be_tx_jiffies) > 2 * HZ) {
		stats->be_tx_rate = be_calc_rate(stats->be_tx_bytes
						  - stats->be_tx_bytes_prev,
						 now - stats->be_tx_jiffies);
		stats->be_tx_jiffies = now;
		stats->be_tx_bytes_prev = stats->be_tx_bytes;
	}
}

static void be_tx_stats_update(struct be_adapter *adapter,
			u32 wrb_cnt, u32 copied, u32 gso_segs, bool stopped)
{
	struct be_tx_stats *stats = tx_stats(adapter);
	stats->be_tx_reqs++;
	stats->be_tx_wrbs += wrb_cnt;
	stats->be_tx_bytes += copied;
	stats->be_tx_pkts += (gso_segs ? gso_segs : 1);
	if (stopped)
		stats->be_tx_stops++;
}

/* Determine number of WRB entries needed to xmit data in an skb */
static u32 wrb_cnt_for_skb(struct be_adapter *adapter, struct sk_buff *skb,
			   bool *dummy)
{
	int cnt = (skb->len > skb->data_len);

	cnt += skb_shinfo(skb)->nr_frags;

	/* to account for hdr wrb */
	cnt++;
	if (lancer_chip(adapter) || !(cnt & 1)) {
		*dummy = false;
	} else {
		/* add a dummy to make it an even num */
		cnt++;
		*dummy = true;
	}
	BUG_ON(cnt > BE_MAX_TX_FRAG_COUNT);
	return cnt;
}

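/* Fill one WRB fragment descriptor with a bus address and length */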
static inline void wrb_fill(struct be_eth_wrb *wrb, u64 addr, int len)
{
	wrb->frag_pa_hi = upper_32_bits(addr);
	wrb->frag_pa_lo = addr & 0xFFFFFFFF;
	wrb->frag_len = len & ETH_WRB_FRAG_LEN_MASK;
}

static void wrb_fill_hdr(struct be_adapter *adapter, struct be_eth_hdr_wrb *hdr,
		struct sk_buff *skb, u32 wrb_cnt, u32 len)
{
	u8 vlan_prio = 0;
	u16 vlan_tag = 0;

	memset(hdr, 0, sizeof(*hdr));

	AMAP_SET_BITS(struct amap_eth_hdr_wrb, crc, hdr, 1);

	if (skb_is_gso(skb)) {
		AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso, hdr, 1);
		AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso_mss,
			hdr, skb_shinfo(skb)->gso_size);
		if (skb_is_gso_v6(skb) && !lancer_chip(adapter))
			AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso6, hdr, 1);
		if (lancer_chip(adapter) && adapter->sli_family ==
						LANCER_A0_SLI_FAMILY) {
			AMAP_SET_BITS(struct amap_eth_hdr_wrb, ipcs, hdr, 1);
			if (is_tcp_pkt(skb))
				AMAP_SET_BITS(struct amap_eth_hdr_wrb,
						tcpcs, hdr, 1);
			else if (is_udp_pkt(skb))
				AMAP_SET_BITS(struct amap_eth_hdr_wrb,
						udpcs, hdr, 1);
		}
	} else if (skb->ip_summed == CHECKSUM_PARTIAL) {
		if (is_tcp_pkt(skb))
			AMAP_SET_BITS(struct amap_eth_hdr_wrb, tcpcs, hdr, 1);
		else if (is_udp_pkt(skb))
			AMAP_SET_BITS(struct amap_eth_hdr_wrb, udpcs, hdr, 1);
	}

	if (adapter->vlan_grp && vlan_tx_tag_present(skb)) {
		AMAP_SET_BITS(struct amap_eth_hdr_wrb, vlan, hdr, 1);
		vlan_tag = vlan_tx_tag_get(skb);
		vlan_prio = (vlan_tag & VLAN_PRIO_MASK) >> VLAN_PRIO_SHIFT;
		/* If vlan priority provided by OS is NOT in available bmap */
		if (!(adapter->vlan_prio_bmap & (1 << vlan_prio)))
			vlan_tag = (vlan_tag & ~VLAN_PRIO_MASK) |
					adapter->recommended_prio;
		AMAP_SET_BITS(struct amap_eth_hdr_wrb, vlan_tag, hdr, vlan_tag);
	}

	AMAP_SET_BITS(struct amap_eth_hdr_wrb, event, hdr, 1);
	AMAP_SET_BITS(struct amap_eth_hdr_wrb, complete, hdr, 1);
	AMAP_SET_BITS(struct amap_eth_hdr_wrb, num_wrb, hdr, wrb_cnt);
	AMAP_SET_BITS(struct amap_eth_hdr_wrb, len, hdr, len);
}

static void unmap_tx_frag(struct device *dev, struct be_eth_wrb *wrb,
		bool unmap_single)
{
	dma_addr_t dma;

	be_dws_le_to_cpu(wrb, sizeof(*wrb));

	dma = (u64)wrb->frag_pa_hi << 32 | (u64)wrb->frag_pa_lo;
	if (wrb->frag_len) {
		if (unmap_single)
			dma_unmap_single(dev, dma, wrb->frag_len,
					 DMA_TO_DEVICE);
		else
			dma_unmap_page(dev, dma, wrb->frag_len, DMA_TO_DEVICE);
	}
}

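/* DMA-map the skb (linear part plus page frags) and build the WRB chain
 * in the TX queue: a header WRB, one WRB per mapped fragment and, when an
 * odd count would result on non-Lancer chips, a trailing dummy WRB. On a
 * mapping failure everything mapped so far is unwound via the dma_err path.
 */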
static int make_tx_wrbs(struct be_adapter *adapter,
		struct sk_buff *skb, u32 wrb_cnt, bool dummy_wrb)
{
	dma_addr_t busaddr;
	int i, copied = 0;
	struct device *dev = &adapter->pdev->dev;
	struct sk_buff *first_skb = skb;
	struct be_queue_info *txq = &adapter->tx_obj.q;
	struct be_eth_wrb *wrb;
	struct be_eth_hdr_wrb *hdr;
	bool map_single = false;
	u16 map_head;

	hdr = queue_head_node(txq);
	queue_head_inc(txq);
	map_head = txq->head;

	if (skb->len > skb->data_len) {
		int len = skb_headlen(skb);
		busaddr = dma_map_single(dev, skb->data, len, DMA_TO_DEVICE);
		if (dma_mapping_error(dev, busaddr))
			goto dma_err;
		map_single = true;
		wrb = queue_head_node(txq);
		wrb_fill(wrb, busaddr, len);
		be_dws_cpu_to_le(wrb, sizeof(*wrb));
		queue_head_inc(txq);
		copied += len;
	}

	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		struct skb_frag_struct *frag =
			&skb_shinfo(skb)->frags[i];
		busaddr = dma_map_page(dev, frag->page, frag->page_offset,
				       frag->size, DMA_TO_DEVICE);
		if (dma_mapping_error(dev, busaddr))
			goto dma_err;
		wrb = queue_head_node(txq);
		wrb_fill(wrb, busaddr, frag->size);
		be_dws_cpu_to_le(wrb, sizeof(*wrb));
		queue_head_inc(txq);
		copied += frag->size;
	}

	if (dummy_wrb) {
		wrb = queue_head_node(txq);
		wrb_fill(wrb, 0, 0);
		be_dws_cpu_to_le(wrb, sizeof(*wrb));
		queue_head_inc(txq);
	}

	wrb_fill_hdr(adapter, hdr, first_skb, wrb_cnt, copied);
	be_dws_cpu_to_le(hdr, sizeof(*hdr));

	return copied;
dma_err:
	txq->head = map_head;
	while (copied) {
		wrb = queue_head_node(txq);
		unmap_tx_frag(dev, wrb, map_single);
		map_single = false;
		copied -= wrb->frag_len;
		queue_head_inc(txq);
	}
	return 0;
}

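/* Transmit entry point: builds WRBs for the skb, stops the queue when the
 * ring may not fit a worst-case next packet, then rings the TX doorbell.
 * If WRB construction fails, the ring head is rolled back and the skb
 * is dropped.
 */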
static netdev_tx_t be_xmit(struct sk_buff *skb,
			struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_tx_obj *tx_obj = &adapter->tx_obj;
	struct be_queue_info *txq = &tx_obj->q;
	u32 wrb_cnt = 0, copied = 0;
	u32 start = txq->head;
	bool dummy_wrb, stopped = false;

	wrb_cnt = wrb_cnt_for_skb(adapter, skb, &dummy_wrb);

	copied = make_tx_wrbs(adapter, skb, wrb_cnt, dummy_wrb);
	if (copied) {
		/* record the sent skb in the sent_skb table */
		BUG_ON(tx_obj->sent_skb_list[start]);
		tx_obj->sent_skb_list[start] = skb;

		/* Ensure txq has space for the next skb; Else stop the queue
		 * *BEFORE* ringing the tx doorbell, so that we serialize the
		 * tx compls of the current transmit which'll wake up the queue
		 */
		atomic_add(wrb_cnt, &txq->used);
		if ((BE_MAX_TX_FRAG_COUNT + atomic_read(&txq->used)) >=
								txq->len) {
			netif_stop_queue(netdev);
			stopped = true;
		}

		be_txq_notify(adapter, txq->id, wrb_cnt);

		be_tx_stats_update(adapter, wrb_cnt, copied,
				skb_shinfo(skb)->gso_segs, stopped);
	} else {
		txq->head = start;
		dev_kfree_skb_any(skb);
	}
	return NETDEV_TX_OK;
}

static int be_change_mtu(struct net_device *netdev, int new_mtu)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	if (new_mtu < BE_MIN_MTU ||
			new_mtu > (BE_MAX_JUMBO_FRAME_SIZE -
					(ETH_HLEN + ETH_FCS_LEN))) {
		dev_info(&adapter->pdev->dev,
			"MTU must be between %d and %d bytes\n",
			BE_MIN_MTU,
			(BE_MAX_JUMBO_FRAME_SIZE - (ETH_HLEN + ETH_FCS_LEN)));
		return -EINVAL;
	}
	dev_info(&adapter->pdev->dev, "MTU changed from %d to %d bytes\n",
			netdev->mtu, new_mtu);
	netdev->mtu = new_mtu;
	return 0;
}

/*
 * A max of 64 (BE_NUM_VLANS_SUPPORTED) vlans can be configured in BE.
 * If the user configures more, place BE in vlan promiscuous mode.
 */
static int be_vid_config(struct be_adapter *adapter, bool vf, u32 vf_num)
{
	u16 vtag[BE_NUM_VLANS_SUPPORTED];
	u16 ntags = 0, i;
	int status = 0;
	u32 if_handle;

	if (vf) {
		if_handle = adapter->vf_cfg[vf_num].vf_if_handle;
		vtag[0] = cpu_to_le16(adapter->vf_cfg[vf_num].vf_vlan_tag);
		status = be_cmd_vlan_config(adapter, if_handle, vtag, 1, 1, 0);
	}

	if (adapter->vlans_added <= adapter->max_vlans) {
		/* Construct VLAN Table to give to HW */
		for (i = 0; i < VLAN_N_VID; i++) {
			if (adapter->vlan_tag[i]) {
				vtag[ntags] = cpu_to_le16(i);
				ntags++;
			}
		}
		status = be_cmd_vlan_config(adapter, adapter->if_handle,
					vtag, ntags, 1, 0);
	} else {
		status = be_cmd_vlan_config(adapter, adapter->if_handle,
					NULL, 0, 1, 1);
	}

	return status;
}

static void be_vlan_register(struct net_device *netdev, struct vlan_group *grp)
{
	struct be_adapter *adapter = netdev_priv(netdev);

	adapter->vlan_grp = grp;
}

static void be_vlan_add_vid(struct net_device *netdev, u16 vid)
{
	struct be_adapter *adapter = netdev_priv(netdev);

	adapter->vlans_added++;
	if (!be_physfn(adapter))
		return;

	adapter->vlan_tag[vid] = 1;
	if (adapter->vlans_added <= (adapter->max_vlans + 1))
		be_vid_config(adapter, false, 0);
}

static void be_vlan_rem_vid(struct net_device *netdev, u16 vid)
{
	struct be_adapter *adapter = netdev_priv(netdev);

	adapter->vlans_added--;
	vlan_group_set_device(adapter->vlan_grp, vid, NULL);

	if (!be_physfn(adapter))
		return;

	adapter->vlan_tag[vid] = 0;
	if (adapter->vlans_added <= adapter->max_vlans)
		be_vid_config(adapter, false, 0);
}

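/* Sync the RX filter with the netdev flags: full promiscuous, multicast
 * promiscuous (all-multi or more groups than the HW table holds), or an
 * exact multicast list.
 */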
static void be_set_multicast_list(struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);

	if (netdev->flags & IFF_PROMISC) {
		be_cmd_promiscuous_config(adapter, adapter->port_num, 1);
		adapter->promiscuous = true;
		goto done;
	}

	/* BE was previously in promiscuous mode; disable it */
	if (adapter->promiscuous) {
		adapter->promiscuous = false;
		be_cmd_promiscuous_config(adapter, adapter->port_num, 0);
	}

	/* Enable multicast promisc if num configured exceeds what we support */
	if (netdev->flags & IFF_ALLMULTI ||
	    netdev_mc_count(netdev) > BE_MAX_MC) {
		be_cmd_multicast_set(adapter, adapter->if_handle, NULL,
				&adapter->mc_cmd_mem);
		goto done;
	}

	be_cmd_multicast_set(adapter, adapter->if_handle, netdev,
		&adapter->mc_cmd_mem);
done:
	return;
}

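/* SR-IOV ndo handlers: the PF programs per-VF MAC, VLAN and TX rate on
 * behalf of its VFs; each handler rejects out-of-range VF indices and
 * refuses to run when SR-IOV is not enabled.
 */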
static int be_set_vf_mac(struct net_device *netdev, int vf, u8 *mac)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	int status;

	if (!adapter->sriov_enabled)
		return -EPERM;

	if (!is_valid_ether_addr(mac) || (vf >= num_vfs))
		return -EINVAL;

	if (adapter->vf_cfg[vf].vf_pmac_id != BE_INVALID_PMAC_ID)
		status = be_cmd_pmac_del(adapter,
					adapter->vf_cfg[vf].vf_if_handle,
					adapter->vf_cfg[vf].vf_pmac_id, vf + 1);

	status = be_cmd_pmac_add(adapter, mac,
				adapter->vf_cfg[vf].vf_if_handle,
				&adapter->vf_cfg[vf].vf_pmac_id, vf + 1);

	if (status)
		dev_err(&adapter->pdev->dev, "MAC %pM set on VF %d Failed\n",
				mac, vf);
	else
		memcpy(adapter->vf_cfg[vf].vf_mac_addr, mac, ETH_ALEN);

	return status;
}

static int be_get_vf_config(struct net_device *netdev, int vf,
			struct ifla_vf_info *vi)
{
	struct be_adapter *adapter = netdev_priv(netdev);

	if (!adapter->sriov_enabled)
		return -EPERM;

	if (vf >= num_vfs)
		return -EINVAL;

	vi->vf = vf;
	vi->tx_rate = adapter->vf_cfg[vf].vf_tx_rate;
	vi->vlan = adapter->vf_cfg[vf].vf_vlan_tag;
	vi->qos = 0;
	memcpy(&vi->mac, adapter->vf_cfg[vf].vf_mac_addr, ETH_ALEN);

	return 0;
}

static int be_set_vf_vlan(struct net_device *netdev,
			int vf, u16 vlan, u8 qos)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	int status = 0;

	if (!adapter->sriov_enabled)
		return -EPERM;

	if ((vf >= num_vfs) || (vlan > 4095))
		return -EINVAL;

	if (vlan) {
		adapter->vf_cfg[vf].vf_vlan_tag = vlan;
		adapter->vlans_added++;
	} else {
		adapter->vf_cfg[vf].vf_vlan_tag = 0;
		adapter->vlans_added--;
	}

	status = be_vid_config(adapter, true, vf);

	if (status)
		dev_info(&adapter->pdev->dev,
				"VLAN %d config on VF %d failed\n", vlan, vf);
	return status;
}

static int be_set_vf_tx_rate(struct net_device *netdev,
			int vf, int rate)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	int status = 0;

	if (!adapter->sriov_enabled)
		return -EPERM;

	if ((vf >= num_vfs) || (rate < 0))
		return -EINVAL;

	if (rate > 10000)
		rate = 10000;

	adapter->vf_cfg[vf].vf_tx_rate = rate;
	status = be_cmd_set_qos(adapter, rate / 10, vf + 1);

	if (status)
		dev_info(&adapter->pdev->dev,
				"tx rate %d on VF %d failed\n", rate, vf);
	return status;
}

static void be_rx_rate_update(struct be_rx_obj *rxo)
{
	struct be_rx_stats *stats = &rxo->stats;
	ulong now = jiffies;

	/* Wrapped around */
	if (time_before(now, stats->rx_jiffies)) {
		stats->rx_jiffies = now;
		return;
	}

	/* Update the rate once in two seconds */
	if ((now - stats->rx_jiffies) < 2 * HZ)
		return;

	stats->rx_rate = be_calc_rate(stats->rx_bytes - stats->rx_bytes_prev,
				now - stats->rx_jiffies);
	stats->rx_jiffies = now;
	stats->rx_bytes_prev = stats->rx_bytes;
}

static void be_rx_stats_update(struct be_rx_obj *rxo,
		u32 pktsize, u16 numfrags, u8 pkt_type)
{
	struct be_rx_stats *stats = &rxo->stats;

	stats->rx_compl++;
	stats->rx_frags += numfrags;
	stats->rx_bytes += pktsize;
	stats->rx_pkts++;
	if (pkt_type == BE_MULTICAST_PACKET)
		stats->rx_mcast_pkts++;
}

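/* Returns true if the completion reports a good L4 checksum; the IP
 * checksum bit is ignored for IPv6, which carries no IP header checksum.
 */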
static inline bool csum_passed(struct be_eth_rx_compl *rxcp)
{
	u8 l4_cksm, ipv6, ipcksm;

	l4_cksm = AMAP_GET_BITS(struct amap_eth_rx_compl, l4_cksm, rxcp);
	ipcksm = AMAP_GET_BITS(struct amap_eth_rx_compl, ipcksm, rxcp);
	ipv6 = AMAP_GET_BITS(struct amap_eth_rx_compl, ip_version, rxcp);

	/* Ignore ipcksm for ipv6 pkts */
	return l4_cksm && (ipcksm || ipv6);
}

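/* Look up the page info backing an RX frag; the DMA mapping is torn down
 * only on the last frag carved from the (compound) page, and the consumed
 * entry is accounted against rxq->used.
 */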
static struct be_rx_page_info *
get_rx_page_info(struct be_adapter *adapter,
		struct be_rx_obj *rxo,
		u16 frag_idx)
{
	struct be_rx_page_info *rx_page_info;
	struct be_queue_info *rxq = &rxo->q;

	rx_page_info = &rxo->page_info_tbl[frag_idx];
	BUG_ON(!rx_page_info->page);

	if (rx_page_info->last_page_user) {
		dma_unmap_page(&adapter->pdev->dev,
			       dma_unmap_addr(rx_page_info, bus),
			       adapter->big_page_size, DMA_FROM_DEVICE);
		rx_page_info->last_page_user = false;
	}

	atomic_dec(&rxq->used);
	return rx_page_info;
}

/* Throw away the data in the Rx completion */
static void be_rx_compl_discard(struct be_adapter *adapter,
		struct be_rx_obj *rxo,
		struct be_eth_rx_compl *rxcp)
{
	struct be_queue_info *rxq = &rxo->q;
	struct be_rx_page_info *page_info;
	u16 rxq_idx, i, num_rcvd;

	rxq_idx = AMAP_GET_BITS(struct amap_eth_rx_compl, fragndx, rxcp);
	num_rcvd = AMAP_GET_BITS(struct amap_eth_rx_compl, numfrags, rxcp);

	/* Skip out-of-buffer compl(lancer) or flush compl(BE) */
	if (likely(rxq_idx != rxo->last_frag_index && num_rcvd != 0)) {

		rxo->last_frag_index = rxq_idx;

		for (i = 0; i < num_rcvd; i++) {
			page_info = get_rx_page_info(adapter, rxo, rxq_idx);
			put_page(page_info->page);
			memset(page_info, 0, sizeof(*page_info));
			index_inc(&rxq_idx, rxq->len);
		}
	}
}

/*
 * skb_fill_rx_data forms a complete skb for an ether frame
 * indicated by rxcp.
 */
static void skb_fill_rx_data(struct be_adapter *adapter, struct be_rx_obj *rxo,
			struct sk_buff *skb, struct be_eth_rx_compl *rxcp,
			u16 num_rcvd)
{
	struct be_queue_info *rxq = &rxo->q;
	struct be_rx_page_info *page_info;
	u16 rxq_idx, i, j;
	u32 pktsize, hdr_len, curr_frag_len, size;
	u8 *start;
	u8 pkt_type;

	rxq_idx = AMAP_GET_BITS(struct amap_eth_rx_compl, fragndx, rxcp);
	pktsize = AMAP_GET_BITS(struct amap_eth_rx_compl, pktsize, rxcp);
	pkt_type = AMAP_GET_BITS(struct amap_eth_rx_compl, cast_enc, rxcp);

	page_info = get_rx_page_info(adapter, rxo, rxq_idx);

	start = page_address(page_info->page) + page_info->page_offset;
	prefetch(start);

	/* Copy data in the first descriptor of this completion */
	curr_frag_len = min(pktsize, rx_frag_size);

	/* Copy the header portion into skb_data */
	hdr_len = min((u32)BE_HDR_LEN, curr_frag_len);
	memcpy(skb->data, start, hdr_len);
	skb->len = curr_frag_len;
	if (curr_frag_len <= BE_HDR_LEN) { /* tiny packet */
		/* Complete packet has now been moved to data */
		put_page(page_info->page);
		skb->data_len = 0;
		skb->tail += curr_frag_len;
	} else {
		skb_shinfo(skb)->nr_frags = 1;
		skb_shinfo(skb)->frags[0].page = page_info->page;
		skb_shinfo(skb)->frags[0].page_offset =
					page_info->page_offset + hdr_len;
		skb_shinfo(skb)->frags[0].size = curr_frag_len - hdr_len;
		skb->data_len = curr_frag_len - hdr_len;
		skb->tail += hdr_len;
	}
	page_info->page = NULL;

	if (pktsize <= rx_frag_size) {
		BUG_ON(num_rcvd != 1);
		goto done;
	}

	/* More frags present for this completion */
	size = pktsize;
	for (i = 1, j = 0; i < num_rcvd; i++) {
		size -= curr_frag_len;
		index_inc(&rxq_idx, rxq->len);
		page_info = get_rx_page_info(adapter, rxo, rxq_idx);

		curr_frag_len = min(size, rx_frag_size);

		/* Coalesce all frags from the same physical page in one slot */
		if (page_info->page_offset == 0) {
			/* Fresh page */
			j++;
			skb_shinfo(skb)->frags[j].page = page_info->page;
			skb_shinfo(skb)->frags[j].page_offset =
							page_info->page_offset;
			skb_shinfo(skb)->frags[j].size = 0;
			skb_shinfo(skb)->nr_frags++;
		} else {
			put_page(page_info->page);
		}

		skb_shinfo(skb)->frags[j].size += curr_frag_len;
		skb->len += curr_frag_len;
		skb->data_len += curr_frag_len;

		page_info->page = NULL;
	}
	BUG_ON(j > MAX_SKB_FRAGS);

done:
	be_rx_stats_update(rxo, pktsize, num_rcvd, pkt_type);
}

/* Process the RX completion indicated by rxcp when GRO is disabled */
static void be_rx_compl_process(struct be_adapter *adapter,
		struct be_rx_obj *rxo,
		struct be_eth_rx_compl *rxcp)
{
	struct sk_buff *skb;
	u32 vlanf, vid;
	u16 num_rcvd;
	u8 vtm;

	num_rcvd = AMAP_GET_BITS(struct amap_eth_rx_compl, numfrags, rxcp);

	skb = netdev_alloc_skb_ip_align(adapter->netdev, BE_HDR_LEN);
	if (unlikely(!skb)) {
		if (net_ratelimit())
			dev_warn(&adapter->pdev->dev, "skb alloc failed\n");
		be_rx_compl_discard(adapter, rxo, rxcp);
		return;
	}

	skb_fill_rx_data(adapter, rxo, skb, rxcp, num_rcvd);

	if (likely(adapter->rx_csum && csum_passed(rxcp)))
		skb->ip_summed = CHECKSUM_UNNECESSARY;
	else
		skb_checksum_none_assert(skb);

	skb->truesize = skb->len + sizeof(struct sk_buff);
	skb->protocol = eth_type_trans(skb, adapter->netdev);

	vlanf = AMAP_GET_BITS(struct amap_eth_rx_compl, vtp, rxcp);
	vtm = AMAP_GET_BITS(struct amap_eth_rx_compl, vtm, rxcp);

	/* vlanf could be wrongly set in some cards.
	 * ignore if vtm is not set */
	if ((adapter->function_mode & 0x400) && !vtm)
		vlanf = 0;

	if (unlikely(vlanf)) {
		if (!adapter->vlan_grp || adapter->vlans_added == 0) {
			kfree_skb(skb);
			return;
		}
		vid = AMAP_GET_BITS(struct amap_eth_rx_compl, vlan_tag, rxcp);
		if (!lancer_chip(adapter))
			vid = swab16(vid);
		vlan_hwaccel_receive_skb(skb, adapter->vlan_grp, vid);
	} else {
		netif_receive_skb(skb);
	}
}

/* Process the RX completion indicated by rxcp when GRO is enabled */
static void be_rx_compl_process_gro(struct be_adapter *adapter,
		struct be_rx_obj *rxo,
		struct be_eth_rx_compl *rxcp)
{
	struct be_rx_page_info *page_info;
	struct sk_buff *skb = NULL;
	struct be_queue_info *rxq = &rxo->q;
	struct be_eq_obj *eq_obj = &rxo->rx_eq;
	u32 num_rcvd, pkt_size, remaining, vlanf, curr_frag_len;
	u16 i, rxq_idx = 0, vid, j;
	u8 vtm;
	u8 pkt_type;

	num_rcvd = AMAP_GET_BITS(struct amap_eth_rx_compl, numfrags, rxcp);
	pkt_size = AMAP_GET_BITS(struct amap_eth_rx_compl, pktsize, rxcp);
	vlanf = AMAP_GET_BITS(struct amap_eth_rx_compl, vtp, rxcp);
	rxq_idx = AMAP_GET_BITS(struct amap_eth_rx_compl, fragndx, rxcp);
	vtm = AMAP_GET_BITS(struct amap_eth_rx_compl, vtm, rxcp);
	pkt_type = AMAP_GET_BITS(struct amap_eth_rx_compl, cast_enc, rxcp);

	/* vlanf could be wrongly set in some cards.
	 * ignore if vtm is not set */
	if ((adapter->function_mode & 0x400) && !vtm)
		vlanf = 0;

	skb = napi_get_frags(&eq_obj->napi);
	if (!skb) {
		be_rx_compl_discard(adapter, rxo, rxcp);
		return;
	}

	remaining = pkt_size;
	for (i = 0, j = -1; i < num_rcvd; i++) {
		page_info = get_rx_page_info(adapter, rxo, rxq_idx);

		curr_frag_len = min(remaining, rx_frag_size);

		/* Coalesce all frags from the same physical page in one slot */
		if (i == 0 || page_info->page_offset == 0) {
			/* First frag or Fresh page */
			j++;
			skb_shinfo(skb)->frags[j].page = page_info->page;
			skb_shinfo(skb)->frags[j].page_offset =
							page_info->page_offset;
			skb_shinfo(skb)->frags[j].size = 0;
		} else {
			put_page(page_info->page);
		}
		skb_shinfo(skb)->frags[j].size += curr_frag_len;

		remaining -= curr_frag_len;
		index_inc(&rxq_idx, rxq->len);
		memset(page_info, 0, sizeof(*page_info));
	}
	BUG_ON(j > MAX_SKB_FRAGS);

	skb_shinfo(skb)->nr_frags = j + 1;
	skb->len = pkt_size;
	skb->data_len = pkt_size;
	skb->truesize += pkt_size;
	skb->ip_summed = CHECKSUM_UNNECESSARY;

	if (likely(!vlanf)) {
		napi_gro_frags(&eq_obj->napi);
	} else {
		vid = AMAP_GET_BITS(struct amap_eth_rx_compl, vlan_tag, rxcp);
		if (!lancer_chip(adapter))
			vid = swab16(vid);

		if (!adapter->vlan_grp || adapter->vlans_added == 0)
			return;

		vlan_gro_frags(&eq_obj->napi, adapter->vlan_grp, vid);
	}

	be_rx_stats_update(rxo, pkt_size, num_rcvd, pkt_type);
}

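/* Return the next valid RX completion, or NULL if none is pending.
 * The rmb() prevents the entry body being read before its valid bit.
 */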
static struct be_eth_rx_compl *be_rx_compl_get(struct be_rx_obj *rxo)
{
	struct be_eth_rx_compl *rxcp = queue_tail_node(&rxo->cq);

	if (rxcp->dw[offsetof(struct amap_eth_rx_compl, valid) / 32] == 0)
		return NULL;

	rmb();
	be_dws_le_to_cpu(rxcp, sizeof(*rxcp));

	queue_tail_inc(&rxo->cq);
	return rxcp;
}

/* To reset the valid bit, we need to reset the whole word as
 * when walking the queue the valid entries are little-endian
 * and invalid entries are host endian
 */
static inline void be_rx_compl_reset(struct be_eth_rx_compl *rxcp)
{
	rxcp->dw[offsetof(struct amap_eth_rx_compl, valid) / 32] = 0;
}

static inline struct page *be_alloc_pages(u32 size)
{
	gfp_t alloc_flags = GFP_ATOMIC;
	u32 order = get_order(size);
	if (order > 0)
		alloc_flags |= __GFP_COMP;
	return alloc_pages(alloc_flags, order);
}

/*
 * Allocate a page, split it to fragments of size rx_frag_size and post as
 * receive buffers to BE
 */
static void be_post_rx_frags(struct be_rx_obj *rxo)
{
	struct be_adapter *adapter = rxo->adapter;
	struct be_rx_page_info *page_info_tbl = rxo->page_info_tbl;
	struct be_rx_page_info *page_info = NULL, *prev_page_info = NULL;
	struct be_queue_info *rxq = &rxo->q;
	struct page *pagep = NULL;
	struct be_eth_rx_d *rxd;
	u64 page_dmaaddr = 0, frag_dmaaddr;
	u32 posted, page_offset = 0;

	page_info = &rxo->page_info_tbl[rxq->head];
	for (posted = 0; posted < MAX_RX_POST && !page_info->page; posted++) {
		if (!pagep) {
			pagep = be_alloc_pages(adapter->big_page_size);
			if (unlikely(!pagep)) {
				rxo->stats.rx_post_fail++;
				break;
			}
			page_dmaaddr = dma_map_page(&adapter->pdev->dev, pagep,
						    0, adapter->big_page_size,
						    DMA_FROM_DEVICE);
			page_info->page_offset = 0;
		} else {
			get_page(pagep);
			page_info->page_offset = page_offset + rx_frag_size;
		}
		page_offset = page_info->page_offset;
		page_info->page = pagep;
		dma_unmap_addr_set(page_info, bus, page_dmaaddr);
		frag_dmaaddr = page_dmaaddr + page_info->page_offset;

		rxd = queue_head_node(rxq);
		rxd->fragpa_lo = cpu_to_le32(frag_dmaaddr & 0xFFFFFFFF);
		rxd->fragpa_hi = cpu_to_le32(upper_32_bits(frag_dmaaddr));

		/* Any space left in the current big page for another frag? */
		if ((page_offset + rx_frag_size + rx_frag_size) >
					adapter->big_page_size) {
			pagep = NULL;
			page_info->last_page_user = true;
		}

		prev_page_info = page_info;
		queue_head_inc(rxq);
		page_info = &page_info_tbl[rxq->head];
	}
	if (pagep)
		prev_page_info->last_page_user = true;

	if (posted) {
		atomic_add(posted, &rxq->used);
		be_rxq_notify(adapter, rxq->id, posted);
	} else if (atomic_read(&rxq->used) == 0) {
		/* Let be_worker replenish when memory is available */
		rxo->rx_post_starved = true;
	}
}

static struct be_eth_tx_compl *be_tx_compl_get(struct be_queue_info *tx_cq)
{
	struct be_eth_tx_compl *txcp = queue_tail_node(tx_cq);

	if (txcp->dw[offsetof(struct amap_eth_tx_compl, valid) / 32] == 0)
		return NULL;

	rmb();
	be_dws_le_to_cpu(txcp, sizeof(*txcp));

	txcp->dw[offsetof(struct amap_eth_tx_compl, valid) / 32] = 0;

	queue_tail_inc(tx_cq);
	return txcp;
}

1254static void be_tx_compl_process(struct be_adapter *adapter, u16 last_index)
1255{
1256 struct be_queue_info *txq = &adapter->tx_obj.q;
Alexander Duycka73b7962009-12-02 16:48:18 +00001257 struct be_eth_wrb *wrb;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001258 struct sk_buff **sent_skbs = adapter->tx_obj.sent_skb_list;
1259 struct sk_buff *sent_skb;
Sathya Perlaec43b1a2010-03-22 20:41:34 +00001260 u16 cur_index, num_wrbs = 1; /* account for hdr wrb */
1261 bool unmap_skb_hdr = true;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001262
Sathya Perlaec43b1a2010-03-22 20:41:34 +00001263 sent_skb = sent_skbs[txq->tail];
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001264 BUG_ON(!sent_skb);
Sathya Perlaec43b1a2010-03-22 20:41:34 +00001265 sent_skbs[txq->tail] = NULL;
1266
1267 /* skip header wrb */
Alexander Duycka73b7962009-12-02 16:48:18 +00001268 queue_tail_inc(txq);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001269
Sathya Perlaec43b1a2010-03-22 20:41:34 +00001270 do {
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001271 cur_index = txq->tail;
Alexander Duycka73b7962009-12-02 16:48:18 +00001272 wrb = queue_tail_node(txq);
Ivan Vecera2b7bceb2011-02-02 08:05:12 +00001273 unmap_tx_frag(&adapter->pdev->dev, wrb,
1274 (unmap_skb_hdr && skb_headlen(sent_skb)));
Sathya Perlaec43b1a2010-03-22 20:41:34 +00001275 unmap_skb_hdr = false;
1276
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001277 num_wrbs++;
1278 queue_tail_inc(txq);
Sathya Perlaec43b1a2010-03-22 20:41:34 +00001279 } while (cur_index != last_index);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001280
1281 atomic_sub(num_wrbs, &txq->used);
Alexander Duycka73b7962009-12-02 16:48:18 +00001282
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001283 kfree_skb(sent_skb);
1284}
1285
Sathya Perla859b1e42009-08-10 03:43:51 +00001286static inline struct be_eq_entry *event_get(struct be_eq_obj *eq_obj)
1287{
1288 struct be_eq_entry *eqe = queue_tail_node(&eq_obj->q);
1289
1290 if (!eqe->evt)
1291 return NULL;
1292
Sathya Perlaf3eb62d2010-06-29 00:11:17 +00001293 rmb();
Sathya Perla859b1e42009-08-10 03:43:51 +00001294 eqe->evt = le32_to_cpu(eqe->evt);
1295 queue_tail_inc(&eq_obj->q);
1296 return eqe;
1297}
1298
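/*
 * EQ doorbell semantics (as used below): be_eq_notify(adapter, eq_id,
 * rearm, clear, num) returns `num` consumed entries to the hardware and
 * optionally re-arms the EQ. Notifying even when num == 0 is what
 * silences a spurious interrupt that arrived with no queued event.
 */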
static int event_handle(struct be_adapter *adapter,
			struct be_eq_obj *eq_obj)
{
	struct be_eq_entry *eqe;
	u16 num = 0;

	while ((eqe = event_get(eq_obj)) != NULL) {
		eqe->evt = 0;
		num++;
	}

	/* Deal with any spurious interrupts that come
	 * without events
	 */
	be_eq_notify(adapter, eq_obj->q.id, true, true, num);
	if (num)
		napi_schedule(&eq_obj->napi);

	return num;
}

/* Just read and notify events without processing them.
 * Used at the time of destroying event queues */
static void be_eq_clean(struct be_adapter *adapter,
			struct be_eq_obj *eq_obj)
{
	struct be_eq_entry *eqe;
	u16 num = 0;

	while ((eqe = event_get(eq_obj)) != NULL) {
		eqe->evt = 0;
		num++;
	}

	if (num)
		be_eq_notify(adapter, eq_obj->q.id, false, true, num);
}

static void be_rx_q_clean(struct be_adapter *adapter, struct be_rx_obj *rxo)
{
	struct be_rx_page_info *page_info;
	struct be_queue_info *rxq = &rxo->q;
	struct be_queue_info *rx_cq = &rxo->cq;
	struct be_eth_rx_compl *rxcp;
	u16 tail;

	/* First cleanup pending rx completions */
	while ((rxcp = be_rx_compl_get(rxo)) != NULL) {
		be_rx_compl_discard(adapter, rxo, rxcp);
		be_rx_compl_reset(rxcp);
		be_cq_notify(adapter, rx_cq->id, false, 1);
	}

	/* Then free posted rx buffers that were not used */
	tail = (rxq->head + rxq->len - atomic_read(&rxq->used)) % rxq->len;
	for (; atomic_read(&rxq->used) > 0; index_inc(&tail, rxq->len)) {
		page_info = get_rx_page_info(adapter, rxo, tail);
		put_page(page_info->page);
		memset(page_info, 0, sizeof(*page_info));
	}
	BUG_ON(atomic_read(&rxq->used));
}

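/*
 * Shutdown drain: poll the TX CQ for up to roughly 200ms (200 passes of
 * mdelay(1)), reaping completions in batches, then force-free any wrbs
 * whose completions never arrived. Rough timeline, assuming a wedged chip:
 *
 *	t = 0ms		reap whatever is in the CQ, notify consumed count
 *	t = 1..200ms	retry every 1ms while txq->used > 0
 *	t > 200ms	log "N pending tx-completions" and unwind the
 *			remaining skbs by walking wrb counts from txq->tail
 */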
static void be_tx_compl_clean(struct be_adapter *adapter)
{
	struct be_queue_info *tx_cq = &adapter->tx_obj.cq;
	struct be_queue_info *txq = &adapter->tx_obj.q;
	struct be_eth_tx_compl *txcp;
	u16 end_idx, cmpl = 0, timeo = 0;
	struct sk_buff **sent_skbs = adapter->tx_obj.sent_skb_list;
	struct sk_buff *sent_skb;
	bool dummy_wrb;

	/* Wait for a max of 200ms for all the tx-completions to arrive. */
	do {
		while ((txcp = be_tx_compl_get(tx_cq))) {
			end_idx = AMAP_GET_BITS(struct amap_eth_tx_compl,
						wrb_index, txcp);
			be_tx_compl_process(adapter, end_idx);
			cmpl++;
		}
		if (cmpl) {
			be_cq_notify(adapter, tx_cq->id, false, cmpl);
			cmpl = 0;
		}

		if (atomic_read(&txq->used) == 0 || ++timeo > 200)
			break;

		mdelay(1);
	} while (true);

	if (atomic_read(&txq->used))
		dev_err(&adapter->pdev->dev, "%d pending tx-completions\n",
			atomic_read(&txq->used));

	/* free posted tx for which compls will never arrive */
	while (atomic_read(&txq->used)) {
		sent_skb = sent_skbs[txq->tail];
		end_idx = txq->tail;
		index_adv(&end_idx,
			wrb_cnt_for_skb(adapter, sent_skb, &dummy_wrb) - 1,
			txq->len);
		be_tx_compl_process(adapter, end_idx);
	}
}

static void be_mcc_queues_destroy(struct be_adapter *adapter)
{
	struct be_queue_info *q;

	q = &adapter->mcc_obj.q;
	if (q->created)
		be_cmd_q_destroy(adapter, q, QTYPE_MCCQ);
	be_queue_free(adapter, q);

	q = &adapter->mcc_obj.cq;
	if (q->created)
		be_cmd_q_destroy(adapter, q, QTYPE_CQ);
	be_queue_free(adapter, q);
}

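/*
 * The *_queues_create() routines below all follow the same two-step,
 * unwound-by-goto pattern: host memory for a queue is allocated first
 * (be_queue_alloc), then the queue is instantiated in the ASIC via a
 * mailbox command (be_cmd_*_create). A failure at step N jumps to a
 * label that tears down steps N-1..1 in reverse order, roughly:
 *
 *	if (be_queue_alloc(...))	goto err;
 *	if (be_cmd_cq_create(...))	goto cq_free;
 *	...
 *	cq_free: be_queue_free(...);
 *	err:     return -1;
 */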
/* Must be called only after TX qs are created as MCC shares TX EQ */
static int be_mcc_queues_create(struct be_adapter *adapter)
{
	struct be_queue_info *q, *cq;

	/* Alloc MCC compl queue */
	cq = &adapter->mcc_obj.cq;
	if (be_queue_alloc(adapter, cq, MCC_CQ_LEN,
			   sizeof(struct be_mcc_compl)))
		goto err;

	/* Ask BE to create MCC compl queue; share TX's eq */
	if (be_cmd_cq_create(adapter, cq, &adapter->tx_eq.q, false, true, 0))
		goto mcc_cq_free;

	/* Alloc MCC queue */
	q = &adapter->mcc_obj.q;
	if (be_queue_alloc(adapter, q, MCC_Q_LEN, sizeof(struct be_mcc_wrb)))
		goto mcc_cq_destroy;

	/* Ask BE to create MCC queue */
	if (be_cmd_mccq_create(adapter, q, cq))
		goto mcc_q_free;

	return 0;

mcc_q_free:
	be_queue_free(adapter, q);
mcc_cq_destroy:
	be_cmd_q_destroy(adapter, cq, QTYPE_CQ);
mcc_cq_free:
	be_queue_free(adapter, cq);
err:
	return -1;
}

static void be_tx_queues_destroy(struct be_adapter *adapter)
{
	struct be_queue_info *q;

	q = &adapter->tx_obj.q;
	if (q->created)
		be_cmd_q_destroy(adapter, q, QTYPE_TXQ);
	be_queue_free(adapter, q);

	q = &adapter->tx_obj.cq;
	if (q->created)
		be_cmd_q_destroy(adapter, q, QTYPE_CQ);
	be_queue_free(adapter, q);

	/* Clear any residual events */
	be_eq_clean(adapter, &adapter->tx_eq);

	q = &adapter->tx_eq.q;
	if (q->created)
		be_cmd_q_destroy(adapter, q, QTYPE_EQ);
	be_queue_free(adapter, q);
}

static int be_tx_queues_create(struct be_adapter *adapter)
{
	struct be_queue_info *eq, *q, *cq;

	adapter->tx_eq.max_eqd = 0;
	adapter->tx_eq.min_eqd = 0;
	adapter->tx_eq.cur_eqd = 96;
	adapter->tx_eq.enable_aic = false;
	/* Alloc Tx Event queue */
	eq = &adapter->tx_eq.q;
	if (be_queue_alloc(adapter, eq, EVNT_Q_LEN, sizeof(struct be_eq_entry)))
		return -1;

	/* Ask BE to create Tx Event queue */
	if (be_cmd_eq_create(adapter, eq, adapter->tx_eq.cur_eqd))
		goto tx_eq_free;

	adapter->tx_eq.msix_vec_idx = adapter->msix_vec_next_idx++;

	/* Alloc TX eth compl queue */
	cq = &adapter->tx_obj.cq;
	if (be_queue_alloc(adapter, cq, TX_CQ_LEN,
			   sizeof(struct be_eth_tx_compl)))
		goto tx_eq_destroy;

	/* Ask BE to create Tx eth compl queue */
	if (be_cmd_cq_create(adapter, cq, eq, false, false, 3))
		goto tx_cq_free;

	/* Alloc TX eth queue */
	q = &adapter->tx_obj.q;
	if (be_queue_alloc(adapter, q, TX_Q_LEN, sizeof(struct be_eth_wrb)))
		goto tx_cq_destroy;

	/* Ask BE to create Tx eth queue */
	if (be_cmd_txq_create(adapter, q, cq))
		goto tx_q_free;
	return 0;

tx_q_free:
	be_queue_free(adapter, q);
tx_cq_destroy:
	be_cmd_q_destroy(adapter, cq, QTYPE_CQ);
tx_cq_free:
	be_queue_free(adapter, cq);
tx_eq_destroy:
	be_cmd_q_destroy(adapter, eq, QTYPE_EQ);
tx_eq_free:
	be_queue_free(adapter, eq);
	return -1;
}

static void be_rx_queues_destroy(struct be_adapter *adapter)
{
	struct be_queue_info *q;
	struct be_rx_obj *rxo;
	int i;

	for_all_rx_queues(adapter, rxo, i) {
		q = &rxo->q;
		if (q->created) {
			be_cmd_q_destroy(adapter, q, QTYPE_RXQ);
			/* After the rxq is invalidated, wait for a grace time
			 * of 1ms for all dma to end and the flush compl to
			 * arrive
			 */
			mdelay(1);
			be_rx_q_clean(adapter, rxo);
		}
		be_queue_free(adapter, q);

		q = &rxo->cq;
		if (q->created)
			be_cmd_q_destroy(adapter, q, QTYPE_CQ);
		be_queue_free(adapter, q);

		/* Clear any residual events */
		q = &rxo->rx_eq.q;
		if (q->created) {
			be_eq_clean(adapter, &rxo->rx_eq);
			be_cmd_q_destroy(adapter, q, QTYPE_EQ);
		}
		be_queue_free(adapter, q);
	}
}

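/*
 * RX queue layout sketch: queue 0 is the default (non-RSS) queue; queues
 * 1..num_rx_qs-1 are RSS queues whose ids are gathered into rsstable[]
 * and programmed via be_cmd_rss_config(). With the code below, RSS is
 * requested on every ring except the first (the "(i > 0)" argument to
 * be_cmd_rxq_create()).
 */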
static int be_rx_queues_create(struct be_adapter *adapter)
{
	struct be_queue_info *eq, *q, *cq;
	struct be_rx_obj *rxo;
	int rc, i;

	adapter->big_page_size = (1 << get_order(rx_frag_size)) * PAGE_SIZE;
	for_all_rx_queues(adapter, rxo, i) {
		rxo->adapter = adapter;
		/* Init last_frag_index so that the frag index in the first
		 * completion will never match */
		rxo->last_frag_index = 0xffff;
		rxo->rx_eq.max_eqd = BE_MAX_EQD;
		rxo->rx_eq.enable_aic = true;

		/* EQ */
		eq = &rxo->rx_eq.q;
		rc = be_queue_alloc(adapter, eq, EVNT_Q_LEN,
				    sizeof(struct be_eq_entry));
		if (rc)
			goto err;

		rc = be_cmd_eq_create(adapter, eq, rxo->rx_eq.cur_eqd);
		if (rc)
			goto err;

		rxo->rx_eq.msix_vec_idx = adapter->msix_vec_next_idx++;

		/* CQ */
		cq = &rxo->cq;
		rc = be_queue_alloc(adapter, cq, RX_CQ_LEN,
				    sizeof(struct be_eth_rx_compl));
		if (rc)
			goto err;

		rc = be_cmd_cq_create(adapter, cq, eq, false, false, 3);
		if (rc)
			goto err;
		/* Rx Q */
		q = &rxo->q;
		rc = be_queue_alloc(adapter, q, RX_Q_LEN,
				    sizeof(struct be_eth_rx_d));
		if (rc)
			goto err;

		rc = be_cmd_rxq_create(adapter, q, cq->id, rx_frag_size,
			BE_MAX_JUMBO_FRAME_SIZE, adapter->if_handle,
			(i > 0) ? 1 : 0/* rss enable */, &rxo->rss_id);
		if (rc)
			goto err;
	}

	if (be_multi_rxq(adapter)) {
		u8 rsstable[MAX_RSS_QS];

		for_all_rss_queues(adapter, rxo, i)
			rsstable[i] = rxo->rss_id;

		rc = be_cmd_rss_config(adapter, rsstable,
				       adapter->num_rx_qs - 1);
		if (rc)
			goto err;
	}

	return 0;
err:
	be_rx_queues_destroy(adapter);
	return -1;
}

static bool event_peek(struct be_eq_obj *eq_obj)
{
	struct be_eq_entry *eqe = queue_tail_node(&eq_obj->q);

	if (!eqe->evt)
		return false;
	else
		return true;
}

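/*
 * Legacy-interrupt demux sketch (BE2/BE3 path): CEV_ISR0 holds one
 * pending bit per EQ, so the handler tests (1 << eq->msix_vec_idx)
 * against the register and services only the EQs whose bits are set.
 * Lancer has no such ISR register, hence the event_peek() probe of each
 * EQ instead. Assuming tx_eq owns bit 0 and rx EQs follow in creation
 * order:
 *
 *	isr = 0b0101  ->  service tx_eq (bit 0) and rxq1's EQ (bit 2)
 */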
static irqreturn_t be_intx(int irq, void *dev)
{
	struct be_adapter *adapter = dev;
	struct be_rx_obj *rxo;
	int isr, i, tx = 0, rx = 0;

	if (lancer_chip(adapter)) {
		if (event_peek(&adapter->tx_eq))
			tx = event_handle(adapter, &adapter->tx_eq);
		for_all_rx_queues(adapter, rxo, i) {
			if (event_peek(&rxo->rx_eq))
				rx |= event_handle(adapter, &rxo->rx_eq);
		}

		if (!(tx || rx))
			return IRQ_NONE;

	} else {
		isr = ioread32(adapter->csr + CEV_ISR0_OFFSET +
			(adapter->tx_eq.q.id / 8) * CEV_ISR_SIZE);
		if (!isr)
			return IRQ_NONE;

		if ((1 << adapter->tx_eq.msix_vec_idx & isr))
			event_handle(adapter, &adapter->tx_eq);

		for_all_rx_queues(adapter, rxo, i) {
			if ((1 << rxo->rx_eq.msix_vec_idx & isr))
				event_handle(adapter, &rxo->rx_eq);
		}
	}

	return IRQ_HANDLED;
}

static irqreturn_t be_msix_rx(int irq, void *dev)
{
	struct be_rx_obj *rxo = dev;
	struct be_adapter *adapter = rxo->adapter;

	event_handle(adapter, &rxo->rx_eq);

	return IRQ_HANDLED;
}

static irqreturn_t be_msix_tx_mcc(int irq, void *dev)
{
	struct be_adapter *adapter = dev;

	event_handle(adapter, &adapter->tx_eq);

	return IRQ_HANDLED;
}

static inline bool do_gro(struct be_rx_obj *rxo,
			  struct be_eth_rx_compl *rxcp, u8 err)
{
	int tcp_frame = AMAP_GET_BITS(struct amap_eth_rx_compl, tcpf, rxcp);

	if (err)
		rxo->stats.rxcp_err++;

	return (tcp_frame && !err) ? true : false;
}

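/*
 * NAPI contract sketch: consume at most `budget` RX completions; if we
 * stop short of the budget everything is drained, so napi_complete()
 * and re-arm the CQ (rearm=true). If the budget is exhausted, notify
 * the consumed count without re-arming and let the core poll again.
 * The ring is refilled whenever fewer than RX_FRAGS_REFILL_WM posted
 * buffers remain (watermark defined in be.h).
 */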
static int be_poll_rx(struct napi_struct *napi, int budget)
{
	struct be_eq_obj *rx_eq = container_of(napi, struct be_eq_obj, napi);
	struct be_rx_obj *rxo = container_of(rx_eq, struct be_rx_obj, rx_eq);
	struct be_adapter *adapter = rxo->adapter;
	struct be_queue_info *rx_cq = &rxo->cq;
	struct be_eth_rx_compl *rxcp;
	u32 work_done;
	u16 frag_index, num_rcvd;
	u8 err;

	rxo->stats.rx_polls++;
	for (work_done = 0; work_done < budget; work_done++) {
		rxcp = be_rx_compl_get(rxo);
		if (!rxcp)
			break;

		err = AMAP_GET_BITS(struct amap_eth_rx_compl, err, rxcp);
		frag_index = AMAP_GET_BITS(struct amap_eth_rx_compl, fragndx,
					   rxcp);
		num_rcvd = AMAP_GET_BITS(struct amap_eth_rx_compl, numfrags,
					 rxcp);

		/* Skip out-of-buffer compl (lancer) or flush compl (BE) */
		if (likely(frag_index != rxo->last_frag_index &&
			   num_rcvd != 0)) {
			rxo->last_frag_index = frag_index;

			if (do_gro(rxo, rxcp, err))
				be_rx_compl_process_gro(adapter, rxo, rxcp);
			else
				be_rx_compl_process(adapter, rxo, rxcp);
		}

		be_rx_compl_reset(rxcp);
	}

	/* Refill the queue */
	if (atomic_read(&rxo->q.used) < RX_FRAGS_REFILL_WM)
		be_post_rx_frags(rxo);

	/* All consumed */
	if (work_done < budget) {
		napi_complete(napi);
		be_cq_notify(adapter, rx_cq->id, true, work_done);
	} else {
		/* More to be consumed; continue with interrupts disabled */
		be_cq_notify(adapter, rx_cq->id, false, work_done);
	}
	return work_done;
}

/* As TX and MCC share the same EQ, check for both TX and MCC completions.
 * For TX/MCC we don't honour budget; consume everything
 */
static int be_poll_tx_mcc(struct napi_struct *napi, int budget)
{
	struct be_eq_obj *tx_eq = container_of(napi, struct be_eq_obj, napi);
	struct be_adapter *adapter =
		container_of(tx_eq, struct be_adapter, tx_eq);
	struct be_queue_info *txq = &adapter->tx_obj.q;
	struct be_queue_info *tx_cq = &adapter->tx_obj.cq;
	struct be_eth_tx_compl *txcp;
	int tx_compl = 0, mcc_compl, status = 0;
	u16 end_idx;

	while ((txcp = be_tx_compl_get(tx_cq))) {
		end_idx = AMAP_GET_BITS(struct amap_eth_tx_compl,
					wrb_index, txcp);
		be_tx_compl_process(adapter, end_idx);
		tx_compl++;
	}

	mcc_compl = be_process_mcc(adapter, &status);

	napi_complete(napi);

	if (mcc_compl) {
		struct be_mcc_obj *mcc_obj = &adapter->mcc_obj;
		be_cq_notify(adapter, mcc_obj->cq.id, true, mcc_compl);
	}

	if (tx_compl) {
		be_cq_notify(adapter, adapter->tx_obj.cq.id, true, tx_compl);

		/* As Tx wrbs have been freed up, wake up netdev queue if
		 * it was stopped due to lack of tx wrbs.
		 */
		if (netif_queue_stopped(adapter->netdev) &&
		    atomic_read(&txq->used) < txq->len / 2) {
			netif_wake_queue(adapter->netdev);
		}

		tx_stats(adapter)->be_tx_events++;
		tx_stats(adapter)->be_tx_compl += tx_compl;
	}

	return 1;
}

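/*
 * Decode sketch for the UE (unrecoverable error) CSRs: each status
 * register is read along with its mask register, masked-off bits are
 * cleared, and every remaining set bit indexes into the
 * ue_status_low_desc[]/ue_status_hi_desc[] string tables declared near
 * the top of the file; bit i maps to entry i because the status word is
 * right-shifted once per loop iteration below.
 */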
void be_detect_dump_ue(struct be_adapter *adapter)
{
	u32 ue_status_lo, ue_status_hi, ue_status_lo_mask, ue_status_hi_mask;
	u32 i;

	pci_read_config_dword(adapter->pdev,
			      PCICFG_UE_STATUS_LOW, &ue_status_lo);
	pci_read_config_dword(adapter->pdev,
			      PCICFG_UE_STATUS_HIGH, &ue_status_hi);
	pci_read_config_dword(adapter->pdev,
			      PCICFG_UE_STATUS_LOW_MASK, &ue_status_lo_mask);
	pci_read_config_dword(adapter->pdev,
			      PCICFG_UE_STATUS_HI_MASK, &ue_status_hi_mask);

	ue_status_lo = (ue_status_lo & (~ue_status_lo_mask));
	ue_status_hi = (ue_status_hi & (~ue_status_hi_mask));

	if (ue_status_lo || ue_status_hi) {
		adapter->ue_detected = true;
		adapter->eeh_err = true;
		dev_err(&adapter->pdev->dev, "UE Detected!!\n");
	}

	if (ue_status_lo) {
		for (i = 0; ue_status_lo; ue_status_lo >>= 1, i++) {
			if (ue_status_lo & 1)
				dev_err(&adapter->pdev->dev,
					"UE: %s bit set\n", ue_status_low_desc[i]);
		}
	}
	if (ue_status_hi) {
		for (i = 0; ue_status_hi; ue_status_hi >>= 1, i++) {
			if (ue_status_hi & 1)
				dev_err(&adapter->pdev->dev,
					"UE: %s bit set\n", ue_status_hi_desc[i]);
		}
	}
}

static void be_worker(struct work_struct *work)
{
	struct be_adapter *adapter =
		container_of(work, struct be_adapter, work.work);
	struct be_rx_obj *rxo;
	int i;

	/* when interrupts are not yet enabled, just reap any pending
	 * mcc completions */
	if (!netif_running(adapter->netdev)) {
		int mcc_compl, status = 0;

		mcc_compl = be_process_mcc(adapter, &status);

		if (mcc_compl) {
			struct be_mcc_obj *mcc_obj = &adapter->mcc_obj;
			be_cq_notify(adapter, mcc_obj->cq.id, false, mcc_compl);
		}
		goto reschedule;
	}

	if (!adapter->stats_ioctl_sent)
		be_cmd_get_stats(adapter, &adapter->stats_cmd);

	be_tx_rate_update(adapter);

	for_all_rx_queues(adapter, rxo, i) {
		be_rx_rate_update(rxo);
		be_rx_eqd_update(adapter, rxo);

		if (rxo->rx_post_starved) {
			rxo->rx_post_starved = false;
			be_post_rx_frags(rxo);
		}
	}
	if (!adapter->ue_detected && !lancer_chip(adapter))
		be_detect_dump_ue(adapter);

reschedule:
	schedule_delayed_work(&adapter->work, msecs_to_jiffies(1000));
}

static void be_msix_disable(struct be_adapter *adapter)
{
	if (adapter->msix_enabled) {
		pci_disable_msix(adapter->pdev);
		adapter->msix_enabled = false;
	}
}

static int be_num_rxqs_get(struct be_adapter *adapter)
{
	if (multi_rxq && (adapter->function_caps & BE_FUNCTION_CAPS_RSS) &&
	    !adapter->sriov_enabled && !(adapter->function_mode & 0x400)) {
		return 1 + MAX_RSS_QS; /* one default non-RSS queue */
	} else {
		dev_warn(&adapter->pdev->dev,
			 "No support for multiple RX queues\n");
		return 1;
	}
}

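/*
 * Vector accounting sketch: one vector for the shared TX/MCC EQ plus one
 * per RX queue, so a full request is num_rx_qs + 1. If the OS grants
 * fewer vectors, pci_enable_msix() returns the count available; as long
 * as that is at least BE_MIN_MSIX_VECTORS (1 Rx + 1 Tx) the driver
 * retries with the smaller count and shrinks num_rx_qs to match.
 */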
static void be_msix_enable(struct be_adapter *adapter)
{
#define BE_MIN_MSIX_VECTORS	(1 + 1) /* Rx + Tx */
	int i, status;

	adapter->num_rx_qs = be_num_rxqs_get(adapter);

	for (i = 0; i < (adapter->num_rx_qs + 1); i++)
		adapter->msix_entries[i].entry = i;

	status = pci_enable_msix(adapter->pdev, adapter->msix_entries,
				 adapter->num_rx_qs + 1);
	if (status == 0) {
		goto done;
	} else if (status >= BE_MIN_MSIX_VECTORS) {
		if (pci_enable_msix(adapter->pdev, adapter->msix_entries,
				    status) == 0) {
			adapter->num_rx_qs = status - 1;
			dev_warn(&adapter->pdev->dev,
				 "Could alloc only %d MSIx vectors. "
				 "Using %d RX Qs\n", status, adapter->num_rx_qs);
			goto done;
		}
	}
	return;
done:
	adapter->msix_enabled = true;
}

static void be_sriov_enable(struct be_adapter *adapter)
{
	be_check_sriov_fn_type(adapter);
#ifdef CONFIG_PCI_IOV
	if (be_physfn(adapter) && num_vfs) {
		int status;

		status = pci_enable_sriov(adapter->pdev, num_vfs);
		adapter->sriov_enabled = status ? false : true;
	}
#endif
}

static void be_sriov_disable(struct be_adapter *adapter)
{
#ifdef CONFIG_PCI_IOV
	if (adapter->sriov_enabled) {
		pci_disable_sriov(adapter->pdev);
		adapter->sriov_enabled = false;
	}
#endif
}

static inline int be_msix_vec_get(struct be_adapter *adapter,
				  struct be_eq_obj *eq_obj)
{
	return adapter->msix_entries[eq_obj->msix_vec_idx].vector;
}

static int be_request_irq(struct be_adapter *adapter,
			  struct be_eq_obj *eq_obj,
			  void *handler, char *desc, void *context)
{
	struct net_device *netdev = adapter->netdev;
	int vec;

	sprintf(eq_obj->desc, "%s-%s", netdev->name, desc);
	vec = be_msix_vec_get(adapter, eq_obj);
	return request_irq(vec, handler, 0, eq_obj->desc, context);
}

static void be_free_irq(struct be_adapter *adapter, struct be_eq_obj *eq_obj,
			void *context)
{
	int vec = be_msix_vec_get(adapter, eq_obj);

	free_irq(vec, context);
}

static int be_msix_register(struct be_adapter *adapter)
{
	struct be_rx_obj *rxo;
	int status, i;
	char qname[10];

	status = be_request_irq(adapter, &adapter->tx_eq, be_msix_tx_mcc, "tx",
				adapter);
	if (status)
		goto err;

	for_all_rx_queues(adapter, rxo, i) {
		sprintf(qname, "rxq%d", i);
		status = be_request_irq(adapter, &rxo->rx_eq, be_msix_rx,
					qname, rxo);
		if (status)
			goto err_msix;
	}

	return 0;

err_msix:
	be_free_irq(adapter, &adapter->tx_eq, adapter);

	for (i--, rxo = &adapter->rx_obj[i]; i >= 0; i--, rxo--)
		be_free_irq(adapter, &rxo->rx_eq, rxo);

err:
	dev_warn(&adapter->pdev->dev,
		 "MSIX Request IRQ failed - err %d\n", status);
	pci_disable_msix(adapter->pdev);
	adapter->msix_enabled = false;
	return status;
}

static int be_irq_register(struct be_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	int status;

	if (adapter->msix_enabled) {
		status = be_msix_register(adapter);
		if (status == 0)
			goto done;
		/* INTx is not supported for VF */
		if (!be_physfn(adapter))
			return status;
	}

	/* INTx */
	netdev->irq = adapter->pdev->irq;
	status = request_irq(netdev->irq, be_intx, IRQF_SHARED, netdev->name,
			     adapter);
	if (status) {
		dev_err(&adapter->pdev->dev,
			"INTx request IRQ failed - err %d\n", status);
		return status;
	}
done:
	adapter->isr_registered = true;
	return 0;
}

static void be_irq_unregister(struct be_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	struct be_rx_obj *rxo;
	int i;

	if (!adapter->isr_registered)
		return;

	/* INTx */
	if (!adapter->msix_enabled) {
		free_irq(netdev->irq, adapter);
		goto done;
	}

	/* MSIx */
	be_free_irq(adapter, &adapter->tx_eq, adapter);

	for_all_rx_queues(adapter, rxo, i)
		be_free_irq(adapter, &rxo->rx_eq, rxo);

done:
	adapter->isr_registered = false;
}

static int be_close(struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_rx_obj *rxo;
	struct be_eq_obj *tx_eq = &adapter->tx_eq;
	int vec, i;

	be_async_mcc_disable(adapter);

	netif_stop_queue(netdev);
	netif_carrier_off(netdev);
	adapter->link_up = false;

	if (!lancer_chip(adapter))
		be_intr_set(adapter, false);

	if (adapter->msix_enabled) {
		vec = be_msix_vec_get(adapter, tx_eq);
		synchronize_irq(vec);

		for_all_rx_queues(adapter, rxo, i) {
			vec = be_msix_vec_get(adapter, &rxo->rx_eq);
			synchronize_irq(vec);
		}
	} else {
		synchronize_irq(netdev->irq);
	}
	be_irq_unregister(adapter);

	for_all_rx_queues(adapter, rxo, i)
		napi_disable(&rxo->rx_eq.napi);

	napi_disable(&tx_eq->napi);

	/* Wait for all pending tx completions to arrive so that
	 * all tx skbs are freed.
	 */
	be_tx_compl_clean(adapter);

	return 0;
}

static int be_open(struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_eq_obj *tx_eq = &adapter->tx_eq;
	struct be_rx_obj *rxo;
	bool link_up;
	int status, i;
	u8 mac_speed;
	u16 link_speed;

	for_all_rx_queues(adapter, rxo, i) {
		be_post_rx_frags(rxo);
		napi_enable(&rxo->rx_eq.napi);
	}
	napi_enable(&tx_eq->napi);

	be_irq_register(adapter);

	if (!lancer_chip(adapter))
		be_intr_set(adapter, true);

	/* The evt queues are created in unarmed state; arm them */
	for_all_rx_queues(adapter, rxo, i) {
		be_eq_notify(adapter, rxo->rx_eq.q.id, true, false, 0);
		be_cq_notify(adapter, rxo->cq.id, true, 0);
	}
	be_eq_notify(adapter, tx_eq->q.id, true, false, 0);

	/* Now that interrupts are on we can process async mcc */
	be_async_mcc_enable(adapter);

	status = be_cmd_link_status_query(adapter, &link_up, &mac_speed,
					  &link_speed);
	if (status)
		goto err;
	be_link_status_update(adapter, link_up);

	if (be_physfn(adapter)) {
		status = be_vid_config(adapter, false, 0);
		if (status)
			goto err;

		status = be_cmd_set_flow_control(adapter,
						 adapter->tx_fc, adapter->rx_fc);
		if (status)
			goto err;
	}

	return 0;
err:
	be_close(adapter->netdev);
	return -EIO;
}

static int be_setup_wol(struct be_adapter *adapter, bool enable)
{
	struct be_dma_mem cmd;
	int status = 0;
	u8 mac[ETH_ALEN];

	memset(mac, 0, ETH_ALEN);

	cmd.size = sizeof(struct be_cmd_req_acpi_wol_magic_config);
	cmd.va = dma_alloc_coherent(&adapter->pdev->dev, cmd.size, &cmd.dma,
				    GFP_KERNEL);
	if (cmd.va == NULL)
		return -1;
	memset(cmd.va, 0, cmd.size);

	if (enable) {
		status = pci_write_config_dword(adapter->pdev,
			PCICFG_PM_CONTROL_OFFSET, PCICFG_PM_CONTROL_MASK);
		if (status) {
			dev_err(&adapter->pdev->dev,
				"Could not enable Wake-on-lan\n");
			dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va,
					  cmd.dma);
			return status;
		}
		status = be_cmd_enable_magic_wol(adapter,
						 adapter->netdev->dev_addr, &cmd);
		pci_enable_wake(adapter->pdev, PCI_D3hot, 1);
		pci_enable_wake(adapter->pdev, PCI_D3cold, 1);
	} else {
		status = be_cmd_enable_magic_wol(adapter, mac, &cmd);
		pci_enable_wake(adapter->pdev, PCI_D3hot, 0);
		pci_enable_wake(adapter->pdev, PCI_D3cold, 0);
	}

	dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va, cmd.dma);
	return status;
}

/*
 * Generate a seed MAC address from the PF MAC Address using jhash.
 * MAC Addresses for VFs are assigned incrementally starting from the seed.
 * These addresses are programmed in the ASIC by the PF and the VF driver
 * queries for the MAC address during its probe.
 */
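/*
 * Derivation sketch: with a hypothetical seed MAC of aa:bb:cc:dd:ee:00
 * (the actual seed comes from be_vf_eth_addr_generate()), VF0 gets
 * ...:00, VF1 gets ...:01, and so on via the mac[5] += 1 below. Only the
 * last octet is incremented, so a run past 256 VFs would wrap it.
 */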
static inline int be_vf_eth_addr_config(struct be_adapter *adapter)
{
	u32 vf = 0;
	int status = 0;
	u8 mac[ETH_ALEN];

	be_vf_eth_addr_generate(adapter, mac);

	for (vf = 0; vf < num_vfs; vf++) {
		status = be_cmd_pmac_add(adapter, mac,
					 adapter->vf_cfg[vf].vf_if_handle,
					 &adapter->vf_cfg[vf].vf_pmac_id,
					 vf + 1);
		if (status)
			dev_err(&adapter->pdev->dev,
				"Mac address add failed for VF %d\n", vf);
		else
			memcpy(adapter->vf_cfg[vf].vf_mac_addr, mac, ETH_ALEN);

		mac[5] += 1;
	}
	return status;
}

static inline void be_vf_eth_addr_rem(struct be_adapter *adapter)
{
	u32 vf;

	for (vf = 0; vf < num_vfs; vf++) {
		if (adapter->vf_cfg[vf].vf_pmac_id != BE_INVALID_PMAC_ID)
			be_cmd_pmac_del(adapter,
					adapter->vf_cfg[vf].vf_if_handle,
					adapter->vf_cfg[vf].vf_pmac_id, vf + 1);
	}
}

static int be_setup(struct be_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	u32 cap_flags, en_flags, vf = 0;
	int status;
	u8 mac[ETH_ALEN];

	cap_flags = en_flags = BE_IF_FLAGS_UNTAGGED | BE_IF_FLAGS_BROADCAST;

	if (be_physfn(adapter)) {
		cap_flags |= BE_IF_FLAGS_MCAST_PROMISCUOUS |
			     BE_IF_FLAGS_PROMISCUOUS |
			     BE_IF_FLAGS_PASS_L3L4_ERRORS;
		en_flags |= BE_IF_FLAGS_PASS_L3L4_ERRORS;

		if (be_multi_rxq(adapter)) {
			cap_flags |= BE_IF_FLAGS_RSS;
			en_flags |= BE_IF_FLAGS_RSS;
		}
	}

	status = be_cmd_if_create(adapter, cap_flags, en_flags,
				  netdev->dev_addr, false/* pmac_invalid */,
				  &adapter->if_handle, &adapter->pmac_id, 0);
	if (status != 0)
		goto do_none;

	if (be_physfn(adapter)) {
		if (adapter->sriov_enabled) {
			while (vf < num_vfs) {
				cap_flags = en_flags = BE_IF_FLAGS_UNTAGGED |
						       BE_IF_FLAGS_BROADCAST;
				status = be_cmd_if_create(adapter, cap_flags,
					en_flags, mac, true,
					&adapter->vf_cfg[vf].vf_if_handle,
					NULL, vf+1);
				if (status) {
					dev_err(&adapter->pdev->dev,
						"Interface Create failed for VF %d\n",
						vf);
					goto if_destroy;
				}
				adapter->vf_cfg[vf].vf_pmac_id =
						BE_INVALID_PMAC_ID;
				vf++;
			}
		}
	} else {
		status = be_cmd_mac_addr_query(adapter, mac,
			MAC_ADDRESS_TYPE_NETWORK, false, adapter->if_handle);
		if (!status) {
			memcpy(adapter->netdev->dev_addr, mac, ETH_ALEN);
			memcpy(adapter->netdev->perm_addr, mac, ETH_ALEN);
		}
	}

	status = be_tx_queues_create(adapter);
	if (status != 0)
		goto if_destroy;

	status = be_rx_queues_create(adapter);
	if (status != 0)
		goto tx_qs_destroy;

	status = be_mcc_queues_create(adapter);
	if (status != 0)
		goto rx_qs_destroy;

	adapter->link_speed = -1;

	return 0;

rx_qs_destroy:
	be_rx_queues_destroy(adapter);
tx_qs_destroy:
	be_tx_queues_destroy(adapter);
if_destroy:
	if (be_physfn(adapter) && adapter->sriov_enabled)
		for (vf = 0; vf < num_vfs; vf++)
			if (adapter->vf_cfg[vf].vf_if_handle)
				be_cmd_if_destroy(adapter,
					adapter->vf_cfg[vf].vf_if_handle,
					vf + 1);
	be_cmd_if_destroy(adapter, adapter->if_handle, 0);
do_none:
	return status;
}

static int be_clear(struct be_adapter *adapter)
{
	int vf;

	if (be_physfn(adapter) && adapter->sriov_enabled)
		be_vf_eth_addr_rem(adapter);

	be_mcc_queues_destroy(adapter);
	be_rx_queues_destroy(adapter);
	be_tx_queues_destroy(adapter);

	if (be_physfn(adapter) && adapter->sriov_enabled)
		for (vf = 0; vf < num_vfs; vf++)
			if (adapter->vf_cfg[vf].vf_if_handle)
				be_cmd_if_destroy(adapter,
					adapter->vf_cfg[vf].vf_if_handle,
					vf + 1);

	be_cmd_if_destroy(adapter, adapter->if_handle, 0);

	/* tell fw we're done with firing cmds */
	be_cmd_fw_clean(adapter);
	return 0;
}

#define FW_FILE_HDR_SIGN	"ServerEngines Corp. "
static bool be_flash_redboot(struct be_adapter *adapter,
			     const u8 *p, u32 img_start, int image_size,
			     int hdr_size)
{
	u32 crc_offset;
	u8 flashed_crc[4];
	int status;

	crc_offset = hdr_size + img_start + image_size - 4;

	p += crc_offset;

	status = be_cmd_get_flash_crc(adapter, flashed_crc,
				      (image_size - 4));
	if (status) {
		dev_err(&adapter->pdev->dev,
			"could not get crc from flash, not flashing redboot\n");
		return false;
	}

	/* update redboot only if crc does not match */
	if (!memcmp(flashed_crc, p, 4))
		return false;
	else
		return true;
}

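/*
 * Programming sketch: each firmware component is streamed to the ASIC in
 * 32KB pieces through the write_flashrom mailbox command. Every chunk
 * but the last is sent with FLASHROM_OPER_SAVE; the final chunk uses
 * FLASHROM_OPER_FLASH so the device can commit the accumulated image.
 * For a 1MB component that is 31 SAVE ops followed by 1 FLASH op.
 */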
static int be_flash_data(struct be_adapter *adapter,
			 const struct firmware *fw,
			 struct be_dma_mem *flash_cmd, int num_of_images)
{
	int status = 0, i, filehdr_size = 0;
	u32 total_bytes = 0, flash_op;
	int num_bytes;
	const u8 *p = fw->data;
	struct be_cmd_write_flashrom *req = flash_cmd->va;
	const struct flash_comp *pflashcomp;
	int num_comp;

	static const struct flash_comp gen3_flash_types[9] = {
		{ FLASH_iSCSI_PRIMARY_IMAGE_START_g3, IMG_TYPE_ISCSI_ACTIVE,
			FLASH_IMAGE_MAX_SIZE_g3},
		{ FLASH_REDBOOT_START_g3, IMG_TYPE_REDBOOT,
			FLASH_REDBOOT_IMAGE_MAX_SIZE_g3},
		{ FLASH_iSCSI_BIOS_START_g3, IMG_TYPE_BIOS,
			FLASH_BIOS_IMAGE_MAX_SIZE_g3},
		{ FLASH_PXE_BIOS_START_g3, IMG_TYPE_PXE_BIOS,
			FLASH_BIOS_IMAGE_MAX_SIZE_g3},
		{ FLASH_FCoE_BIOS_START_g3, IMG_TYPE_FCOE_BIOS,
			FLASH_BIOS_IMAGE_MAX_SIZE_g3},
		{ FLASH_iSCSI_BACKUP_IMAGE_START_g3, IMG_TYPE_ISCSI_BACKUP,
			FLASH_IMAGE_MAX_SIZE_g3},
		{ FLASH_FCoE_PRIMARY_IMAGE_START_g3, IMG_TYPE_FCOE_FW_ACTIVE,
			FLASH_IMAGE_MAX_SIZE_g3},
		{ FLASH_FCoE_BACKUP_IMAGE_START_g3, IMG_TYPE_FCOE_FW_BACKUP,
			FLASH_IMAGE_MAX_SIZE_g3},
		{ FLASH_NCSI_START_g3, IMG_TYPE_NCSI_FW,
			FLASH_NCSI_IMAGE_MAX_SIZE_g3}
	};
	static const struct flash_comp gen2_flash_types[8] = {
		{ FLASH_iSCSI_PRIMARY_IMAGE_START_g2, IMG_TYPE_ISCSI_ACTIVE,
			FLASH_IMAGE_MAX_SIZE_g2},
		{ FLASH_REDBOOT_START_g2, IMG_TYPE_REDBOOT,
			FLASH_REDBOOT_IMAGE_MAX_SIZE_g2},
		{ FLASH_iSCSI_BIOS_START_g2, IMG_TYPE_BIOS,
			FLASH_BIOS_IMAGE_MAX_SIZE_g2},
		{ FLASH_PXE_BIOS_START_g2, IMG_TYPE_PXE_BIOS,
			FLASH_BIOS_IMAGE_MAX_SIZE_g2},
		{ FLASH_FCoE_BIOS_START_g2, IMG_TYPE_FCOE_BIOS,
			FLASH_BIOS_IMAGE_MAX_SIZE_g2},
		{ FLASH_iSCSI_BACKUP_IMAGE_START_g2, IMG_TYPE_ISCSI_BACKUP,
			FLASH_IMAGE_MAX_SIZE_g2},
		{ FLASH_FCoE_PRIMARY_IMAGE_START_g2, IMG_TYPE_FCOE_FW_ACTIVE,
			FLASH_IMAGE_MAX_SIZE_g2},
		{ FLASH_FCoE_BACKUP_IMAGE_START_g2, IMG_TYPE_FCOE_FW_BACKUP,
			FLASH_IMAGE_MAX_SIZE_g2}
	};

	if (adapter->generation == BE_GEN3) {
		pflashcomp = gen3_flash_types;
		filehdr_size = sizeof(struct flash_file_hdr_g3);
		num_comp = ARRAY_SIZE(gen3_flash_types);
	} else {
		pflashcomp = gen2_flash_types;
		filehdr_size = sizeof(struct flash_file_hdr_g2);
		num_comp = ARRAY_SIZE(gen2_flash_types);
	}
	for (i = 0; i < num_comp; i++) {
		if ((pflashcomp[i].optype == IMG_TYPE_NCSI_FW) &&
		    memcmp(adapter->fw_ver, "3.102.148.0", 11) < 0)
			continue;
		if ((pflashcomp[i].optype == IMG_TYPE_REDBOOT) &&
		    (!be_flash_redboot(adapter, fw->data,
			pflashcomp[i].offset, pflashcomp[i].size, filehdr_size +
			(num_of_images * sizeof(struct image_hdr)))))
			continue;
		p = fw->data;
		p += filehdr_size + pflashcomp[i].offset
			+ (num_of_images * sizeof(struct image_hdr));
		if (p + pflashcomp[i].size > fw->data + fw->size)
			return -1;
		total_bytes = pflashcomp[i].size;
		while (total_bytes) {
			if (total_bytes > 32*1024)
				num_bytes = 32*1024;
			else
				num_bytes = total_bytes;
			total_bytes -= num_bytes;

			if (!total_bytes)
				flash_op = FLASHROM_OPER_FLASH;
			else
				flash_op = FLASHROM_OPER_SAVE;
			memcpy(req->params.data_buf, p, num_bytes);
			p += num_bytes;
			status = be_cmd_write_flashrom(adapter, flash_cmd,
				pflashcomp[i].optype, flash_op, num_bytes);
			if (status) {
				dev_err(&adapter->pdev->dev,
					"cmd to write to flash rom failed.\n");
				return -1;
			}
			yield();
		}
	}
	return 0;
}

Ajit Khaparde3f0d4562010-02-09 01:30:35 +00002500static int get_ufigen_type(struct flash_file_hdr_g2 *fhdr)
2501{
2502 if (fhdr == NULL)
2503 return 0;
2504 if (fhdr->build[0] == '3')
2505 return BE_GEN3;
2506 else if (fhdr->build[0] == '2')
2507 return BE_GEN2;
2508 else
2509 return 0;
2510}
2511
int be_load_fw(struct be_adapter *adapter, u8 *func)
{
	char fw_file[ETHTOOL_FLASH_MAX_FILENAME];
	const struct firmware *fw;
	struct flash_file_hdr_g2 *fhdr;
	struct flash_file_hdr_g3 *fhdr3;
	struct image_hdr *img_hdr_ptr = NULL;
	struct be_dma_mem flash_cmd;
	int status, i = 0, num_imgs = 0;
	const u8 *p;

	if (!netif_running(adapter->netdev)) {
		dev_err(&adapter->pdev->dev,
			"Firmware load not allowed (interface is down)\n");
		return -EPERM;
	}

	strcpy(fw_file, func);

	status = request_firmware(&fw, fw_file, &adapter->pdev->dev);
	if (status)
		goto fw_exit;

	p = fw->data;
	fhdr = (struct flash_file_hdr_g2 *) p;
	dev_info(&adapter->pdev->dev, "Flashing firmware file %s\n", fw_file);

	flash_cmd.size = sizeof(struct be_cmd_write_flashrom) + 32*1024;
	flash_cmd.va = dma_alloc_coherent(&adapter->pdev->dev, flash_cmd.size,
					  &flash_cmd.dma, GFP_KERNEL);
	if (!flash_cmd.va) {
		status = -ENOMEM;
		dev_err(&adapter->pdev->dev,
			"Memory allocation failure while flashing\n");
		goto fw_exit;
	}

	if ((adapter->generation == BE_GEN3) &&
			(get_ufigen_type(fhdr) == BE_GEN3)) {
		fhdr3 = (struct flash_file_hdr_g3 *) fw->data;
		num_imgs = le32_to_cpu(fhdr3->num_imgs);
		for (i = 0; i < num_imgs; i++) {
			img_hdr_ptr = (struct image_hdr *) (fw->data +
					(sizeof(struct flash_file_hdr_g3) +
					 i * sizeof(struct image_hdr)));
			if (le32_to_cpu(img_hdr_ptr->imageid) == 1)
				status = be_flash_data(adapter, fw, &flash_cmd,
						       num_imgs);
		}
	} else if ((adapter->generation == BE_GEN2) &&
			(get_ufigen_type(fhdr) == BE_GEN2)) {
		status = be_flash_data(adapter, fw, &flash_cmd, 0);
	} else {
		dev_err(&adapter->pdev->dev,
			"UFI and Interface are not compatible for flashing\n");
		status = -EINVAL;
	}

	dma_free_coherent(&adapter->pdev->dev, flash_cmd.size, flash_cmd.va,
			  flash_cmd.dma);
	if (status) {
		dev_err(&adapter->pdev->dev, "Firmware load error\n");
		goto fw_exit;
	}

	dev_info(&adapter->pdev->dev, "Firmware flashed successfully\n");

fw_exit:
	release_firmware(fw);
	return status;
}

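/* Driver entry points invoked by the net core, including SR-IOV VF hooks */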
static struct net_device_ops be_netdev_ops = {
	.ndo_open		= be_open,
	.ndo_stop		= be_close,
	.ndo_start_xmit		= be_xmit,
	.ndo_set_rx_mode	= be_set_multicast_list,
	.ndo_set_mac_address	= be_mac_addr_set,
	.ndo_change_mtu		= be_change_mtu,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_vlan_rx_register	= be_vlan_register,
	.ndo_vlan_rx_add_vid	= be_vlan_add_vid,
	.ndo_vlan_rx_kill_vid	= be_vlan_rem_vid,
	.ndo_set_vf_mac		= be_set_vf_mac,
	.ndo_set_vf_vlan	= be_set_vf_vlan,
	.ndo_set_vf_tx_rate	= be_set_vf_tx_rate,
	.ndo_get_vf_config	= be_get_vf_config
};

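/* Initialize netdev features, flow-control defaults and NAPI contexts */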
static void be_netdev_init(struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_rx_obj *rxo;
	int i;

	netdev->features |= NETIF_F_SG | NETIF_F_HW_VLAN_RX | NETIF_F_TSO |
		NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_FILTER |
		NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
		NETIF_F_GRO | NETIF_F_TSO6;

	netdev->vlan_features |= NETIF_F_SG | NETIF_F_TSO |
		NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;

	if (lancer_chip(adapter))
		netdev->vlan_features |= NETIF_F_TSO6;

	netdev->flags |= IFF_MULTICAST;

	adapter->rx_csum = true;

	/* Default settings for Rx and Tx flow control */
	adapter->rx_fc = true;
	adapter->tx_fc = true;

	netif_set_gso_max_size(netdev, 65535);

	BE_SET_NETDEV_OPS(netdev, &be_netdev_ops);

	SET_ETHTOOL_OPS(netdev, &be_ethtool_ops);

	for_all_rx_queues(adapter, rxo, i)
		netif_napi_add(netdev, &rxo->rx_eq.napi, be_poll_rx,
			       BE_NAPI_WEIGHT);

	netif_napi_add(netdev, &adapter->tx_eq.napi, be_poll_tx_mcc,
		       BE_NAPI_WEIGHT);
}

static void be_unmap_pci_bars(struct be_adapter *adapter)
{
	if (adapter->csr)
		iounmap(adapter->csr);
	if (adapter->db)
		iounmap(adapter->db);
	if (adapter->pcicfg && be_physfn(adapter))
		iounmap(adapter->pcicfg);
}

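/*
 * Map the BARs the driver needs: CSR (PF only), doorbell and PCI config
 * space. BAR numbers differ between chip generations and between PF and
 * VF; Lancer-class chips expose only a doorbell BAR.
 */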
static int be_map_pci_bars(struct be_adapter *adapter)
{
	u8 __iomem *addr;
	int pcicfg_reg, db_reg;

	if (lancer_chip(adapter)) {
		addr = ioremap_nocache(pci_resource_start(adapter->pdev, 0),
				       pci_resource_len(adapter->pdev, 0));
		if (addr == NULL)
			return -ENOMEM;
		adapter->db = addr;
		return 0;
	}

	if (be_physfn(adapter)) {
		addr = ioremap_nocache(pci_resource_start(adapter->pdev, 2),
				       pci_resource_len(adapter->pdev, 2));
		if (addr == NULL)
			return -ENOMEM;
		adapter->csr = addr;
	}

	if (adapter->generation == BE_GEN2) {
		pcicfg_reg = 1;
		db_reg = 4;
	} else {
		pcicfg_reg = 0;
		if (be_physfn(adapter))
			db_reg = 4;
		else
			db_reg = 0;
	}
	addr = ioremap_nocache(pci_resource_start(adapter->pdev, db_reg),
			       pci_resource_len(adapter->pdev, db_reg));
	if (addr == NULL)
		goto pci_map_err;
	adapter->db = addr;

	if (be_physfn(adapter)) {
		addr = ioremap_nocache(
				pci_resource_start(adapter->pdev, pcicfg_reg),
				pci_resource_len(adapter->pdev, pcicfg_reg));
		if (addr == NULL)
			goto pci_map_err;
		adapter->pcicfg = addr;
	} else
		adapter->pcicfg = adapter->db + SRIOV_VF_PCICFG_OFFSET;

	return 0;
pci_map_err:
	be_unmap_pci_bars(adapter);
	return -ENOMEM;
}

static void be_ctrl_cleanup(struct be_adapter *adapter)
{
	struct be_dma_mem *mem = &adapter->mbox_mem_alloced;

	be_unmap_pci_bars(adapter);

	if (mem->va)
		dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
				  mem->dma);

	mem = &adapter->mc_cmd_mem;
	if (mem->va)
		dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
				  mem->dma);
}

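/*
 * Set up the control path: map PCI BARs, allocate the 16-byte aligned
 * mailbox used for firmware commands and the multicast command buffer,
 * and initialize the locks that serialize mailbox/MCC access.
 */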
static int be_ctrl_init(struct be_adapter *adapter)
{
	struct be_dma_mem *mbox_mem_alloc = &adapter->mbox_mem_alloced;
	struct be_dma_mem *mbox_mem_align = &adapter->mbox_mem;
	struct be_dma_mem *mc_cmd_mem = &adapter->mc_cmd_mem;
	int status;

	status = be_map_pci_bars(adapter);
	if (status)
		goto done;

	mbox_mem_alloc->size = sizeof(struct be_mcc_mailbox) + 16;
	mbox_mem_alloc->va = dma_alloc_coherent(&adapter->pdev->dev,
						mbox_mem_alloc->size,
						&mbox_mem_alloc->dma,
						GFP_KERNEL);
	if (!mbox_mem_alloc->va) {
		status = -ENOMEM;
		goto unmap_pci_bars;
	}

	mbox_mem_align->size = sizeof(struct be_mcc_mailbox);
	mbox_mem_align->va = PTR_ALIGN(mbox_mem_alloc->va, 16);
	mbox_mem_align->dma = PTR_ALIGN(mbox_mem_alloc->dma, 16);
	memset(mbox_mem_align->va, 0, sizeof(struct be_mcc_mailbox));

	mc_cmd_mem->size = sizeof(struct be_cmd_req_mcast_mac_config);
	mc_cmd_mem->va = dma_alloc_coherent(&adapter->pdev->dev,
					    mc_cmd_mem->size, &mc_cmd_mem->dma,
					    GFP_KERNEL);
	if (mc_cmd_mem->va == NULL) {
		status = -ENOMEM;
		goto free_mbox;
	}
	memset(mc_cmd_mem->va, 0, mc_cmd_mem->size);

	mutex_init(&adapter->mbox_lock);
	spin_lock_init(&adapter->mcc_lock);
	spin_lock_init(&adapter->mcc_cq_lock);

	init_completion(&adapter->flash_compl);
	pci_save_state(adapter->pdev);
	return 0;

free_mbox:
	dma_free_coherent(&adapter->pdev->dev, mbox_mem_alloc->size,
			  mbox_mem_alloc->va, mbox_mem_alloc->dma);

unmap_pci_bars:
	be_unmap_pci_bars(adapter);

done:
	return status;
}

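/* DMA-coherent buffer used by the GET_STATS firmware command */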
static void be_stats_cleanup(struct be_adapter *adapter)
{
	struct be_dma_mem *cmd = &adapter->stats_cmd;

	if (cmd->va)
		dma_free_coherent(&adapter->pdev->dev, cmd->size,
				  cmd->va, cmd->dma);
}

static int be_stats_init(struct be_adapter *adapter)
{
	struct be_dma_mem *cmd = &adapter->stats_cmd;

	cmd->size = sizeof(struct be_cmd_req_get_stats);
	cmd->va = dma_alloc_coherent(&adapter->pdev->dev, cmd->size, &cmd->dma,
				     GFP_KERNEL);
	if (cmd->va == NULL)
		return -ENOMEM;
	memset(cmd->va, 0, cmd->size);
	return 0;
}

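/* Tear down everything be_probe() set up, in reverse order */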
static void __devexit be_remove(struct pci_dev *pdev)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);

	if (!adapter)
		return;

	cancel_delayed_work_sync(&adapter->work);

	unregister_netdev(adapter->netdev);

	be_clear(adapter);

	be_stats_cleanup(adapter);

	be_ctrl_cleanup(adapter);

	be_sriov_disable(adapter);

	be_msix_disable(adapter);

	pci_set_drvdata(pdev, NULL);
	pci_release_regions(pdev);
	pci_disable_device(pdev);

	free_netdev(adapter->netdev);
}

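/*
 * Query firmware for the configuration this function may use: FW version,
 * port number, function mode/capabilities, the permanent MAC address
 * (PF only) and the usable VLAN table size.
 */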
static int be_get_config(struct be_adapter *adapter)
{
	int status;
	u8 mac[ETH_ALEN];

	status = be_cmd_get_fw_ver(adapter, adapter->fw_ver);
	if (status)
		return status;

	status = be_cmd_query_fw_cfg(adapter, &adapter->port_num,
			&adapter->function_mode, &adapter->function_caps);
	if (status)
		return status;

	memset(mac, 0, ETH_ALEN);

	if (be_physfn(adapter)) {
		status = be_cmd_mac_addr_query(adapter, mac,
			MAC_ADDRESS_TYPE_NETWORK, true /* permanent */, 0);

		if (status)
			return status;

		if (!is_valid_ether_addr(mac))
			return -EADDRNOTAVAIL;

		memcpy(adapter->netdev->dev_addr, mac, ETH_ALEN);
		memcpy(adapter->netdev->perm_addr, mac, ETH_ALEN);
	}

	/* In multi-channel (FLEX10-style) mode, indicated by bit 0x400 of
	 * function_mode, only a quarter of the VLAN table is available.
	 */
	if (adapter->function_mode & 0x400)
		adapter->max_vlans = BE_NUM_VLANS_SUPPORTED/4;
	else
		adapter->max_vlans = BE_NUM_VLANS_SUPPORTED;

	return 0;
}

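/*
 * Identify the adapter generation from the PCI device id, and for the
 * newest id also from the SLI_INTF register.
 */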
static int be_dev_family_check(struct be_adapter *adapter)
{
	struct pci_dev *pdev = adapter->pdev;
	u32 sli_intf = 0, if_type;

	switch (pdev->device) {
	case BE_DEVICE_ID1:
	case OC_DEVICE_ID1:
		adapter->generation = BE_GEN2;
		break;
	case BE_DEVICE_ID2:
	case OC_DEVICE_ID2:
		adapter->generation = BE_GEN3;
		break;
	case OC_DEVICE_ID3:
		pci_read_config_dword(pdev, SLI_INTF_REG_OFFSET, &sli_intf);
		if_type = (sli_intf & SLI_INTF_IF_TYPE_MASK) >>
			  SLI_INTF_IF_TYPE_SHIFT;

		if (((sli_intf & SLI_INTF_VALID_MASK) != SLI_INTF_VALID) ||
		    if_type != 0x02) {
			dev_err(&pdev->dev, "SLI_INTF reg val is not valid\n");
			return -EINVAL;
		}
		if (num_vfs > 0) {
			dev_err(&pdev->dev, "VFs not supported\n");
			return -EINVAL;
		}
		adapter->sli_family = ((sli_intf & SLI_INTF_FAMILY_MASK) >>
				       SLI_INTF_FAMILY_SHIFT);
		adapter->generation = BE_GEN3;
		break;
	default:
		adapter->generation = 0;
	}
	return 0;
}

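/*
 * Bring up a newly discovered adapter: enable the PCI device, set the DMA
 * mask, initialize the control path, sync with firmware (POST), create
 * queues via be_setup() and finally register the net device.
 */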
static int __devinit be_probe(struct pci_dev *pdev,
			      const struct pci_device_id *pdev_id)
{
	int status = 0;
	struct be_adapter *adapter;
	struct net_device *netdev;

	status = pci_enable_device(pdev);
	if (status)
		goto do_none;

	status = pci_request_regions(pdev, DRV_NAME);
	if (status)
		goto disable_dev;
	pci_set_master(pdev);

	netdev = alloc_etherdev(sizeof(struct be_adapter));
	if (netdev == NULL) {
		status = -ENOMEM;
		goto rel_reg;
	}
	adapter = netdev_priv(netdev);
	adapter->pdev = pdev;
	pci_set_drvdata(pdev, adapter);

	status = be_dev_family_check(adapter);
	if (status)
		goto free_netdev;

	adapter->netdev = netdev;
	SET_NETDEV_DEV(netdev, &pdev->dev);

	status = dma_set_mask(&pdev->dev, DMA_BIT_MASK(64));
	if (!status) {
		netdev->features |= NETIF_F_HIGHDMA;
	} else {
		status = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
		if (status) {
			dev_err(&pdev->dev, "Could not set PCI DMA Mask\n");
			goto free_netdev;
		}
	}

	be_sriov_enable(adapter);

	status = be_ctrl_init(adapter);
	if (status)
		goto free_netdev;

	/* sync up with fw's ready state */
	if (be_physfn(adapter)) {
		status = be_cmd_POST(adapter);
		if (status)
			goto ctrl_clean;
	}

	/* tell fw we're ready to fire cmds */
	status = be_cmd_fw_init(adapter);
	if (status)
		goto ctrl_clean;

	status = be_cmd_reset_function(adapter);
	if (status)
		goto ctrl_clean;

	status = be_stats_init(adapter);
	if (status)
		goto ctrl_clean;

	status = be_get_config(adapter);
	if (status)
		goto stats_clean;

	be_msix_enable(adapter);

	INIT_DELAYED_WORK(&adapter->work, be_worker);

	status = be_setup(adapter);
	if (status)
		goto msix_disable;

	be_netdev_init(netdev);
	status = register_netdev(netdev);
	if (status != 0)
		goto unsetup;
	netif_carrier_off(netdev);

	if (be_physfn(adapter) && adapter->sriov_enabled) {
		status = be_vf_eth_addr_config(adapter);
		if (status)
			goto unreg_netdev;
	}

	dev_info(&pdev->dev, "%s port %d\n", nic_name(pdev), adapter->port_num);
	schedule_delayed_work(&adapter->work, msecs_to_jiffies(100));
	return 0;

unreg_netdev:
	unregister_netdev(netdev);
unsetup:
	be_clear(adapter);
msix_disable:
	be_msix_disable(adapter);
stats_clean:
	be_stats_cleanup(adapter);
ctrl_clean:
	be_ctrl_cleanup(adapter);
free_netdev:
	be_sriov_disable(adapter);
	free_netdev(netdev);
	pci_set_drvdata(pdev, NULL);
rel_reg:
	pci_release_regions(pdev);
disable_dev:
	pci_disable_device(pdev);
do_none:
	dev_err(&pdev->dev, "%s initialization failed\n", nic_name(pdev));
	return status;
}

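/* PM suspend: quiesce the interface, release queues and power down */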
static int be_suspend(struct pci_dev *pdev, pm_message_t state)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);
	struct net_device *netdev = adapter->netdev;

	cancel_delayed_work_sync(&adapter->work);
	if (adapter->wol)
		be_setup_wol(adapter, true);

	netif_device_detach(netdev);
	if (netif_running(netdev)) {
		rtnl_lock();
		be_close(netdev);
		rtnl_unlock();
	}
	be_cmd_get_flow_control(adapter, &adapter->tx_fc, &adapter->rx_fc);
	be_clear(adapter);

	be_msix_disable(adapter);
	pci_save_state(pdev);
	pci_disable_device(pdev);
	pci_set_power_state(pdev, pci_choose_state(pdev, state));
	return 0;
}

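/* PM resume: re-enable the device, re-init firmware state and reopen */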
static int be_resume(struct pci_dev *pdev)
{
	int status = 0;
	struct be_adapter *adapter = pci_get_drvdata(pdev);
	struct net_device *netdev = adapter->netdev;

	netif_device_detach(netdev);

	status = pci_enable_device(pdev);
	if (status)
		return status;

	pci_set_power_state(pdev, PCI_D0);
	pci_restore_state(pdev);

	be_msix_enable(adapter);
	/* tell fw we're ready to fire cmds */
	status = be_cmd_fw_init(adapter);
	if (status)
		return status;

	be_setup(adapter);
	if (netif_running(netdev)) {
		rtnl_lock();
		be_open(netdev);
		rtnl_unlock();
	}
	netif_device_attach(netdev);

	if (adapter->wol)
		be_setup_wol(adapter, false);

	schedule_delayed_work(&adapter->work, msecs_to_jiffies(100));
	return 0;
}

/*
 * An FLR will stop BE from DMAing any data.
 */
static void be_shutdown(struct pci_dev *pdev)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);
	struct net_device *netdev = adapter->netdev;

	if (netif_running(netdev))
		cancel_delayed_work_sync(&adapter->work);

	netif_device_detach(netdev);

	be_cmd_reset_function(adapter);

	if (adapter->wol)
		be_setup_wol(adapter, true);

	pci_disable_device(pdev);
}

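/*
 * PCI error (EEH) handlers: detach and reset the function on an error,
 * then rebuild the control path and reopen the interface on recovery.
 */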
static pci_ers_result_t be_eeh_err_detected(struct pci_dev *pdev,
					    pci_channel_state_t state)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);
	struct net_device *netdev = adapter->netdev;

	dev_err(&adapter->pdev->dev, "EEH error detected\n");

	adapter->eeh_err = true;

	netif_device_detach(netdev);

	if (netif_running(netdev)) {
		rtnl_lock();
		be_close(netdev);
		rtnl_unlock();
	}
	be_clear(adapter);

	if (state == pci_channel_io_perm_failure)
		return PCI_ERS_RESULT_DISCONNECT;

	pci_disable_device(pdev);

	return PCI_ERS_RESULT_NEED_RESET;
}

static pci_ers_result_t be_eeh_reset(struct pci_dev *pdev)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);
	int status;

	dev_info(&adapter->pdev->dev, "EEH reset\n");
	adapter->eeh_err = false;

	status = pci_enable_device(pdev);
	if (status)
		return PCI_ERS_RESULT_DISCONNECT;

	pci_set_master(pdev);
	pci_set_power_state(pdev, PCI_D0);
	pci_restore_state(pdev);

	/* Check if card is ok and fw is ready */
	status = be_cmd_POST(adapter);
	if (status)
		return PCI_ERS_RESULT_DISCONNECT;

	return PCI_ERS_RESULT_RECOVERED;
}

static void be_eeh_resume(struct pci_dev *pdev)
{
	int status = 0;
	struct be_adapter *adapter = pci_get_drvdata(pdev);
	struct net_device *netdev = adapter->netdev;

	dev_info(&adapter->pdev->dev, "EEH resume\n");

	pci_save_state(pdev);

	/* tell fw we're ready to fire cmds */
	status = be_cmd_fw_init(adapter);
	if (status)
		goto err;

	status = be_setup(adapter);
	if (status)
		goto err;

	if (netif_running(netdev)) {
		status = be_open(netdev);
		if (status)
			goto err;
	}
	netif_device_attach(netdev);
	return;
err:
	dev_err(&adapter->pdev->dev, "EEH resume failed\n");
}

static struct pci_error_handlers be_eeh_handlers = {
	.error_detected = be_eeh_err_detected,
	.slot_reset = be_eeh_reset,
	.resume = be_eeh_resume,
};

static struct pci_driver be_driver = {
	.name = DRV_NAME,
	.id_table = be_dev_ids,
	.probe = be_probe,
	.remove = be_remove,
	.suspend = be_suspend,
	.resume = be_resume,
	.shutdown = be_shutdown,
	.err_handler = &be_eeh_handlers
};

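/*
 * Module entry: validate the rx_frag_size and num_vfs parameters before
 * registering the PCI driver. For example (assuming the module is built
 * as be2net): "modprobe be2net rx_frag_size=4096 num_vfs=4".
 */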
static int __init be_init_module(void)
{
	if (rx_frag_size != 8192 && rx_frag_size != 4096 &&
	    rx_frag_size != 2048) {
		printk(KERN_WARNING DRV_NAME
			" : Module param rx_frag_size must be 2048/4096/8192."
			" Using 2048\n");
		rx_frag_size = 2048;
	}

	if (num_vfs > 32) {
		printk(KERN_WARNING DRV_NAME
			" : Module param num_vfs must not be greater than 32."
			" Using 32\n");
		num_vfs = 32;
	}

	return pci_register_driver(&be_driver);
}
module_init(be_init_module);

static void __exit be_exit_module(void)
{
	pci_unregister_driver(&be_driver);
}
module_exit(be_exit_module);