/*
 * Copyright (C) 2005 - 2011 Emulex
 * All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version 2
 * as published by the Free Software Foundation. The full GNU General
 * Public License is included in this distribution in the file called COPYING.
 *
 * Contact Information:
 * linux-drivers@emulex.com
 *
 * Emulex
 * 3333 Susan Street
 * Costa Mesa, CA 92626
 */

#include <linux/prefetch.h>
#include "be.h"
#include "be_cmds.h"
#include <asm/div64.h>

MODULE_VERSION(DRV_VER);
MODULE_DEVICE_TABLE(pci, be_dev_ids);
MODULE_DESCRIPTION(DRV_DESC " " DRV_VER);
MODULE_AUTHOR("ServerEngines Corporation");
MODULE_LICENSE("GPL");

static ushort rx_frag_size = 2048;
static unsigned int num_vfs;
module_param(rx_frag_size, ushort, S_IRUGO);
module_param(num_vfs, uint, S_IRUGO);
MODULE_PARM_DESC(rx_frag_size, "Size of a fragment that holds rcvd data.");
MODULE_PARM_DESC(num_vfs, "Number of PCI VFs to initialize");

static DEFINE_PCI_DEVICE_TABLE(be_dev_ids) = {
	{ PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID1) },
	{ PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID2) },
	{ PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID1) },
	{ PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID2) },
	{ PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID3)},
	{ PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID4)},
	{ 0 }
};
MODULE_DEVICE_TABLE(pci, be_dev_ids);
/* UE Status Low CSR */
static const char * const ue_status_low_desc[] = {
	"CEV",
	"CTX",
	"DBUF",
	"ERX",
	"Host",
	"MPU",
	"NDMA",
	"PTC ",
	"RDMA ",
	"RXF ",
	"RXIPS ",
	"RXULP0 ",
	"RXULP1 ",
	"RXULP2 ",
	"TIM ",
	"TPOST ",
	"TPRE ",
	"TXIPS ",
	"TXULP0 ",
	"TXULP1 ",
	"UC ",
	"WDMA ",
	"TXULP2 ",
	"HOST1 ",
	"P0_OB_LINK ",
	"P1_OB_LINK ",
	"HOST_GPIO ",
	"MBOX ",
	"AXGMAC0",
	"AXGMAC1",
	"JTAG",
	"MPU_INTPEND"
};
/* UE Status High CSR */
static const char * const ue_status_hi_desc[] = {
	"LPCMEMHOST",
	"MGMT_MAC",
	"PCS0ONLINE",
	"MPU_IRAM",
	"PCS1ONLINE",
	"PCTL0",
	"PCTL1",
	"PMEM",
	"RR",
	"TXPB",
	"RXPP",
	"XAUI",
	"TXP",
	"ARM",
	"IPC",
	"HOST2",
	"HOST3",
	"HOST4",
	"HOST5",
	"HOST6",
	"HOST7",
	"HOST8",
	"HOST9",
	"NETC",
	"Unknown",
	"Unknown",
	"Unknown",
	"Unknown",
	"Unknown",
	"Unknown",
	"Unknown",
	"Unknown"
};

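/* Free the DMA-coherent memory backing a queue's ring, if it was allocated */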
static void be_queue_free(struct be_adapter *adapter, struct be_queue_info *q)
{
	struct be_dma_mem *mem = &q->dma_mem;
	if (mem->va)
		dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
				  mem->dma);
}

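/* Allocate and zero a DMA-coherent ring of 'len' entries for a queue */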
static int be_queue_alloc(struct be_adapter *adapter, struct be_queue_info *q,
		u16 len, u16 entry_size)
{
	struct be_dma_mem *mem = &q->dma_mem;

	memset(q, 0, sizeof(*q));
	q->len = len;
	q->entry_size = entry_size;
	mem->size = len * entry_size;
	mem->va = dma_alloc_coherent(&adapter->pdev->dev, mem->size, &mem->dma,
				     GFP_KERNEL);
	if (!mem->va)
		return -1;
	memset(mem->va, 0, mem->size);
	return 0;
}

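/* Enable/disable the host interrupt bit in the MEMBAR interrupt-control
 * register; a no-op if an EEH error has been detected.
 */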
static void be_intr_set(struct be_adapter *adapter, bool enable)
{
	u8 __iomem *addr = adapter->pcicfg + PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET;
	u32 reg = ioread32(addr);
	u32 enabled = reg & MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;

	if (adapter->eeh_err)
		return;

	if (!enabled && enable)
		reg |= MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
	else if (enabled && !enable)
		reg &= ~MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
	else
		return;

	iowrite32(reg, addr);
}

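/* Ring the RQ/TXQ doorbells to tell hardware how many new entries were
 * posted; the wmb() orders the descriptor writes before the doorbell write.
 */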
static void be_rxq_notify(struct be_adapter *adapter, u16 qid, u16 posted)
{
	u32 val = 0;
	val |= qid & DB_RQ_RING_ID_MASK;
	val |= posted << DB_RQ_NUM_POSTED_SHIFT;

	wmb();
	iowrite32(val, adapter->db + DB_RQ_OFFSET);
}

static void be_txq_notify(struct be_adapter *adapter, u16 qid, u16 posted)
{
	u32 val = 0;
	val |= qid & DB_TXULP_RING_ID_MASK;
	val |= (posted & DB_TXULP_NUM_POSTED_MASK) << DB_TXULP_NUM_POSTED_SHIFT;

	wmb();
	iowrite32(val, adapter->db + DB_TXULP1_OFFSET);
}

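/* EQ/CQ doorbells: optionally re-arm the queue (and, for EQs, clear the
 * interrupt) while acknowledging 'num_popped' processed entries.
 */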
static void be_eq_notify(struct be_adapter *adapter, u16 qid,
		bool arm, bool clear_int, u16 num_popped)
{
	u32 val = 0;
	val |= qid & DB_EQ_RING_ID_MASK;
	val |= ((qid & DB_EQ_RING_ID_EXT_MASK) <<
			DB_EQ_RING_ID_EXT_MASK_SHIFT);

	if (adapter->eeh_err)
		return;

	if (arm)
		val |= 1 << DB_EQ_REARM_SHIFT;
	if (clear_int)
		val |= 1 << DB_EQ_CLR_SHIFT;
	val |= 1 << DB_EQ_EVNT_SHIFT;
	val |= num_popped << DB_EQ_NUM_POPPED_SHIFT;
	iowrite32(val, adapter->db + DB_EQ_OFFSET);
}

void be_cq_notify(struct be_adapter *adapter, u16 qid, bool arm, u16 num_popped)
{
	u32 val = 0;
	val |= qid & DB_CQ_RING_ID_MASK;
	val |= ((qid & DB_CQ_RING_ID_EXT_MASK) <<
			DB_CQ_RING_ID_EXT_MASK_SHIFT);

	if (adapter->eeh_err)
		return;

	if (arm)
		val |= 1 << DB_CQ_REARM_SHIFT;
	val |= num_popped << DB_CQ_NUM_POPPED_SHIFT;
	iowrite32(val, adapter->db + DB_CQ_OFFSET);
}

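/* ndo_set_mac_address handler: on the PF, replace the programmed pmac entry
 * (delete old, add new) before updating netdev->dev_addr.
 */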
static int be_mac_addr_set(struct net_device *netdev, void *p)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct sockaddr *addr = p;
	int status = 0;

	if (!is_valid_ether_addr(addr->sa_data))
		return -EADDRNOTAVAIL;

	/* MAC addr configuration will be done in hardware for VFs
	 * by their corresponding PFs. Just copy to netdev addr here
	 */
	if (!be_physfn(adapter))
		goto netdev_addr;

	status = be_cmd_pmac_del(adapter, adapter->if_handle,
				adapter->pmac_id, 0);
	if (status)
		return status;

	status = be_cmd_pmac_add(adapter, (u8 *)addr->sa_data,
			adapter->if_handle, &adapter->pmac_id, 0);
netdev_addr:
	if (!status)
		memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);

	return status;
}

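/* The populate_*_stats() helpers copy the chip-specific stats returned by
 * firmware (BE2 v0 layout, BE3 v1 layout, Lancer pport layout) into the
 * generation-neutral adapter->drv_stats fields.
 */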
static void populate_be2_stats(struct be_adapter *adapter)
{
	struct be_hw_stats_v0 *hw_stats = hw_stats_from_cmd(adapter);
	struct be_pmem_stats *pmem_sts = &hw_stats->pmem;
	struct be_rxf_stats_v0 *rxf_stats = &hw_stats->rxf;
	struct be_port_rxf_stats_v0 *port_stats =
			&rxf_stats->port[adapter->port_num];
	struct be_drv_stats *drvs = &adapter->drv_stats;

	be_dws_le_to_cpu(hw_stats, sizeof(*hw_stats));
	drvs->rx_pause_frames = port_stats->rx_pause_frames;
	drvs->rx_crc_errors = port_stats->rx_crc_errors;
	drvs->rx_control_frames = port_stats->rx_control_frames;
	drvs->rx_in_range_errors = port_stats->rx_in_range_errors;
	drvs->rx_frame_too_long = port_stats->rx_frame_too_long;
	drvs->rx_dropped_runt = port_stats->rx_dropped_runt;
	drvs->rx_ip_checksum_errs = port_stats->rx_ip_checksum_errs;
	drvs->rx_tcp_checksum_errs = port_stats->rx_tcp_checksum_errs;
	drvs->rx_udp_checksum_errs = port_stats->rx_udp_checksum_errs;
	drvs->rxpp_fifo_overflow_drop = port_stats->rx_fifo_overflow;
	drvs->rx_dropped_tcp_length = port_stats->rx_dropped_tcp_length;
	drvs->rx_dropped_too_small = port_stats->rx_dropped_too_small;
	drvs->rx_dropped_too_short = port_stats->rx_dropped_too_short;
	drvs->rx_out_range_errors = port_stats->rx_out_range_errors;
	drvs->rx_input_fifo_overflow_drop = port_stats->rx_input_fifo_overflow;
	drvs->rx_dropped_header_too_small =
		port_stats->rx_dropped_header_too_small;
	drvs->rx_address_match_errors = port_stats->rx_address_match_errors;
	drvs->rx_alignment_symbol_errors =
		port_stats->rx_alignment_symbol_errors;

	drvs->tx_pauseframes = port_stats->tx_pauseframes;
	drvs->tx_controlframes = port_stats->tx_controlframes;

	if (adapter->port_num)
		drvs->jabber_events = rxf_stats->port1_jabber_events;
	else
		drvs->jabber_events = rxf_stats->port0_jabber_events;
	drvs->rx_drops_no_pbuf = rxf_stats->rx_drops_no_pbuf;
	drvs->rx_drops_no_txpb = rxf_stats->rx_drops_no_txpb;
	drvs->rx_drops_no_erx_descr = rxf_stats->rx_drops_no_erx_descr;
	drvs->rx_drops_invalid_ring = rxf_stats->rx_drops_invalid_ring;
	drvs->forwarded_packets = rxf_stats->forwarded_packets;
	drvs->rx_drops_mtu = rxf_stats->rx_drops_mtu;
	drvs->rx_drops_no_tpre_descr = rxf_stats->rx_drops_no_tpre_descr;
	drvs->rx_drops_too_many_frags = rxf_stats->rx_drops_too_many_frags;
	adapter->drv_stats.eth_red_drops = pmem_sts->eth_red_drops;
}

static void populate_be3_stats(struct be_adapter *adapter)
{
	struct be_hw_stats_v1 *hw_stats = hw_stats_from_cmd(adapter);
	struct be_pmem_stats *pmem_sts = &hw_stats->pmem;
	struct be_rxf_stats_v1 *rxf_stats = &hw_stats->rxf;
	struct be_port_rxf_stats_v1 *port_stats =
			&rxf_stats->port[adapter->port_num];
	struct be_drv_stats *drvs = &adapter->drv_stats;

	be_dws_le_to_cpu(hw_stats, sizeof(*hw_stats));
	drvs->rx_pause_frames = port_stats->rx_pause_frames;
	drvs->rx_crc_errors = port_stats->rx_crc_errors;
	drvs->rx_control_frames = port_stats->rx_control_frames;
	drvs->rx_in_range_errors = port_stats->rx_in_range_errors;
	drvs->rx_frame_too_long = port_stats->rx_frame_too_long;
	drvs->rx_dropped_runt = port_stats->rx_dropped_runt;
	drvs->rx_ip_checksum_errs = port_stats->rx_ip_checksum_errs;
	drvs->rx_tcp_checksum_errs = port_stats->rx_tcp_checksum_errs;
	drvs->rx_udp_checksum_errs = port_stats->rx_udp_checksum_errs;
	drvs->rx_dropped_tcp_length = port_stats->rx_dropped_tcp_length;
	drvs->rx_dropped_too_small = port_stats->rx_dropped_too_small;
	drvs->rx_dropped_too_short = port_stats->rx_dropped_too_short;
	drvs->rx_out_range_errors = port_stats->rx_out_range_errors;
	drvs->rx_dropped_header_too_small =
		port_stats->rx_dropped_header_too_small;
	drvs->rx_input_fifo_overflow_drop =
		port_stats->rx_input_fifo_overflow_drop;
	drvs->rx_address_match_errors = port_stats->rx_address_match_errors;
	drvs->rx_alignment_symbol_errors =
		port_stats->rx_alignment_symbol_errors;
	drvs->rxpp_fifo_overflow_drop = port_stats->rxpp_fifo_overflow_drop;
	drvs->tx_pauseframes = port_stats->tx_pauseframes;
	drvs->tx_controlframes = port_stats->tx_controlframes;
	drvs->jabber_events = port_stats->jabber_events;
	drvs->rx_drops_no_pbuf = rxf_stats->rx_drops_no_pbuf;
	drvs->rx_drops_no_txpb = rxf_stats->rx_drops_no_txpb;
	drvs->rx_drops_no_erx_descr = rxf_stats->rx_drops_no_erx_descr;
	drvs->rx_drops_invalid_ring = rxf_stats->rx_drops_invalid_ring;
	drvs->forwarded_packets = rxf_stats->forwarded_packets;
	drvs->rx_drops_mtu = rxf_stats->rx_drops_mtu;
	drvs->rx_drops_no_tpre_descr = rxf_stats->rx_drops_no_tpre_descr;
	drvs->rx_drops_too_many_frags = rxf_stats->rx_drops_too_many_frags;
	adapter->drv_stats.eth_red_drops = pmem_sts->eth_red_drops;
}

static void populate_lancer_stats(struct be_adapter *adapter)
{
	struct be_drv_stats *drvs = &adapter->drv_stats;
	struct lancer_pport_stats *pport_stats =
					pport_stats_from_cmd(adapter);

	be_dws_le_to_cpu(pport_stats, sizeof(*pport_stats));
	drvs->rx_pause_frames = pport_stats->rx_pause_frames_lo;
	drvs->rx_crc_errors = pport_stats->rx_crc_errors_lo;
	drvs->rx_control_frames = pport_stats->rx_control_frames_lo;
	drvs->rx_in_range_errors = pport_stats->rx_in_range_errors;
	drvs->rx_frame_too_long = pport_stats->rx_frames_too_long_lo;
	drvs->rx_dropped_runt = pport_stats->rx_dropped_runt;
	drvs->rx_ip_checksum_errs = pport_stats->rx_ip_checksum_errors;
	drvs->rx_tcp_checksum_errs = pport_stats->rx_tcp_checksum_errors;
	drvs->rx_udp_checksum_errs = pport_stats->rx_udp_checksum_errors;
	drvs->rx_dropped_tcp_length =
				pport_stats->rx_dropped_invalid_tcp_length;
	drvs->rx_dropped_too_small = pport_stats->rx_dropped_too_small;
	drvs->rx_dropped_too_short = pport_stats->rx_dropped_too_short;
	drvs->rx_out_range_errors = pport_stats->rx_out_of_range_errors;
	drvs->rx_dropped_header_too_small =
				pport_stats->rx_dropped_header_too_small;
	drvs->rx_input_fifo_overflow_drop = pport_stats->rx_fifo_overflow;
	drvs->rx_address_match_errors = pport_stats->rx_address_match_errors;
	drvs->rx_alignment_symbol_errors = pport_stats->rx_symbol_errors_lo;
	drvs->rxpp_fifo_overflow_drop = pport_stats->rx_fifo_overflow;
	drvs->tx_pauseframes = pport_stats->tx_pause_frames_lo;
	drvs->tx_controlframes = pport_stats->tx_control_frames_lo;
	drvs->jabber_events = pport_stats->rx_jabbers;
	drvs->rx_drops_invalid_ring = pport_stats->rx_drops_invalid_queue;
	drvs->forwarded_packets = pport_stats->num_forwards_lo;
	drvs->rx_drops_mtu = pport_stats->rx_drops_mtu_lo;
	drvs->rx_drops_too_many_frags =
				pport_stats->rx_drops_too_many_frags_lo;
}

void be_parse_stats(struct be_adapter *adapter)
{
	struct be_erx_stats_v1 *erx = be_erx_stats_from_cmd(adapter);
	struct be_rx_obj *rxo;
	int i;

	if (adapter->generation == BE_GEN3) {
		if (lancer_chip(adapter))
			populate_lancer_stats(adapter);
		else
			populate_be3_stats(adapter);
	} else {
		populate_be2_stats(adapter);
	}

	/* as erx_v1 is longer than v0, ok to use v1 defn for v0 access */
	for_all_rx_queues(adapter, rxo, i)
		rx_stats(rxo)->rx_drops_no_frags =
			erx->rx_drops_no_fragments[rxo->q.id];
}

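/* ndo_get_stats64 handler: aggregate per-queue SW counters (read under the
 * u64_stats seqcount so 64-bit values are consistent on 32-bit hosts) and
 * fold in the HW error counters cached in drv_stats.
 */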
static struct rtnl_link_stats64 *be_get_stats64(struct net_device *netdev,
					struct rtnl_link_stats64 *stats)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_drv_stats *drvs = &adapter->drv_stats;
	struct be_rx_obj *rxo;
	struct be_tx_obj *txo;
	u64 pkts, bytes;
	unsigned int start;
	int i;

	for_all_rx_queues(adapter, rxo, i) {
		const struct be_rx_stats *rx_stats = rx_stats(rxo);
		do {
			start = u64_stats_fetch_begin_bh(&rx_stats->sync);
			pkts = rx_stats(rxo)->rx_pkts;
			bytes = rx_stats(rxo)->rx_bytes;
		} while (u64_stats_fetch_retry_bh(&rx_stats->sync, start));
		stats->rx_packets += pkts;
		stats->rx_bytes += bytes;
		stats->multicast += rx_stats(rxo)->rx_mcast_pkts;
		stats->rx_dropped += rx_stats(rxo)->rx_drops_no_skbs +
					rx_stats(rxo)->rx_drops_no_frags;
	}

	for_all_tx_queues(adapter, txo, i) {
		const struct be_tx_stats *tx_stats = tx_stats(txo);
		do {
			start = u64_stats_fetch_begin_bh(&tx_stats->sync);
			pkts = tx_stats(txo)->tx_pkts;
			bytes = tx_stats(txo)->tx_bytes;
		} while (u64_stats_fetch_retry_bh(&tx_stats->sync, start));
		stats->tx_packets += pkts;
		stats->tx_bytes += bytes;
	}

	/* bad pkts received */
	stats->rx_errors = drvs->rx_crc_errors +
		drvs->rx_alignment_symbol_errors +
		drvs->rx_in_range_errors +
		drvs->rx_out_range_errors +
		drvs->rx_frame_too_long +
		drvs->rx_dropped_too_small +
		drvs->rx_dropped_too_short +
		drvs->rx_dropped_header_too_small +
		drvs->rx_dropped_tcp_length +
		drvs->rx_dropped_runt;

	/* detailed rx errors */
	stats->rx_length_errors = drvs->rx_in_range_errors +
		drvs->rx_out_range_errors +
		drvs->rx_frame_too_long;

	stats->rx_crc_errors = drvs->rx_crc_errors;

	/* frame alignment errors */
	stats->rx_frame_errors = drvs->rx_alignment_symbol_errors;

	/* receiver fifo overrun */
	/* drops_no_pbuf is not per i/f, it's per BE card */
	stats->rx_fifo_errors = drvs->rxpp_fifo_overflow_drop +
				drvs->rx_input_fifo_overflow_drop +
				drvs->rx_drops_no_pbuf;
	return stats;
}

void be_link_status_update(struct be_adapter *adapter, u32 link_status)
{
	struct net_device *netdev = adapter->netdev;

	/* when link status changes, link speed must be re-queried from card */
	adapter->link_speed = -1;
	if ((link_status & LINK_STATUS_MASK) == LINK_UP) {
		netif_carrier_on(netdev);
		dev_info(&adapter->pdev->dev, "%s: Link up\n", netdev->name);
	} else {
		netif_carrier_off(netdev);
		dev_info(&adapter->pdev->dev, "%s: Link down\n", netdev->name);
	}
}

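/* Update per-TX-queue SW counters for a transmitted packet under the
 * u64_stats seqcount.
 */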
static void be_tx_stats_update(struct be_tx_obj *txo,
			u32 wrb_cnt, u32 copied, u32 gso_segs, bool stopped)
{
	struct be_tx_stats *stats = tx_stats(txo);

	u64_stats_update_begin(&stats->sync);
	stats->tx_reqs++;
	stats->tx_wrbs += wrb_cnt;
	stats->tx_bytes += copied;
	stats->tx_pkts += (gso_segs ? gso_segs : 1);
	if (stopped)
		stats->tx_stops++;
	u64_stats_update_end(&stats->sync);
}

/* Determine number of WRB entries needed to xmit data in an skb */
static u32 wrb_cnt_for_skb(struct be_adapter *adapter, struct sk_buff *skb,
								bool *dummy)
{
	int cnt = (skb->len > skb->data_len);

	cnt += skb_shinfo(skb)->nr_frags;

	/* to account for hdr wrb */
	cnt++;
	if (lancer_chip(adapter) || !(cnt & 1)) {
		*dummy = false;
	} else {
		/* add a dummy to make it an even num */
		cnt++;
		*dummy = true;
	}
	BUG_ON(cnt > BE_MAX_TX_FRAG_COUNT);
	return cnt;
}

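/* Fill one work request block with a fragment's DMA address and length */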
static inline void wrb_fill(struct be_eth_wrb *wrb, u64 addr, int len)
{
	wrb->frag_pa_hi = upper_32_bits(addr);
	wrb->frag_pa_lo = addr & 0xFFFFFFFF;
	wrb->frag_len = len & ETH_WRB_FRAG_LEN_MASK;
}

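/* Build the header WRB: set checksum-offload, LSO and VLAN-tag fields based
 * on the skb, and record the total WRB count and payload length.
 */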
static void wrb_fill_hdr(struct be_adapter *adapter, struct be_eth_hdr_wrb *hdr,
		struct sk_buff *skb, u32 wrb_cnt, u32 len)
{
	u8 vlan_prio = 0;
	u16 vlan_tag = 0;

	memset(hdr, 0, sizeof(*hdr));

	AMAP_SET_BITS(struct amap_eth_hdr_wrb, crc, hdr, 1);

	if (skb_is_gso(skb)) {
		AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso, hdr, 1);
		AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso_mss,
			hdr, skb_shinfo(skb)->gso_size);
		if (skb_is_gso_v6(skb) && !lancer_chip(adapter))
			AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso6, hdr, 1);
		if (lancer_chip(adapter) && adapter->sli_family ==
							LANCER_A0_SLI_FAMILY) {
			AMAP_SET_BITS(struct amap_eth_hdr_wrb, ipcs, hdr, 1);
			if (is_tcp_pkt(skb))
				AMAP_SET_BITS(struct amap_eth_hdr_wrb,
								tcpcs, hdr, 1);
			else if (is_udp_pkt(skb))
				AMAP_SET_BITS(struct amap_eth_hdr_wrb,
								udpcs, hdr, 1);
		}
	} else if (skb->ip_summed == CHECKSUM_PARTIAL) {
		if (is_tcp_pkt(skb))
			AMAP_SET_BITS(struct amap_eth_hdr_wrb, tcpcs, hdr, 1);
		else if (is_udp_pkt(skb))
			AMAP_SET_BITS(struct amap_eth_hdr_wrb, udpcs, hdr, 1);
	}

	if (vlan_tx_tag_present(skb)) {
		AMAP_SET_BITS(struct amap_eth_hdr_wrb, vlan, hdr, 1);
		vlan_tag = vlan_tx_tag_get(skb);
		vlan_prio = (vlan_tag & VLAN_PRIO_MASK) >> VLAN_PRIO_SHIFT;
		/* If vlan priority provided by OS is NOT in available bmap */
		if (!(adapter->vlan_prio_bmap & (1 << vlan_prio)))
			vlan_tag = (vlan_tag & ~VLAN_PRIO_MASK) |
					adapter->recommended_prio;
		AMAP_SET_BITS(struct amap_eth_hdr_wrb, vlan_tag, hdr, vlan_tag);
	}

	AMAP_SET_BITS(struct amap_eth_hdr_wrb, event, hdr, 1);
	AMAP_SET_BITS(struct amap_eth_hdr_wrb, complete, hdr, 1);
	AMAP_SET_BITS(struct amap_eth_hdr_wrb, num_wrb, hdr, wrb_cnt);
	AMAP_SET_BITS(struct amap_eth_hdr_wrb, len, hdr, len);
}

static void unmap_tx_frag(struct device *dev, struct be_eth_wrb *wrb,
		bool unmap_single)
{
	dma_addr_t dma;

	be_dws_le_to_cpu(wrb, sizeof(*wrb));

	dma = (u64)wrb->frag_pa_hi << 32 | (u64)wrb->frag_pa_lo;
	if (wrb->frag_len) {
		if (unmap_single)
			dma_unmap_single(dev, dma, wrb->frag_len,
					 DMA_TO_DEVICE);
		else
			dma_unmap_page(dev, dma, wrb->frag_len, DMA_TO_DEVICE);
	}
}

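/* DMA-map the skb (linear part and page frags) and fill one WRB per
 * fragment; returns bytes mapped, or 0 after unwinding on a mapping error.
 */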
static int make_tx_wrbs(struct be_adapter *adapter, struct be_queue_info *txq,
		struct sk_buff *skb, u32 wrb_cnt, bool dummy_wrb)
{
	dma_addr_t busaddr;
	int i, copied = 0;
	struct device *dev = &adapter->pdev->dev;
	struct sk_buff *first_skb = skb;
	struct be_eth_wrb *wrb;
	struct be_eth_hdr_wrb *hdr;
	bool map_single = false;
	u16 map_head;

	hdr = queue_head_node(txq);
	queue_head_inc(txq);
	map_head = txq->head;

	if (skb->len > skb->data_len) {
		int len = skb_headlen(skb);
		busaddr = dma_map_single(dev, skb->data, len, DMA_TO_DEVICE);
		if (dma_mapping_error(dev, busaddr))
			goto dma_err;
		map_single = true;
		wrb = queue_head_node(txq);
		wrb_fill(wrb, busaddr, len);
		be_dws_cpu_to_le(wrb, sizeof(*wrb));
		queue_head_inc(txq);
		copied += len;
	}

	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		struct skb_frag_struct *frag =
			&skb_shinfo(skb)->frags[i];
		busaddr = dma_map_page(dev, frag->page, frag->page_offset,
				       frag->size, DMA_TO_DEVICE);
		if (dma_mapping_error(dev, busaddr))
			goto dma_err;
		wrb = queue_head_node(txq);
		wrb_fill(wrb, busaddr, frag->size);
		be_dws_cpu_to_le(wrb, sizeof(*wrb));
		queue_head_inc(txq);
		copied += frag->size;
	}

	if (dummy_wrb) {
		wrb = queue_head_node(txq);
		wrb_fill(wrb, 0, 0);
		be_dws_cpu_to_le(wrb, sizeof(*wrb));
		queue_head_inc(txq);
	}

	wrb_fill_hdr(adapter, hdr, first_skb, wrb_cnt, copied);
	be_dws_cpu_to_le(hdr, sizeof(*hdr));

	return copied;
dma_err:
	txq->head = map_head;
	while (copied) {
		wrb = queue_head_node(txq);
		unmap_tx_frag(dev, wrb, map_single);
		map_single = false;
		copied -= wrb->frag_len;
		queue_head_inc(txq);
	}
	return 0;
}

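/* ndo_start_xmit handler: build WRBs for the skb, stop the subqueue if it is
 * about to fill, then ring the TX doorbell.
 */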
static netdev_tx_t be_xmit(struct sk_buff *skb,
			struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_tx_obj *txo = &adapter->tx_obj[skb_get_queue_mapping(skb)];
	struct be_queue_info *txq = &txo->q;
	u32 wrb_cnt = 0, copied = 0;
	u32 start = txq->head;
	bool dummy_wrb, stopped = false;

	wrb_cnt = wrb_cnt_for_skb(adapter, skb, &dummy_wrb);

	copied = make_tx_wrbs(adapter, txq, skb, wrb_cnt, dummy_wrb);
	if (copied) {
		/* record the sent skb in the sent_skb table */
		BUG_ON(txo->sent_skb_list[start]);
		txo->sent_skb_list[start] = skb;

		/* Ensure txq has space for the next skb; Else stop the queue
		 * *BEFORE* ringing the tx doorbell, so that we serialize the
		 * tx compls of the current transmit which'll wake up the queue
		 */
		atomic_add(wrb_cnt, &txq->used);
		if ((BE_MAX_TX_FRAG_COUNT + atomic_read(&txq->used)) >=
								txq->len) {
			netif_stop_subqueue(netdev, skb_get_queue_mapping(skb));
			stopped = true;
		}

		be_txq_notify(adapter, txq->id, wrb_cnt);

		be_tx_stats_update(txo, wrb_cnt, copied,
				skb_shinfo(skb)->gso_segs, stopped);
	} else {
		txq->head = start;
		dev_kfree_skb_any(skb);
	}
	return NETDEV_TX_OK;
}

static int be_change_mtu(struct net_device *netdev, int new_mtu)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	if (new_mtu < BE_MIN_MTU ||
			new_mtu > (BE_MAX_JUMBO_FRAME_SIZE -
					(ETH_HLEN + ETH_FCS_LEN))) {
		dev_info(&adapter->pdev->dev,
			"MTU must be between %d and %d bytes\n",
			BE_MIN_MTU,
			(BE_MAX_JUMBO_FRAME_SIZE - (ETH_HLEN + ETH_FCS_LEN)));
		return -EINVAL;
	}
	dev_info(&adapter->pdev->dev, "MTU changed from %d to %d bytes\n",
			netdev->mtu, new_mtu);
	netdev->mtu = new_mtu;
	return 0;
}

/*
 * A max of 64 (BE_NUM_VLANS_SUPPORTED) vlans can be configured in BE.
 * If the user configures more, place BE in vlan promiscuous mode.
 */
static int be_vid_config(struct be_adapter *adapter, bool vf, u32 vf_num)
{
	u16 vtag[BE_NUM_VLANS_SUPPORTED];
	u16 ntags = 0, i;
	int status = 0;
	u32 if_handle;

	if (vf) {
		if_handle = adapter->vf_cfg[vf_num].vf_if_handle;
		vtag[0] = cpu_to_le16(adapter->vf_cfg[vf_num].vf_vlan_tag);
		status = be_cmd_vlan_config(adapter, if_handle, vtag, 1, 1, 0);
	}

	/* No need to further configure vids if in promiscuous mode */
	if (adapter->promiscuous)
		return 0;

	if (adapter->vlans_added <= adapter->max_vlans) {
		/* Construct VLAN Table to give to HW */
		for (i = 0; i < VLAN_N_VID; i++) {
			if (adapter->vlan_tag[i]) {
				vtag[ntags] = cpu_to_le16(i);
				ntags++;
			}
		}
		status = be_cmd_vlan_config(adapter, adapter->if_handle,
					vtag, ntags, 1, 0);
	} else {
		status = be_cmd_vlan_config(adapter, adapter->if_handle,
					NULL, 0, 1, 1);
	}

	return status;
}

static void be_vlan_add_vid(struct net_device *netdev, u16 vid)
{
	struct be_adapter *adapter = netdev_priv(netdev);

	adapter->vlans_added++;
	if (!be_physfn(adapter))
		return;

	adapter->vlan_tag[vid] = 1;
	if (adapter->vlans_added <= (adapter->max_vlans + 1))
		be_vid_config(adapter, false, 0);
}

static void be_vlan_rem_vid(struct net_device *netdev, u16 vid)
{
	struct be_adapter *adapter = netdev_priv(netdev);

	adapter->vlans_added--;

	if (!be_physfn(adapter))
		return;

	adapter->vlan_tag[vid] = 0;
	if (adapter->vlans_added <= adapter->max_vlans)
		be_vid_config(adapter, false, 0);
}

static void be_set_multicast_list(struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);

	if (netdev->flags & IFF_PROMISC) {
		be_cmd_rx_filter(adapter, IFF_PROMISC, ON);
		adapter->promiscuous = true;
		goto done;
	}

	/* BE was previously in promiscuous mode; disable it */
	if (adapter->promiscuous) {
		adapter->promiscuous = false;
		be_cmd_rx_filter(adapter, IFF_PROMISC, OFF);

		if (adapter->vlans_added)
			be_vid_config(adapter, false, 0);
	}

	/* Enable multicast promisc if num configured exceeds what we support */
	if (netdev->flags & IFF_ALLMULTI ||
	    netdev_mc_count(netdev) > BE_MAX_MC) {
		be_cmd_rx_filter(adapter, IFF_ALLMULTI, ON);
		goto done;
	}

	be_cmd_rx_filter(adapter, IFF_MULTICAST, ON);
done:
	return;
}

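/* SR-IOV ndo handlers: configure a VF's MAC, query its config, and set its
 * VLAN and TX rate via mailbox commands issued on the PF.
 */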
static int be_set_vf_mac(struct net_device *netdev, int vf, u8 *mac)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	int status;

	if (!adapter->sriov_enabled)
		return -EPERM;

	if (!is_valid_ether_addr(mac) || (vf >= num_vfs))
		return -EINVAL;

	if (adapter->vf_cfg[vf].vf_pmac_id != BE_INVALID_PMAC_ID)
		status = be_cmd_pmac_del(adapter,
				adapter->vf_cfg[vf].vf_if_handle,
				adapter->vf_cfg[vf].vf_pmac_id, vf + 1);

	status = be_cmd_pmac_add(adapter, mac,
			adapter->vf_cfg[vf].vf_if_handle,
			&adapter->vf_cfg[vf].vf_pmac_id, vf + 1);

	if (status)
		dev_err(&adapter->pdev->dev, "MAC %pM set on VF %d Failed\n",
				mac, vf);
	else
		memcpy(adapter->vf_cfg[vf].vf_mac_addr, mac, ETH_ALEN);

	return status;
}

static int be_get_vf_config(struct net_device *netdev, int vf,
			struct ifla_vf_info *vi)
{
	struct be_adapter *adapter = netdev_priv(netdev);

	if (!adapter->sriov_enabled)
		return -EPERM;

	if (vf >= num_vfs)
		return -EINVAL;

	vi->vf = vf;
	vi->tx_rate = adapter->vf_cfg[vf].vf_tx_rate;
	vi->vlan = adapter->vf_cfg[vf].vf_vlan_tag;
	vi->qos = 0;
	memcpy(&vi->mac, adapter->vf_cfg[vf].vf_mac_addr, ETH_ALEN);

	return 0;
}

static int be_set_vf_vlan(struct net_device *netdev,
			int vf, u16 vlan, u8 qos)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	int status = 0;

	if (!adapter->sriov_enabled)
		return -EPERM;

	if ((vf >= num_vfs) || (vlan > 4095))
		return -EINVAL;

	if (vlan) {
		adapter->vf_cfg[vf].vf_vlan_tag = vlan;
		adapter->vlans_added++;
	} else {
		adapter->vf_cfg[vf].vf_vlan_tag = 0;
		adapter->vlans_added--;
	}

	status = be_vid_config(adapter, true, vf);

	if (status)
		dev_info(&adapter->pdev->dev,
				"VLAN %d config on VF %d failed\n", vlan, vf);
	return status;
}

static int be_set_vf_tx_rate(struct net_device *netdev,
			int vf, int rate)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	int status = 0;

	if (!adapter->sriov_enabled)
		return -EPERM;

	if ((vf >= num_vfs) || (rate < 0))
		return -EINVAL;

	if (rate > 10000)
		rate = 10000;

	adapter->vf_cfg[vf].vf_tx_rate = rate;
	status = be_cmd_set_qos(adapter, rate / 10, vf + 1);

	if (status)
		dev_info(&adapter->pdev->dev,
				"tx rate %d on VF %d failed\n", rate, vf);
	return status;
}

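/* Adaptive interrupt coalescing: once a second, derive the RX packet rate
 * from the SW counters and scale the EQ delay between min_eqd and max_eqd.
 */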
static void be_rx_eqd_update(struct be_adapter *adapter, struct be_rx_obj *rxo)
{
	struct be_eq_obj *rx_eq = &rxo->rx_eq;
	struct be_rx_stats *stats = rx_stats(rxo);
	ulong now = jiffies;
	ulong delta = now - stats->rx_jiffies;
	u64 pkts;
	unsigned int start, eqd;

	if (!rx_eq->enable_aic)
		return;

	/* Wrapped around */
	if (time_before(now, stats->rx_jiffies)) {
		stats->rx_jiffies = now;
		return;
	}

	/* Update once a second */
	if (delta < HZ)
		return;

	do {
		start = u64_stats_fetch_begin_bh(&stats->sync);
		pkts = stats->rx_pkts;
	} while (u64_stats_fetch_retry_bh(&stats->sync, start));

	stats->rx_pps = (unsigned long)(pkts - stats->rx_pkts_prev) / (delta / HZ);
	stats->rx_pkts_prev = pkts;
	stats->rx_jiffies = now;
	eqd = stats->rx_pps / 110000;
	eqd = eqd << 3;
	if (eqd > rx_eq->max_eqd)
		eqd = rx_eq->max_eqd;
	if (eqd < rx_eq->min_eqd)
		eqd = rx_eq->min_eqd;
	if (eqd < 10)
		eqd = 0;
	if (eqd != rx_eq->cur_eqd) {
		be_cmd_modify_eqd(adapter, rx_eq->q.id, eqd);
		rx_eq->cur_eqd = eqd;
	}
}

static void be_rx_stats_update(struct be_rx_obj *rxo,
		struct be_rx_compl_info *rxcp)
{
	struct be_rx_stats *stats = rx_stats(rxo);

	u64_stats_update_begin(&stats->sync);
	stats->rx_compl++;
	stats->rx_bytes += rxcp->pkt_size;
	stats->rx_pkts++;
	if (rxcp->pkt_type == BE_MULTICAST_PACKET)
		stats->rx_mcast_pkts++;
	if (rxcp->err)
		stats->rx_compl_err++;
	u64_stats_update_end(&stats->sync);
}

static inline bool csum_passed(struct be_rx_compl_info *rxcp)
{
	/* L4 checksum is not reliable for non TCP/UDP packets.
	 * Also ignore ipcksm for ipv6 pkts */
	return (rxcp->tcpf || rxcp->udpf) && rxcp->l4_csum &&
				(rxcp->ip_csum || rxcp->ipv6);
}

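/* Look up the page_info for a frag index; unmap the backing page from the
 * device when its last user consumes it.
 */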
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700980static struct be_rx_page_info *
Sathya Perla3abcded2010-10-03 22:12:27 -0700981get_rx_page_info(struct be_adapter *adapter,
982 struct be_rx_obj *rxo,
983 u16 frag_idx)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700984{
985 struct be_rx_page_info *rx_page_info;
Sathya Perla3abcded2010-10-03 22:12:27 -0700986 struct be_queue_info *rxq = &rxo->q;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700987
Sathya Perla3abcded2010-10-03 22:12:27 -0700988 rx_page_info = &rxo->page_info_tbl[frag_idx];
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700989 BUG_ON(!rx_page_info->page);
990
Ajit Khaparde205859a2010-02-09 01:34:21 +0000991 if (rx_page_info->last_page_user) {
Ivan Vecera2b7bceb2011-02-02 08:05:12 +0000992 dma_unmap_page(&adapter->pdev->dev,
993 dma_unmap_addr(rx_page_info, bus),
994 adapter->big_page_size, DMA_FROM_DEVICE);
Ajit Khaparde205859a2010-02-09 01:34:21 +0000995 rx_page_info->last_page_user = false;
996 }
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700997
998 atomic_dec(&rxq->used);
999 return rx_page_info;
1000}
1001
1002/* Throwaway the data in the Rx completion */
1003static void be_rx_compl_discard(struct be_adapter *adapter,
Sathya Perla3abcded2010-10-03 22:12:27 -07001004 struct be_rx_obj *rxo,
Sathya Perla2e588f82011-03-11 02:49:26 +00001005 struct be_rx_compl_info *rxcp)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001006{
Sathya Perla3abcded2010-10-03 22:12:27 -07001007 struct be_queue_info *rxq = &rxo->q;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001008 struct be_rx_page_info *page_info;
Sathya Perla2e588f82011-03-11 02:49:26 +00001009 u16 i, num_rcvd = rxcp->num_rcvd;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001010
Padmanabh Ratnakare80d9da2011-03-07 03:07:58 +00001011 for (i = 0; i < num_rcvd; i++) {
Sathya Perla2e588f82011-03-11 02:49:26 +00001012 page_info = get_rx_page_info(adapter, rxo, rxcp->rxq_idx);
Padmanabh Ratnakare80d9da2011-03-07 03:07:58 +00001013 put_page(page_info->page);
1014 memset(page_info, 0, sizeof(*page_info));
Sathya Perla2e588f82011-03-11 02:49:26 +00001015 index_inc(&rxcp->rxq_idx, rxq->len);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001016 }
1017}
1018
1019/*
1020 * skb_fill_rx_data forms a complete skb for an ether frame
1021 * indicated by rxcp.
1022 */
Sathya Perla3abcded2010-10-03 22:12:27 -07001023static void skb_fill_rx_data(struct be_adapter *adapter, struct be_rx_obj *rxo,
Sathya Perla2e588f82011-03-11 02:49:26 +00001024 struct sk_buff *skb, struct be_rx_compl_info *rxcp)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001025{
Sathya Perla3abcded2010-10-03 22:12:27 -07001026 struct be_queue_info *rxq = &rxo->q;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001027 struct be_rx_page_info *page_info;
Sathya Perla2e588f82011-03-11 02:49:26 +00001028 u16 i, j;
1029 u16 hdr_len, curr_frag_len, remaining;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001030 u8 *start;
1031
Sathya Perla2e588f82011-03-11 02:49:26 +00001032 page_info = get_rx_page_info(adapter, rxo, rxcp->rxq_idx);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001033 start = page_address(page_info->page) + page_info->page_offset;
1034 prefetch(start);
1035
1036 /* Copy data in the first descriptor of this completion */
Sathya Perla2e588f82011-03-11 02:49:26 +00001037 curr_frag_len = min(rxcp->pkt_size, rx_frag_size);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001038
1039 /* Copy the header portion into skb_data */
Sathya Perla2e588f82011-03-11 02:49:26 +00001040 hdr_len = min(BE_HDR_LEN, curr_frag_len);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001041 memcpy(skb->data, start, hdr_len);
1042 skb->len = curr_frag_len;
1043 if (curr_frag_len <= BE_HDR_LEN) { /* tiny packet */
1044 /* Complete packet has now been moved to data */
1045 put_page(page_info->page);
1046 skb->data_len = 0;
1047 skb->tail += curr_frag_len;
1048 } else {
1049 skb_shinfo(skb)->nr_frags = 1;
1050 skb_shinfo(skb)->frags[0].page = page_info->page;
1051 skb_shinfo(skb)->frags[0].page_offset =
1052 page_info->page_offset + hdr_len;
1053 skb_shinfo(skb)->frags[0].size = curr_frag_len - hdr_len;
1054 skb->data_len = curr_frag_len - hdr_len;
1055 skb->tail += hdr_len;
1056 }
Ajit Khaparde205859a2010-02-09 01:34:21 +00001057 page_info->page = NULL;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001058
Sathya Perla2e588f82011-03-11 02:49:26 +00001059 if (rxcp->pkt_size <= rx_frag_size) {
1060 BUG_ON(rxcp->num_rcvd != 1);
1061 return;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001062 }
1063
1064 /* More frags present for this completion */
Sathya Perla2e588f82011-03-11 02:49:26 +00001065 index_inc(&rxcp->rxq_idx, rxq->len);
1066 remaining = rxcp->pkt_size - curr_frag_len;
1067 for (i = 1, j = 0; i < rxcp->num_rcvd; i++) {
1068 page_info = get_rx_page_info(adapter, rxo, rxcp->rxq_idx);
1069 curr_frag_len = min(remaining, rx_frag_size);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001070
Ajit Khapardebd46cb62009-06-26 02:51:07 +00001071 /* Coalesce all frags from the same physical page in one slot */
1072 if (page_info->page_offset == 0) {
1073 /* Fresh page */
1074 j++;
1075 skb_shinfo(skb)->frags[j].page = page_info->page;
1076 skb_shinfo(skb)->frags[j].page_offset =
1077 page_info->page_offset;
1078 skb_shinfo(skb)->frags[j].size = 0;
1079 skb_shinfo(skb)->nr_frags++;
1080 } else {
1081 put_page(page_info->page);
1082 }
1083
1084 skb_shinfo(skb)->frags[j].size += curr_frag_len;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001085 skb->len += curr_frag_len;
1086 skb->data_len += curr_frag_len;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001087
Sathya Perla2e588f82011-03-11 02:49:26 +00001088 remaining -= curr_frag_len;
1089 index_inc(&rxcp->rxq_idx, rxq->len);
Ajit Khaparde205859a2010-02-09 01:34:21 +00001090 page_info->page = NULL;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001091 }
Ajit Khapardebd46cb62009-06-26 02:51:07 +00001092 BUG_ON(j > MAX_SKB_FRAGS);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001093}
1094
Ajit Khaparde5be93b92009-07-21 12:36:19 -07001095/* Process the RX completion indicated by rxcp when GRO is disabled */
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001096static void be_rx_compl_process(struct be_adapter *adapter,
Sathya Perla3abcded2010-10-03 22:12:27 -07001097 struct be_rx_obj *rxo,
Sathya Perla2e588f82011-03-11 02:49:26 +00001098 struct be_rx_compl_info *rxcp)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001099{
Michał Mirosław6332c8d2011-04-07 02:43:48 +00001100 struct net_device *netdev = adapter->netdev;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001101 struct sk_buff *skb;
Sathya Perla89420422010-02-17 01:35:26 +00001102
Michał Mirosław6332c8d2011-04-07 02:43:48 +00001103 skb = netdev_alloc_skb_ip_align(netdev, BE_HDR_LEN);
Sathya Perlaa058a632010-02-17 01:34:22 +00001104 if (unlikely(!skb)) {
Sathya Perlaac124ff2011-07-25 19:10:14 +00001105 rx_stats(rxo)->rx_drops_no_skbs++;
Sathya Perla3abcded2010-10-03 22:12:27 -07001106 be_rx_compl_discard(adapter, rxo, rxcp);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001107 return;
1108 }
1109
Sathya Perla2e588f82011-03-11 02:49:26 +00001110 skb_fill_rx_data(adapter, rxo, skb, rxcp);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001111
Michał Mirosław6332c8d2011-04-07 02:43:48 +00001112 if (likely((netdev->features & NETIF_F_RXCSUM) && csum_passed(rxcp)))
Ajit Khaparde728a9972009-04-13 15:41:22 -07001113 skb->ip_summed = CHECKSUM_UNNECESSARY;
Somnath Koturc6ce2f42010-10-25 01:11:58 +00001114 else
1115 skb_checksum_none_assert(skb);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001116
1117 skb->truesize = skb->len + sizeof(struct sk_buff);
Michał Mirosław6332c8d2011-04-07 02:43:48 +00001118 skb->protocol = eth_type_trans(skb, netdev);
Ajit Khaparde4b972912011-04-06 18:07:43 +00001119 if (adapter->netdev->features & NETIF_F_RXHASH)
1120 skb->rxhash = rxcp->rss_hash;
1121
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001122
Ajit Khaparde4c5102f2011-07-12 22:10:01 -07001123 if (unlikely(rxcp->vlanf))
1124 __vlan_hwaccel_put_tag(skb, rxcp->vlan_tag);
1125
1126 netif_receive_skb(skb);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001127}
1128
Ajit Khaparde5be93b92009-07-21 12:36:19 -07001129/* Process the RX completion indicated by rxcp when GRO is enabled */
1130static void be_rx_compl_process_gro(struct be_adapter *adapter,
Sathya Perla3abcded2010-10-03 22:12:27 -07001131 struct be_rx_obj *rxo,
Sathya Perla2e588f82011-03-11 02:49:26 +00001132 struct be_rx_compl_info *rxcp)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001133{
1134 struct be_rx_page_info *page_info;
Ajit Khaparde5be93b92009-07-21 12:36:19 -07001135 struct sk_buff *skb = NULL;
Sathya Perla3abcded2010-10-03 22:12:27 -07001136 struct be_queue_info *rxq = &rxo->q;
1137 struct be_eq_obj *eq_obj = &rxo->rx_eq;
Sathya Perla2e588f82011-03-11 02:49:26 +00001138 u16 remaining, curr_frag_len;
1139 u16 i, j;
Ajit Khaparde3968fa12011-02-20 11:41:53 +00001140
Ajit Khaparde5be93b92009-07-21 12:36:19 -07001141 skb = napi_get_frags(&eq_obj->napi);
1142 if (!skb) {
Sathya Perla3abcded2010-10-03 22:12:27 -07001143 be_rx_compl_discard(adapter, rxo, rxcp);
Ajit Khaparde5be93b92009-07-21 12:36:19 -07001144 return;
1145 }
1146
Sathya Perla2e588f82011-03-11 02:49:26 +00001147 remaining = rxcp->pkt_size;
1148 for (i = 0, j = -1; i < rxcp->num_rcvd; i++) {
1149 page_info = get_rx_page_info(adapter, rxo, rxcp->rxq_idx);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001150
1151 curr_frag_len = min(remaining, rx_frag_size);
1152
Ajit Khapardebd46cb62009-06-26 02:51:07 +00001153 /* Coalesce all frags from the same physical page in one slot */
1154 if (i == 0 || page_info->page_offset == 0) {
1155 /* First frag or Fresh page */
1156 j++;
Ajit Khaparde5be93b92009-07-21 12:36:19 -07001157 skb_shinfo(skb)->frags[j].page = page_info->page;
1158 skb_shinfo(skb)->frags[j].page_offset =
1159 page_info->page_offset;
1160 skb_shinfo(skb)->frags[j].size = 0;
Ajit Khapardebd46cb62009-06-26 02:51:07 +00001161 } else {
1162 put_page(page_info->page);
1163 }
Ajit Khaparde5be93b92009-07-21 12:36:19 -07001164 skb_shinfo(skb)->frags[j].size += curr_frag_len;
Ajit Khapardebd46cb62009-06-26 02:51:07 +00001165
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001166 remaining -= curr_frag_len;
Sathya Perla2e588f82011-03-11 02:49:26 +00001167 index_inc(&rxcp->rxq_idx, rxq->len);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001168 memset(page_info, 0, sizeof(*page_info));
1169 }
Ajit Khapardebd46cb62009-06-26 02:51:07 +00001170 BUG_ON(j > MAX_SKB_FRAGS);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001171
Ajit Khaparde5be93b92009-07-21 12:36:19 -07001172 skb_shinfo(skb)->nr_frags = j + 1;
Sathya Perla2e588f82011-03-11 02:49:26 +00001173 skb->len = rxcp->pkt_size;
1174 skb->data_len = rxcp->pkt_size;
1175 skb->truesize += rxcp->pkt_size;
Ajit Khaparde5be93b92009-07-21 12:36:19 -07001176 skb->ip_summed = CHECKSUM_UNNECESSARY;
Ajit Khaparde4b972912011-04-06 18:07:43 +00001177 if (adapter->netdev->features & NETIF_F_RXHASH)
1178 skb->rxhash = rxcp->rss_hash;
Ajit Khaparde5be93b92009-07-21 12:36:19 -07001179
Ajit Khaparde4c5102f2011-07-12 22:10:01 -07001180 if (unlikely(rxcp->vlanf))
1181 __vlan_hwaccel_put_tag(skb, rxcp->vlan_tag);
1182
1183 napi_gro_frags(&eq_obj->napi);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001184}
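
/*
 * Illustrative sketch, not driver code: the single-fragment shape of the
 * frag-mode GRO hand-off used above. The skb comes from the napi context
 * via napi_get_frags(), only frags[] metadata is filled in, and ownership
 * passes back to the GRO layer on napi_gro_frags().
 */
static void demo_gro_one_frag(struct be_eq_obj *eq_obj, struct page *page,
		u16 offset, u16 len)
{
	struct sk_buff *skb = napi_get_frags(&eq_obj->napi);

	if (!skb)
		return;		/* caller must drop the hw buffer itself */

	skb_shinfo(skb)->frags[0].page = page;
	skb_shinfo(skb)->frags[0].page_offset = offset;
	skb_shinfo(skb)->frags[0].size = len;
	skb_shinfo(skb)->nr_frags = 1;
	skb->len = len;
	skb->data_len = len;
	skb->truesize += len;
	skb->ip_summed = CHECKSUM_UNNECESSARY;
	napi_gro_frags(&eq_obj->napi);
}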
1185
Sathya Perla2e588f82011-03-11 02:49:26 +00001186static void be_parse_rx_compl_v1(struct be_adapter *adapter,
1187 struct be_eth_rx_compl *compl,
1188 struct be_rx_compl_info *rxcp)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001189{
Sathya Perla2e588f82011-03-11 02:49:26 +00001190 rxcp->pkt_size =
1191 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, pktsize, compl);
1192 rxcp->vlanf = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, vtp, compl);
1193 rxcp->err = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, err, compl);
1194 rxcp->tcpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, tcpf, compl);
Padmanabh Ratnakar9ecb42f2011-03-15 14:57:09 -07001195 rxcp->udpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, udpf, compl);
Sathya Perla2e588f82011-03-11 02:49:26 +00001196 rxcp->ip_csum =
1197 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, ipcksm, compl);
1198 rxcp->l4_csum =
1199 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, l4_cksm, compl);
1200 rxcp->ipv6 =
1201 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, ip_version, compl);
1202 rxcp->rxq_idx =
1203 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, fragndx, compl);
1204 rxcp->num_rcvd =
1205 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, numfrags, compl);
1206 rxcp->pkt_type =
1207 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, cast_enc, compl);
Ajit Khaparde4b972912011-04-06 18:07:43 +00001208 rxcp->rss_hash =
	1209		AMAP_GET_BITS(struct amap_eth_rx_compl_v1, rsshash, compl);
Sathya Perla15d72182011-03-21 20:49:26 +00001210 if (rxcp->vlanf) {
1211 rxcp->vtm = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, vtm,
David S. Miller3c709f82011-05-11 14:26:15 -04001212 compl);
1213 rxcp->vlan_tag = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, vlan_tag,
1214 compl);
Sathya Perla15d72182011-03-21 20:49:26 +00001215 }
Sathya Perla12004ae2011-08-02 19:57:46 +00001216 rxcp->port = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, port, compl);
Sathya Perla2e588f82011-03-11 02:49:26 +00001217}
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001218
Sathya Perla2e588f82011-03-11 02:49:26 +00001219static void be_parse_rx_compl_v0(struct be_adapter *adapter,
1220 struct be_eth_rx_compl *compl,
1221 struct be_rx_compl_info *rxcp)
1222{
1223 rxcp->pkt_size =
1224 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, pktsize, compl);
1225 rxcp->vlanf = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, vtp, compl);
1226 rxcp->err = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, err, compl);
1227 rxcp->tcpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, tcpf, compl);
Padmanabh Ratnakar9ecb42f2011-03-15 14:57:09 -07001228 rxcp->udpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, udpf, compl);
Sathya Perla2e588f82011-03-11 02:49:26 +00001229 rxcp->ip_csum =
1230 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, ipcksm, compl);
1231 rxcp->l4_csum =
1232 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, l4_cksm, compl);
1233 rxcp->ipv6 =
1234 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, ip_version, compl);
1235 rxcp->rxq_idx =
1236 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, fragndx, compl);
1237 rxcp->num_rcvd =
1238 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, numfrags, compl);
1239 rxcp->pkt_type =
1240 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, cast_enc, compl);
Ajit Khaparde4b972912011-04-06 18:07:43 +00001241 rxcp->rss_hash =
	1242		AMAP_GET_BITS(struct amap_eth_rx_compl_v0, rsshash, compl);
Sathya Perla15d72182011-03-21 20:49:26 +00001243 if (rxcp->vlanf) {
1244 rxcp->vtm = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, vtm,
David S. Miller3c709f82011-05-11 14:26:15 -04001245 compl);
1246 rxcp->vlan_tag = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, vlan_tag,
1247 compl);
Sathya Perla15d72182011-03-21 20:49:26 +00001248 }
Sathya Perla12004ae2011-08-02 19:57:46 +00001249 rxcp->port = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, port, compl);
Sathya Perla2e588f82011-03-11 02:49:26 +00001250}
1251
1252static struct be_rx_compl_info *be_rx_compl_get(struct be_rx_obj *rxo)
1253{
1254 struct be_eth_rx_compl *compl = queue_tail_node(&rxo->cq);
1255 struct be_rx_compl_info *rxcp = &rxo->rxcp;
1256 struct be_adapter *adapter = rxo->adapter;
1257
	1258	/* For checking the valid bit it is OK to use either definition as the
1259 * valid bit is at the same position in both v0 and v1 Rx compl */
1260 if (compl->dw[offsetof(struct amap_eth_rx_compl_v1, valid) / 32] == 0)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001261 return NULL;
1262
Sathya Perlaf3eb62d2010-06-29 00:11:17 +00001263 rmb();
Sathya Perla2e588f82011-03-11 02:49:26 +00001264 be_dws_le_to_cpu(compl, sizeof(*compl));
1265
1266 if (adapter->be3_native)
1267 be_parse_rx_compl_v1(adapter, compl, rxcp);
1268 else
1269 be_parse_rx_compl_v0(adapter, compl, rxcp);
1270
Sathya Perla15d72182011-03-21 20:49:26 +00001271 if (rxcp->vlanf) {
	1272		/* vlanf could be wrongly set in some cards;
	1273		 * ignore it if vtm is not set */
1274 if ((adapter->function_mode & 0x400) && !rxcp->vtm)
1275 rxcp->vlanf = 0;
Sathya Perla2e588f82011-03-11 02:49:26 +00001276
Sathya Perla15d72182011-03-21 20:49:26 +00001277 if (!lancer_chip(adapter))
David S. Miller3c709f82011-05-11 14:26:15 -04001278 rxcp->vlan_tag = swab16(rxcp->vlan_tag);
Sathya Perla2e588f82011-03-11 02:49:26 +00001279
Somnath Kotur939cf302011-08-18 21:51:49 -07001280 if (adapter->pvid == (rxcp->vlan_tag & VLAN_VID_MASK) &&
David S. Miller3c709f82011-05-11 14:26:15 -04001281 !adapter->vlan_tag[rxcp->vlan_tag])
Sathya Perla15d72182011-03-21 20:49:26 +00001282 rxcp->vlanf = 0;
1283 }
Sathya Perla2e588f82011-03-11 02:49:26 +00001284
	1285	/* As the compl has been parsed, reset it; we won't touch it again */
1286 compl->dw[offsetof(struct amap_eth_rx_compl_v1, valid) / 32] = 0;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001287
Sathya Perla3abcded2010-10-03 22:12:27 -07001288 queue_tail_inc(&rxo->cq);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001289 return rxcp;
1290}
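
/*
 * Illustrative sketch, not driver code: the completion-ring consume
 * pattern used by be_rx_compl_get()/be_tx_compl_get(), reduced to its
 * essentials ('demo_compl' is a hypothetical entry). Test the DMA'ed
 * valid bit first, order that test before reading the payload with
 * rmb(), clear the entry so the next producer wrap is detectable, then
 * advance the tail.
 */
struct demo_compl {
	u32 valid;
	u32 payload;
};

static struct demo_compl *demo_compl_get(struct demo_compl *ring,
		u16 *tail, u16 len)
{
	struct demo_compl *compl = &ring[*tail];

	if (!compl->valid)
		return NULL;

	rmb();			/* read valid before the payload */
	compl->valid = 0;	/* reset; it won't be looked at again */
	*tail = (*tail + 1) % len;
	return compl;
}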
1291
Eric Dumazet1829b082011-03-01 05:48:12 +00001292static inline struct page *be_alloc_pages(u32 size, gfp_t gfp)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001293{
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001294 u32 order = get_order(size);
Eric Dumazet1829b082011-03-01 05:48:12 +00001295
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001296 if (order > 0)
Eric Dumazet1829b082011-03-01 05:48:12 +00001297 gfp |= __GFP_COMP;
1298 return alloc_pages(gfp, order);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001299}
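
/*
 * Illustrative sketch, not driver code: why __GFP_COMP matters above.
 * The rx path drops fragment references with put_page(), which can only
 * free a multi-page block if it was allocated as a compound page; a
 * plain high-order allocation would need __free_pages(page, order)
 * instead, which the skb-based rx path never issues.
 */
static void demo_compound_refs(void)
{
	struct page *p = be_alloc_pages(2 * PAGE_SIZE, GFP_ATOMIC);

	if (!p)
		return;

	get_page(p);	/* e.g. a second fragment posted from the page */
	put_page(p);
	put_page(p);	/* last ref frees both pages together */
}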
1300
1301/*
	1302 * Allocate a page, split it into fragments of size rx_frag_size and post as
1303 * receive buffers to BE
1304 */
Eric Dumazet1829b082011-03-01 05:48:12 +00001305static void be_post_rx_frags(struct be_rx_obj *rxo, gfp_t gfp)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001306{
Sathya Perla3abcded2010-10-03 22:12:27 -07001307 struct be_adapter *adapter = rxo->adapter;
1308 struct be_rx_page_info *page_info_tbl = rxo->page_info_tbl;
Sathya Perla26d92f92010-01-21 22:52:08 -08001309 struct be_rx_page_info *page_info = NULL, *prev_page_info = NULL;
Sathya Perla3abcded2010-10-03 22:12:27 -07001310 struct be_queue_info *rxq = &rxo->q;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001311 struct page *pagep = NULL;
1312 struct be_eth_rx_d *rxd;
1313 u64 page_dmaaddr = 0, frag_dmaaddr;
1314 u32 posted, page_offset = 0;
1315
Sathya Perla3abcded2010-10-03 22:12:27 -07001316 page_info = &rxo->page_info_tbl[rxq->head];
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001317 for (posted = 0; posted < MAX_RX_POST && !page_info->page; posted++) {
1318 if (!pagep) {
Eric Dumazet1829b082011-03-01 05:48:12 +00001319 pagep = be_alloc_pages(adapter->big_page_size, gfp);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001320 if (unlikely(!pagep)) {
Sathya Perlaac124ff2011-07-25 19:10:14 +00001321 rx_stats(rxo)->rx_post_fail++;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001322 break;
1323 }
Ivan Vecera2b7bceb2011-02-02 08:05:12 +00001324 page_dmaaddr = dma_map_page(&adapter->pdev->dev, pagep,
1325 0, adapter->big_page_size,
1326 DMA_FROM_DEVICE);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001327 page_info->page_offset = 0;
1328 } else {
1329 get_page(pagep);
1330 page_info->page_offset = page_offset + rx_frag_size;
1331 }
1332 page_offset = page_info->page_offset;
1333 page_info->page = pagep;
FUJITA Tomonorifac6da52010-04-01 16:53:22 +00001334 dma_unmap_addr_set(page_info, bus, page_dmaaddr);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001335 frag_dmaaddr = page_dmaaddr + page_info->page_offset;
1336
1337 rxd = queue_head_node(rxq);
1338 rxd->fragpa_lo = cpu_to_le32(frag_dmaaddr & 0xFFFFFFFF);
1339 rxd->fragpa_hi = cpu_to_le32(upper_32_bits(frag_dmaaddr));
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001340
1341 /* Any space left in the current big page for another frag? */
1342 if ((page_offset + rx_frag_size + rx_frag_size) >
1343 adapter->big_page_size) {
1344 pagep = NULL;
1345 page_info->last_page_user = true;
1346 }
Sathya Perla26d92f92010-01-21 22:52:08 -08001347
1348 prev_page_info = page_info;
1349 queue_head_inc(rxq);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001350 page_info = &page_info_tbl[rxq->head];
1351 }
1352 if (pagep)
Sathya Perla26d92f92010-01-21 22:52:08 -08001353 prev_page_info->last_page_user = true;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001354
1355 if (posted) {
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001356 atomic_add(posted, &rxq->used);
Sathya Perla8788fdc2009-07-27 22:52:03 +00001357 be_rxq_notify(adapter, rxq->id, posted);
Sathya Perlaea1dae12009-03-19 23:56:20 -07001358 } else if (atomic_read(&rxq->used) == 0) {
1359 /* Let be_worker replenish when memory is available */
Sathya Perla3abcded2010-10-03 22:12:27 -07001360 rxo->rx_post_starved = true;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001361 }
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001362}
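
/*
 * Illustrative arithmetic for the "space left" check above, assuming the
 * module-default rx_frag_size of 2048: big_page_size is
 * (1 << get_order(rx_frag_size)) * PAGE_SIZE, so with 4K pages a big
 * page holds 4096 / 2048 = 2 fragments and last_page_user is set on the
 * second posting.
 */
static inline u32 demo_frags_per_big_page(u32 big_page_size)
{
	return big_page_size / rx_frag_size;	/* both powers of two */
}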
1363
Sathya Perla5fb379e2009-06-18 00:02:59 +00001364static struct be_eth_tx_compl *be_tx_compl_get(struct be_queue_info *tx_cq)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001365{
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001366 struct be_eth_tx_compl *txcp = queue_tail_node(tx_cq);
1367
1368 if (txcp->dw[offsetof(struct amap_eth_tx_compl, valid) / 32] == 0)
1369 return NULL;
1370
Sathya Perlaf3eb62d2010-06-29 00:11:17 +00001371 rmb();
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001372 be_dws_le_to_cpu(txcp, sizeof(*txcp));
1373
1374 txcp->dw[offsetof(struct amap_eth_tx_compl, valid) / 32] = 0;
1375
1376 queue_tail_inc(tx_cq);
1377 return txcp;
1378}
1379
Sathya Perla3c8def92011-06-12 20:01:58 +00001380static u16 be_tx_compl_process(struct be_adapter *adapter,
1381 struct be_tx_obj *txo, u16 last_index)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001382{
Sathya Perla3c8def92011-06-12 20:01:58 +00001383 struct be_queue_info *txq = &txo->q;
Alexander Duycka73b7962009-12-02 16:48:18 +00001384 struct be_eth_wrb *wrb;
Sathya Perla3c8def92011-06-12 20:01:58 +00001385 struct sk_buff **sent_skbs = txo->sent_skb_list;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001386 struct sk_buff *sent_skb;
Sathya Perlaec43b1a2010-03-22 20:41:34 +00001387 u16 cur_index, num_wrbs = 1; /* account for hdr wrb */
1388 bool unmap_skb_hdr = true;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001389
Sathya Perlaec43b1a2010-03-22 20:41:34 +00001390 sent_skb = sent_skbs[txq->tail];
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001391 BUG_ON(!sent_skb);
Sathya Perlaec43b1a2010-03-22 20:41:34 +00001392 sent_skbs[txq->tail] = NULL;
1393
1394 /* skip header wrb */
Alexander Duycka73b7962009-12-02 16:48:18 +00001395 queue_tail_inc(txq);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001396
Sathya Perlaec43b1a2010-03-22 20:41:34 +00001397 do {
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001398 cur_index = txq->tail;
Alexander Duycka73b7962009-12-02 16:48:18 +00001399 wrb = queue_tail_node(txq);
Ivan Vecera2b7bceb2011-02-02 08:05:12 +00001400 unmap_tx_frag(&adapter->pdev->dev, wrb,
1401 (unmap_skb_hdr && skb_headlen(sent_skb)));
Sathya Perlaec43b1a2010-03-22 20:41:34 +00001402 unmap_skb_hdr = false;
1403
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001404 num_wrbs++;
1405 queue_tail_inc(txq);
Sathya Perlaec43b1a2010-03-22 20:41:34 +00001406 } while (cur_index != last_index);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001407
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001408 kfree_skb(sent_skb);
Padmanabh Ratnakar4d586b82011-05-10 05:13:57 +00001409 return num_wrbs;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001410}
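
/*
 * The walk above relies on wrapping ring arithmetic; stand-alone
 * equivalents (illustrative sketch, hypothetical helpers) of the
 * stepping and of the wrb count it accumulates (header wrb included):
 */
static inline u16 demo_index_inc(u16 index, u16 ring_len)
{
	return (index + 1) % ring_len;
}

static inline u16 demo_num_wrbs(u16 tail, u16 last_index, u16 ring_len)
{
	return (u16)((last_index - tail + ring_len) % ring_len) + 1;
}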
1411
Sathya Perla859b1e42009-08-10 03:43:51 +00001412static inline struct be_eq_entry *event_get(struct be_eq_obj *eq_obj)
1413{
1414 struct be_eq_entry *eqe = queue_tail_node(&eq_obj->q);
1415
1416 if (!eqe->evt)
1417 return NULL;
1418
Sathya Perlaf3eb62d2010-06-29 00:11:17 +00001419 rmb();
Sathya Perla859b1e42009-08-10 03:43:51 +00001420 eqe->evt = le32_to_cpu(eqe->evt);
1421 queue_tail_inc(&eq_obj->q);
1422 return eqe;
1423}
1424
1425static int event_handle(struct be_adapter *adapter,
Sathya Perla3c8def92011-06-12 20:01:58 +00001426 struct be_eq_obj *eq_obj,
1427 bool rearm)
Sathya Perla859b1e42009-08-10 03:43:51 +00001428{
1429 struct be_eq_entry *eqe;
1430 u16 num = 0;
1431
1432 while ((eqe = event_get(eq_obj)) != NULL) {
1433 eqe->evt = 0;
1434 num++;
1435 }
1436
1437 /* Deal with any spurious interrupts that come
1438 * without events
1439 */
Sathya Perla3c8def92011-06-12 20:01:58 +00001440 if (!num)
1441 rearm = true;
1442
1443 be_eq_notify(adapter, eq_obj->q.id, rearm, true, num);
Sathya Perla859b1e42009-08-10 03:43:51 +00001444 if (num)
1445 napi_schedule(&eq_obj->napi);
1446
1447 return num;
1448}
1449
1450/* Just read and notify events without processing them.
1451 * Used at the time of destroying event queues */
1452static void be_eq_clean(struct be_adapter *adapter,
1453 struct be_eq_obj *eq_obj)
1454{
1455 struct be_eq_entry *eqe;
1456 u16 num = 0;
1457
1458 while ((eqe = event_get(eq_obj)) != NULL) {
1459 eqe->evt = 0;
1460 num++;
1461 }
1462
1463 if (num)
1464 be_eq_notify(adapter, eq_obj->q.id, false, true, num);
1465}
1466
Sathya Perla3abcded2010-10-03 22:12:27 -07001467static void be_rx_q_clean(struct be_adapter *adapter, struct be_rx_obj *rxo)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001468{
1469 struct be_rx_page_info *page_info;
Sathya Perla3abcded2010-10-03 22:12:27 -07001470 struct be_queue_info *rxq = &rxo->q;
1471 struct be_queue_info *rx_cq = &rxo->cq;
Sathya Perla2e588f82011-03-11 02:49:26 +00001472 struct be_rx_compl_info *rxcp;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001473 u16 tail;
1474
1475 /* First cleanup pending rx completions */
Sathya Perla3abcded2010-10-03 22:12:27 -07001476 while ((rxcp = be_rx_compl_get(rxo)) != NULL) {
1477 be_rx_compl_discard(adapter, rxo, rxcp);
Sathya Perla64642812010-12-01 01:04:17 +00001478 be_cq_notify(adapter, rx_cq->id, false, 1);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001479 }
1480
	1481	/* Then free posted rx buffers that were not used */
1482 tail = (rxq->head + rxq->len - atomic_read(&rxq->used)) % rxq->len;
Sathya Perlacdab23b2009-08-10 03:43:23 +00001483 for (; atomic_read(&rxq->used) > 0; index_inc(&tail, rxq->len)) {
Sathya Perla3abcded2010-10-03 22:12:27 -07001484 page_info = get_rx_page_info(adapter, rxo, tail);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001485 put_page(page_info->page);
1486 memset(page_info, 0, sizeof(*page_info));
1487 }
1488 BUG_ON(atomic_read(&rxq->used));
Sathya Perla482c9e72011-06-29 23:33:17 +00001489 rxq->tail = rxq->head = 0;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001490}
1491
Sathya Perla3c8def92011-06-12 20:01:58 +00001492static void be_tx_compl_clean(struct be_adapter *adapter,
1493 struct be_tx_obj *txo)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001494{
Sathya Perla3c8def92011-06-12 20:01:58 +00001495 struct be_queue_info *tx_cq = &txo->cq;
1496 struct be_queue_info *txq = &txo->q;
Sathya Perlaa8e91792009-08-10 03:42:43 +00001497 struct be_eth_tx_compl *txcp;
Padmanabh Ratnakar4d586b82011-05-10 05:13:57 +00001498 u16 end_idx, cmpl = 0, timeo = 0, num_wrbs = 0;
Sathya Perla3c8def92011-06-12 20:01:58 +00001499 struct sk_buff **sent_skbs = txo->sent_skb_list;
Sathya Perlab03388d2010-02-18 00:37:17 +00001500 struct sk_buff *sent_skb;
1501 bool dummy_wrb;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001502
Sathya Perlaa8e91792009-08-10 03:42:43 +00001503 /* Wait for a max of 200ms for all the tx-completions to arrive. */
1504 do {
1505 while ((txcp = be_tx_compl_get(tx_cq))) {
1506 end_idx = AMAP_GET_BITS(struct amap_eth_tx_compl,
1507 wrb_index, txcp);
Sathya Perla3c8def92011-06-12 20:01:58 +00001508 num_wrbs += be_tx_compl_process(adapter, txo, end_idx);
Sathya Perlaa8e91792009-08-10 03:42:43 +00001509 cmpl++;
1510 }
1511 if (cmpl) {
1512 be_cq_notify(adapter, tx_cq->id, false, cmpl);
Padmanabh Ratnakar4d586b82011-05-10 05:13:57 +00001513 atomic_sub(num_wrbs, &txq->used);
Sathya Perlaa8e91792009-08-10 03:42:43 +00001514 cmpl = 0;
Padmanabh Ratnakar4d586b82011-05-10 05:13:57 +00001515 num_wrbs = 0;
Sathya Perlaa8e91792009-08-10 03:42:43 +00001516 }
1517
1518 if (atomic_read(&txq->used) == 0 || ++timeo > 200)
1519 break;
1520
1521 mdelay(1);
1522 } while (true);
1523
1524 if (atomic_read(&txq->used))
1525 dev_err(&adapter->pdev->dev, "%d pending tx-completions\n",
1526 atomic_read(&txq->used));
Sathya Perlab03388d2010-02-18 00:37:17 +00001527
1528 /* free posted tx for which compls will never arrive */
1529 while (atomic_read(&txq->used)) {
1530 sent_skb = sent_skbs[txq->tail];
1531 end_idx = txq->tail;
1532 index_adv(&end_idx,
Sathya Perlafe6d2a32010-11-21 23:25:50 +00001533 wrb_cnt_for_skb(adapter, sent_skb, &dummy_wrb) - 1,
1534 txq->len);
Sathya Perla3c8def92011-06-12 20:01:58 +00001535 num_wrbs = be_tx_compl_process(adapter, txo, end_idx);
Padmanabh Ratnakar4d586b82011-05-10 05:13:57 +00001536 atomic_sub(num_wrbs, &txq->used);
Sathya Perlab03388d2010-02-18 00:37:17 +00001537 }
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001538}
1539
Sathya Perla5fb379e2009-06-18 00:02:59 +00001540static void be_mcc_queues_destroy(struct be_adapter *adapter)
1541{
1542 struct be_queue_info *q;
Sathya Perla5fb379e2009-06-18 00:02:59 +00001543
Sathya Perla8788fdc2009-07-27 22:52:03 +00001544 q = &adapter->mcc_obj.q;
Sathya Perla5fb379e2009-06-18 00:02:59 +00001545 if (q->created)
Sathya Perla8788fdc2009-07-27 22:52:03 +00001546 be_cmd_q_destroy(adapter, q, QTYPE_MCCQ);
Sathya Perla5fb379e2009-06-18 00:02:59 +00001547 be_queue_free(adapter, q);
1548
Sathya Perla8788fdc2009-07-27 22:52:03 +00001549 q = &adapter->mcc_obj.cq;
Sathya Perla5fb379e2009-06-18 00:02:59 +00001550 if (q->created)
Sathya Perla8788fdc2009-07-27 22:52:03 +00001551 be_cmd_q_destroy(adapter, q, QTYPE_CQ);
Sathya Perla5fb379e2009-06-18 00:02:59 +00001552 be_queue_free(adapter, q);
1553}
1554
1555/* Must be called only after TX qs are created as MCC shares TX EQ */
1556static int be_mcc_queues_create(struct be_adapter *adapter)
1557{
1558 struct be_queue_info *q, *cq;
Sathya Perla5fb379e2009-06-18 00:02:59 +00001559
1560 /* Alloc MCC compl queue */
Sathya Perla8788fdc2009-07-27 22:52:03 +00001561 cq = &adapter->mcc_obj.cq;
Sathya Perla5fb379e2009-06-18 00:02:59 +00001562 if (be_queue_alloc(adapter, cq, MCC_CQ_LEN,
Sathya Perlaefd2e402009-07-27 22:53:10 +00001563 sizeof(struct be_mcc_compl)))
Sathya Perla5fb379e2009-06-18 00:02:59 +00001564 goto err;
1565
1566 /* Ask BE to create MCC compl queue; share TX's eq */
Sathya Perla8788fdc2009-07-27 22:52:03 +00001567 if (be_cmd_cq_create(adapter, cq, &adapter->tx_eq.q, false, true, 0))
Sathya Perla5fb379e2009-06-18 00:02:59 +00001568 goto mcc_cq_free;
1569
1570 /* Alloc MCC queue */
Sathya Perla8788fdc2009-07-27 22:52:03 +00001571 q = &adapter->mcc_obj.q;
Sathya Perla5fb379e2009-06-18 00:02:59 +00001572 if (be_queue_alloc(adapter, q, MCC_Q_LEN, sizeof(struct be_mcc_wrb)))
1573 goto mcc_cq_destroy;
1574
1575 /* Ask BE to create MCC queue */
Sathya Perla8788fdc2009-07-27 22:52:03 +00001576 if (be_cmd_mccq_create(adapter, q, cq))
Sathya Perla5fb379e2009-06-18 00:02:59 +00001577 goto mcc_q_free;
1578
1579 return 0;
1580
1581mcc_q_free:
1582 be_queue_free(adapter, q);
1583mcc_cq_destroy:
Sathya Perla8788fdc2009-07-27 22:52:03 +00001584 be_cmd_q_destroy(adapter, cq, QTYPE_CQ);
Sathya Perla5fb379e2009-06-18 00:02:59 +00001585mcc_cq_free:
1586 be_queue_free(adapter, cq);
1587err:
1588 return -1;
1589}
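
/*
 * The error handling above is the usual kernel create/unwind idiom: each
 * failure label undoes exactly the steps that already succeeded, in
 * reverse order. The bare shape of the pattern (illustrative sketch; the
 * demo_ name is hypothetical):
 */
static int demo_create_two(struct be_adapter *adapter,
		struct be_queue_info *first, struct be_queue_info *second)
{
	if (be_queue_alloc(adapter, first, MCC_CQ_LEN,
			sizeof(struct be_mcc_compl)))
		goto err;

	if (be_queue_alloc(adapter, second, MCC_Q_LEN,
			sizeof(struct be_mcc_wrb)))
		goto free_first;

	return 0;

free_first:
	be_queue_free(adapter, first);
err:
	return -1;
}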
1590
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001591static void be_tx_queues_destroy(struct be_adapter *adapter)
1592{
1593 struct be_queue_info *q;
Sathya Perla3c8def92011-06-12 20:01:58 +00001594 struct be_tx_obj *txo;
1595 u8 i;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001596
Sathya Perla3c8def92011-06-12 20:01:58 +00001597 for_all_tx_queues(adapter, txo, i) {
1598 q = &txo->q;
1599 if (q->created)
1600 be_cmd_q_destroy(adapter, q, QTYPE_TXQ);
1601 be_queue_free(adapter, q);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001602
Sathya Perla3c8def92011-06-12 20:01:58 +00001603 q = &txo->cq;
1604 if (q->created)
1605 be_cmd_q_destroy(adapter, q, QTYPE_CQ);
1606 be_queue_free(adapter, q);
1607 }
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001608
Sathya Perla859b1e42009-08-10 03:43:51 +00001609 /* Clear any residual events */
1610 be_eq_clean(adapter, &adapter->tx_eq);
1611
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001612 q = &adapter->tx_eq.q;
1613 if (q->created)
Sathya Perla8788fdc2009-07-27 22:52:03 +00001614 be_cmd_q_destroy(adapter, q, QTYPE_EQ);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001615 be_queue_free(adapter, q);
1616}
1617
Sathya Perla3c8def92011-06-12 20:01:58 +00001618/* One TX event queue is shared by all TX compl qs */
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001619static int be_tx_queues_create(struct be_adapter *adapter)
1620{
1621 struct be_queue_info *eq, *q, *cq;
Sathya Perla3c8def92011-06-12 20:01:58 +00001622 struct be_tx_obj *txo;
1623 u8 i;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001624
1625 adapter->tx_eq.max_eqd = 0;
1626 adapter->tx_eq.min_eqd = 0;
1627 adapter->tx_eq.cur_eqd = 96;
1628 adapter->tx_eq.enable_aic = false;
Sathya Perla3c8def92011-06-12 20:01:58 +00001629
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001630 eq = &adapter->tx_eq.q;
Sathya Perla3c8def92011-06-12 20:01:58 +00001631 if (be_queue_alloc(adapter, eq, EVNT_Q_LEN,
1632 sizeof(struct be_eq_entry)))
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001633 return -1;
1634
Sathya Perla8788fdc2009-07-27 22:52:03 +00001635 if (be_cmd_eq_create(adapter, eq, adapter->tx_eq.cur_eqd))
Sathya Perla3c8def92011-06-12 20:01:58 +00001636 goto err;
Padmanabh Ratnakarecd62102011-04-03 01:54:11 +00001637 adapter->tx_eq.eq_idx = adapter->eq_next_idx++;
Sathya Perlafe6d2a32010-11-21 23:25:50 +00001638
Sathya Perla3c8def92011-06-12 20:01:58 +00001639 for_all_tx_queues(adapter, txo, i) {
1640 cq = &txo->cq;
1641 if (be_queue_alloc(adapter, cq, TX_CQ_LEN,
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001642 sizeof(struct be_eth_tx_compl)))
Sathya Perla3c8def92011-06-12 20:01:58 +00001643 goto err;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001644
Sathya Perla3c8def92011-06-12 20:01:58 +00001645 if (be_cmd_cq_create(adapter, cq, eq, false, false, 3))
1646 goto err;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001647
Sathya Perla3c8def92011-06-12 20:01:58 +00001648 q = &txo->q;
1649 if (be_queue_alloc(adapter, q, TX_Q_LEN,
1650 sizeof(struct be_eth_wrb)))
1651 goto err;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001652
Sathya Perla3c8def92011-06-12 20:01:58 +00001653 if (be_cmd_txq_create(adapter, q, cq))
1654 goto err;
1655 }
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001656 return 0;
1657
Sathya Perla3c8def92011-06-12 20:01:58 +00001658err:
1659 be_tx_queues_destroy(adapter);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001660 return -1;
1661}
1662
1663static void be_rx_queues_destroy(struct be_adapter *adapter)
1664{
1665 struct be_queue_info *q;
Sathya Perla3abcded2010-10-03 22:12:27 -07001666 struct be_rx_obj *rxo;
1667 int i;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001668
Sathya Perla3abcded2010-10-03 22:12:27 -07001669 for_all_rx_queues(adapter, rxo, i) {
Sathya Perla482c9e72011-06-29 23:33:17 +00001670 be_queue_free(adapter, &rxo->q);
Sathya Perla89420422010-02-17 01:35:26 +00001671
Sathya Perla3abcded2010-10-03 22:12:27 -07001672 q = &rxo->cq;
1673 if (q->created)
1674 be_cmd_q_destroy(adapter, q, QTYPE_CQ);
1675 be_queue_free(adapter, q);
1676
Sathya Perla3abcded2010-10-03 22:12:27 -07001677 q = &rxo->rx_eq.q;
Sathya Perla482c9e72011-06-29 23:33:17 +00001678 if (q->created)
Sathya Perla3abcded2010-10-03 22:12:27 -07001679 be_cmd_q_destroy(adapter, q, QTYPE_EQ);
Sathya Perla3abcded2010-10-03 22:12:27 -07001680 be_queue_free(adapter, q);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001681 }
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001682}
1683
Sathya Perlaac6a0c42011-03-21 20:49:25 +00001684static u32 be_num_rxqs_want(struct be_adapter *adapter)
1685{
Sathya Perlac814fd32011-06-26 20:41:25 +00001686 if ((adapter->function_caps & BE_FUNCTION_CAPS_RSS) &&
Sathya Perlaac6a0c42011-03-21 20:49:25 +00001687 !adapter->sriov_enabled && !(adapter->function_mode & 0x400)) {
1688 return 1 + MAX_RSS_QS; /* one default non-RSS queue */
1689 } else {
1690 dev_warn(&adapter->pdev->dev,
1691 "No support for multiple RX queues\n");
1692 return 1;
1693 }
1694}
1695
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001696static int be_rx_queues_create(struct be_adapter *adapter)
1697{
1698 struct be_queue_info *eq, *q, *cq;
Sathya Perla3abcded2010-10-03 22:12:27 -07001699 struct be_rx_obj *rxo;
1700 int rc, i;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001701
Sathya Perlaac6a0c42011-03-21 20:49:25 +00001702 adapter->num_rx_qs = min(be_num_rxqs_want(adapter),
1703 msix_enabled(adapter) ?
1704 adapter->num_msix_vec - 1 : 1);
1705 if (adapter->num_rx_qs != MAX_RX_QS)
1706 dev_warn(&adapter->pdev->dev,
	1707			"Can create only %d RX queues\n", adapter->num_rx_qs);
1708
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001709 adapter->big_page_size = (1 << get_order(rx_frag_size)) * PAGE_SIZE;
Sathya Perla3abcded2010-10-03 22:12:27 -07001710 for_all_rx_queues(adapter, rxo, i) {
1711 rxo->adapter = adapter;
1712 rxo->rx_eq.max_eqd = BE_MAX_EQD;
1713 rxo->rx_eq.enable_aic = true;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001714
Sathya Perla3abcded2010-10-03 22:12:27 -07001715 /* EQ */
1716 eq = &rxo->rx_eq.q;
1717 rc = be_queue_alloc(adapter, eq, EVNT_Q_LEN,
1718 sizeof(struct be_eq_entry));
1719 if (rc)
1720 goto err;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001721
Sathya Perla3abcded2010-10-03 22:12:27 -07001722 rc = be_cmd_eq_create(adapter, eq, rxo->rx_eq.cur_eqd);
1723 if (rc)
1724 goto err;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001725
Padmanabh Ratnakarecd62102011-04-03 01:54:11 +00001726 rxo->rx_eq.eq_idx = adapter->eq_next_idx++;
Sathya Perlafe6d2a32010-11-21 23:25:50 +00001727
Sathya Perla3abcded2010-10-03 22:12:27 -07001728 /* CQ */
1729 cq = &rxo->cq;
1730 rc = be_queue_alloc(adapter, cq, RX_CQ_LEN,
1731 sizeof(struct be_eth_rx_compl));
1732 if (rc)
1733 goto err;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001734
Sathya Perla3abcded2010-10-03 22:12:27 -07001735 rc = be_cmd_cq_create(adapter, cq, eq, false, false, 3);
1736 if (rc)
1737 goto err;
Sathya Perla482c9e72011-06-29 23:33:17 +00001738
1739 /* Rx Q - will be created in be_open() */
Sathya Perla3abcded2010-10-03 22:12:27 -07001740 q = &rxo->q;
1741 rc = be_queue_alloc(adapter, q, RX_Q_LEN,
1742 sizeof(struct be_eth_rx_d));
1743 if (rc)
1744 goto err;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001745
Sathya Perla3abcded2010-10-03 22:12:27 -07001746 }
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001747
1748 return 0;
Sathya Perla3abcded2010-10-03 22:12:27 -07001749err:
1750 be_rx_queues_destroy(adapter);
1751 return -1;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001752}
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001753
Sathya Perlafe6d2a32010-11-21 23:25:50 +00001754static bool event_peek(struct be_eq_obj *eq_obj)
Sathya Perlab628bde2009-08-17 00:58:26 +00001755{
Sathya Perlafe6d2a32010-11-21 23:25:50 +00001756 struct be_eq_entry *eqe = queue_tail_node(&eq_obj->q);
	1757
	1758	return eqe->evt != 0;
Sathya Perlab628bde2009-08-17 00:58:26 +00001761}
1762
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001763static irqreturn_t be_intx(int irq, void *dev)
1764{
1765 struct be_adapter *adapter = dev;
Sathya Perla3abcded2010-10-03 22:12:27 -07001766 struct be_rx_obj *rxo;
Sathya Perlafe6d2a32010-11-21 23:25:50 +00001767	int isr, i, tx = 0, rx = 0;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001768
Sathya Perlafe6d2a32010-11-21 23:25:50 +00001769 if (lancer_chip(adapter)) {
1770 if (event_peek(&adapter->tx_eq))
Sathya Perla3c8def92011-06-12 20:01:58 +00001771 tx = event_handle(adapter, &adapter->tx_eq, false);
Sathya Perlafe6d2a32010-11-21 23:25:50 +00001772 for_all_rx_queues(adapter, rxo, i) {
1773 if (event_peek(&rxo->rx_eq))
Sathya Perla3c8def92011-06-12 20:01:58 +00001774 rx |= event_handle(adapter, &rxo->rx_eq, true);
Sathya Perlafe6d2a32010-11-21 23:25:50 +00001775 }
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001776
Sathya Perlafe6d2a32010-11-21 23:25:50 +00001777 if (!(tx || rx))
1778 return IRQ_NONE;
Sathya Perla3abcded2010-10-03 22:12:27 -07001779
Sathya Perlafe6d2a32010-11-21 23:25:50 +00001780 } else {
1781 isr = ioread32(adapter->csr + CEV_ISR0_OFFSET +
1782 (adapter->tx_eq.q.id / 8) * CEV_ISR_SIZE);
1783 if (!isr)
1784 return IRQ_NONE;
1785
Padmanabh Ratnakarecd62102011-04-03 01:54:11 +00001786		if ((1 << adapter->tx_eq.eq_idx) & isr)
Sathya Perla3c8def92011-06-12 20:01:58 +00001787 event_handle(adapter, &adapter->tx_eq, false);
Sathya Perlafe6d2a32010-11-21 23:25:50 +00001788
1789 for_all_rx_queues(adapter, rxo, i) {
Padmanabh Ratnakarecd62102011-04-03 01:54:11 +00001790			if ((1 << rxo->rx_eq.eq_idx) & isr)
Sathya Perla3c8def92011-06-12 20:01:58 +00001791 event_handle(adapter, &rxo->rx_eq, true);
Sathya Perlafe6d2a32010-11-21 23:25:50 +00001792 }
Sathya Perla3abcded2010-10-03 22:12:27 -07001793 }
Sathya Perlac001c212009-07-01 01:06:07 +00001794
Sathya Perla8788fdc2009-07-27 22:52:03 +00001795 return IRQ_HANDLED;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001796}
1797
1798static irqreturn_t be_msix_rx(int irq, void *dev)
1799{
Sathya Perla3abcded2010-10-03 22:12:27 -07001800 struct be_rx_obj *rxo = dev;
1801 struct be_adapter *adapter = rxo->adapter;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001802
Sathya Perla3c8def92011-06-12 20:01:58 +00001803 event_handle(adapter, &rxo->rx_eq, true);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001804
1805 return IRQ_HANDLED;
1806}
1807
Sathya Perla5fb379e2009-06-18 00:02:59 +00001808static irqreturn_t be_msix_tx_mcc(int irq, void *dev)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001809{
1810 struct be_adapter *adapter = dev;
1811
Sathya Perla3c8def92011-06-12 20:01:58 +00001812 event_handle(adapter, &adapter->tx_eq, false);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001813
1814 return IRQ_HANDLED;
1815}
1816
Sathya Perla2e588f82011-03-11 02:49:26 +00001817static inline bool do_gro(struct be_rx_compl_info *rxcp)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001818{
Sathya Perla2e588f82011-03-11 02:49:26 +00001819	return rxcp->tcpf && !rxcp->err;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001820}
1821
stephen hemminger49b05222010-10-21 07:50:48 +00001822static int be_poll_rx(struct napi_struct *napi, int budget)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001823{
1824 struct be_eq_obj *rx_eq = container_of(napi, struct be_eq_obj, napi);
Sathya Perla3abcded2010-10-03 22:12:27 -07001825 struct be_rx_obj *rxo = container_of(rx_eq, struct be_rx_obj, rx_eq);
1826 struct be_adapter *adapter = rxo->adapter;
1827 struct be_queue_info *rx_cq = &rxo->cq;
Sathya Perla2e588f82011-03-11 02:49:26 +00001828 struct be_rx_compl_info *rxcp;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001829 u32 work_done;
1830
Sathya Perlaac124ff2011-07-25 19:10:14 +00001831 rx_stats(rxo)->rx_polls++;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001832 for (work_done = 0; work_done < budget; work_done++) {
Sathya Perla3abcded2010-10-03 22:12:27 -07001833 rxcp = be_rx_compl_get(rxo);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001834 if (!rxcp)
1835 break;
1836
Sathya Perla12004ae2011-08-02 19:57:46 +00001837		/* Is it a flush compl that has no data? */
1838 if (unlikely(rxcp->num_rcvd == 0))
1839 goto loop_continue;
1840
1841 /* Discard compl with partial DMA Lancer B0 */
1842 if (unlikely(!rxcp->pkt_size)) {
Padmanabh Ratnakar009dd872011-05-10 05:12:17 +00001843 be_rx_compl_discard(adapter, rxo, rxcp);
Sathya Perla12004ae2011-08-02 19:57:46 +00001844 goto loop_continue;
Sathya Perla64642812010-12-01 01:04:17 +00001845 }
Padmanabh Ratnakar009dd872011-05-10 05:12:17 +00001846
Sathya Perla12004ae2011-08-02 19:57:46 +00001847		/* On BE, drop pkts that arrive due to imperfect filtering in
	1848		 * promiscuous mode on some SKUs
1849 */
1850 if (unlikely(rxcp->port != adapter->port_num &&
1851 !lancer_chip(adapter))) {
1852 be_rx_compl_discard(adapter, rxo, rxcp);
1853 goto loop_continue;
1854 }
1855
1856 if (do_gro(rxcp))
1857 be_rx_compl_process_gro(adapter, rxo, rxcp);
1858 else
1859 be_rx_compl_process(adapter, rxo, rxcp);
1860loop_continue:
Sathya Perla2e588f82011-03-11 02:49:26 +00001861 be_rx_stats_update(rxo, rxcp);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001862 }
1863
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001864 /* Refill the queue */
Sathya Perla857c9902011-08-22 19:41:51 +00001865 if (work_done && atomic_read(&rxo->q.used) < RX_FRAGS_REFILL_WM)
Eric Dumazet1829b082011-03-01 05:48:12 +00001866 be_post_rx_frags(rxo, GFP_ATOMIC);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001867
1868 /* All consumed */
1869 if (work_done < budget) {
1870 napi_complete(napi);
Sathya Perla8788fdc2009-07-27 22:52:03 +00001871 be_cq_notify(adapter, rx_cq->id, true, work_done);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001872 } else {
1873 /* More to be consumed; continue with interrupts disabled */
Sathya Perla8788fdc2009-07-27 22:52:03 +00001874 be_cq_notify(adapter, rx_cq->id, false, work_done);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001875 }
1876 return work_done;
1877}
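
/*
 * The NAPI budget contract be_poll_rx() follows, in skeleton form
 * (illustrative sketch; demo_process_one() is hypothetical): consume at
 * most 'budget' completions, call napi_complete() and re-arm only when
 * the queue was fully drained, and return the work actually done.
 */
static int demo_process_one(void);	/* hypothetical; returns 0 when drained */

static int demo_poll(struct napi_struct *napi, int budget)
{
	int work_done;

	for (work_done = 0; work_done < budget; work_done++) {
		if (!demo_process_one())
			break;
	}

	if (work_done < budget)
		napi_complete(napi);	/* then re-arm the cq/eq */

	return work_done;
}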
1878
Sathya Perlaf31e50a2010-03-02 03:56:39 -08001879/* As TX and MCC share the same EQ, check for both TX and MCC completions.
1880 * For TX/MCC we don't honour budget; consume everything
1881 */
1882static int be_poll_tx_mcc(struct napi_struct *napi, int budget)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001883{
Sathya Perlaf31e50a2010-03-02 03:56:39 -08001884 struct be_eq_obj *tx_eq = container_of(napi, struct be_eq_obj, napi);
1885 struct be_adapter *adapter =
1886 container_of(tx_eq, struct be_adapter, tx_eq);
Sathya Perla3c8def92011-06-12 20:01:58 +00001887 struct be_tx_obj *txo;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001888 struct be_eth_tx_compl *txcp;
Sathya Perla3c8def92011-06-12 20:01:58 +00001889 int tx_compl, mcc_compl, status = 0;
1890 u8 i;
1891 u16 num_wrbs;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001892
Sathya Perla3c8def92011-06-12 20:01:58 +00001893 for_all_tx_queues(adapter, txo, i) {
1894 tx_compl = 0;
1895 num_wrbs = 0;
1896 while ((txcp = be_tx_compl_get(&txo->cq))) {
1897 num_wrbs += be_tx_compl_process(adapter, txo,
1898 AMAP_GET_BITS(struct amap_eth_tx_compl,
1899 wrb_index, txcp));
1900 tx_compl++;
1901 }
1902 if (tx_compl) {
1903 be_cq_notify(adapter, txo->cq.id, true, tx_compl);
1904
1905 atomic_sub(num_wrbs, &txo->q.used);
1906
1907 /* As Tx wrbs have been freed up, wake up netdev queue
1908 * if it was stopped due to lack of tx wrbs. */
1909 if (__netif_subqueue_stopped(adapter->netdev, i) &&
1910 atomic_read(&txo->q.used) < txo->q.len / 2) {
1911 netif_wake_subqueue(adapter->netdev, i);
1912 }
1913
Sathya Perlaab1594e2011-07-25 19:10:15 +00001914 u64_stats_update_begin(&tx_stats(txo)->sync_compl);
Sathya Perlaac124ff2011-07-25 19:10:14 +00001915 tx_stats(txo)->tx_compl += tx_compl;
Sathya Perlaab1594e2011-07-25 19:10:15 +00001916 u64_stats_update_end(&tx_stats(txo)->sync_compl);
Sathya Perla3c8def92011-06-12 20:01:58 +00001917 }
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001918 }
1919
Sathya Perlaf31e50a2010-03-02 03:56:39 -08001920 mcc_compl = be_process_mcc(adapter, &status);
1921
Sathya Perlaf31e50a2010-03-02 03:56:39 -08001922 if (mcc_compl) {
1923 struct be_mcc_obj *mcc_obj = &adapter->mcc_obj;
1924 be_cq_notify(adapter, mcc_obj->cq.id, true, mcc_compl);
1925 }
1926
Sathya Perla3c8def92011-06-12 20:01:58 +00001927 napi_complete(napi);
Sathya Perla5fb379e2009-06-18 00:02:59 +00001928
Sathya Perla3c8def92011-06-12 20:01:58 +00001929 be_eq_notify(adapter, tx_eq->q.id, true, false, 0);
Sathya Perlaab1594e2011-07-25 19:10:15 +00001930 adapter->drv_stats.tx_events++;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001931 return 1;
1932}
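
/*
 * Reader side of the u64_stats_update_begin()/end() bracketing used
 * above (illustrative sketch): on 32-bit hosts a reader retries until
 * it observes a consistent 64-bit counter value.
 */
static u64 demo_read_tx_compl(struct be_tx_obj *txo)
{
	unsigned int start;
	u64 val;

	do {
		start = u64_stats_fetch_begin(&tx_stats(txo)->sync_compl);
		val = tx_stats(txo)->tx_compl;
	} while (u64_stats_fetch_retry(&tx_stats(txo)->sync_compl, start));

	return val;
}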
1933
Ajit Khaparded053de92010-09-03 06:23:30 +00001934void be_detect_dump_ue(struct be_adapter *adapter)
Ajit Khaparde7c185272010-07-29 06:16:33 +00001935{
1936 u32 ue_status_lo, ue_status_hi, ue_status_lo_mask, ue_status_hi_mask;
1937 u32 i;
1938
1939 pci_read_config_dword(adapter->pdev,
1940 PCICFG_UE_STATUS_LOW, &ue_status_lo);
1941 pci_read_config_dword(adapter->pdev,
1942 PCICFG_UE_STATUS_HIGH, &ue_status_hi);
1943 pci_read_config_dword(adapter->pdev,
1944 PCICFG_UE_STATUS_LOW_MASK, &ue_status_lo_mask);
1945 pci_read_config_dword(adapter->pdev,
1946 PCICFG_UE_STATUS_HI_MASK, &ue_status_hi_mask);
1947
1948 ue_status_lo = (ue_status_lo & (~ue_status_lo_mask));
1949 ue_status_hi = (ue_status_hi & (~ue_status_hi_mask));
1950
Ajit Khaparded053de92010-09-03 06:23:30 +00001951 if (ue_status_lo || ue_status_hi) {
1952 adapter->ue_detected = true;
Ajit Khaparde7acc2082011-02-11 13:38:17 +00001953 adapter->eeh_err = true;
Ajit Khaparded053de92010-09-03 06:23:30 +00001954 dev_err(&adapter->pdev->dev, "UE Detected!!\n");
1955 }
1956
Ajit Khaparde7c185272010-07-29 06:16:33 +00001957 if (ue_status_lo) {
1958 for (i = 0; ue_status_lo; ue_status_lo >>= 1, i++) {
1959 if (ue_status_lo & 1)
1960 dev_err(&adapter->pdev->dev,
1961 "UE: %s bit set\n", ue_status_low_desc[i]);
1962 }
1963 }
1964 if (ue_status_hi) {
1965 for (i = 0; ue_status_hi; ue_status_hi >>= 1, i++) {
1966 if (ue_status_hi & 1)
1967 dev_err(&adapter->pdev->dev,
1968 "UE: %s bit set\n", ue_status_hi_desc[i]);
1969 }
1970 }
1971
1972}
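
/*
 * An equivalent walk of a UE status word using the generic bitops helper
 * (illustrative sketch; the loops above shift the word instead to avoid
 * the unsigned long copy):
 */
static void demo_dump_ue_low(struct be_adapter *adapter, u32 ue_status_lo)
{
	unsigned long bits = ue_status_lo;
	int i;

	for_each_set_bit(i, &bits, 32)
		dev_err(&adapter->pdev->dev,
			"UE: %s bit set\n", ue_status_low_desc[i]);
}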
1973
Sathya Perlaea1dae12009-03-19 23:56:20 -07001974static void be_worker(struct work_struct *work)
1975{
1976 struct be_adapter *adapter =
1977 container_of(work, struct be_adapter, work.work);
Sathya Perla3abcded2010-10-03 22:12:27 -07001978 struct be_rx_obj *rxo;
1979 int i;
Sathya Perlaea1dae12009-03-19 23:56:20 -07001980
Sathya Perla16da8252011-03-21 20:49:27 +00001981 if (!adapter->ue_detected && !lancer_chip(adapter))
1982 be_detect_dump_ue(adapter);
1983
Somnath Koturf203af72010-10-25 23:01:03 +00001984	/* When interrupts are not yet enabled, just reap any pending
	1985	 * MCC completions */
1986 if (!netif_running(adapter->netdev)) {
1987 int mcc_compl, status = 0;
1988
1989 mcc_compl = be_process_mcc(adapter, &status);
1990
1991 if (mcc_compl) {
1992 struct be_mcc_obj *mcc_obj = &adapter->mcc_obj;
1993 be_cq_notify(adapter, mcc_obj->cq.id, false, mcc_compl);
1994 }
Ajit Khaparde9b037f32011-02-11 13:38:29 +00001995
Somnath Koturf203af72010-10-25 23:01:03 +00001996 goto reschedule;
1997 }
1998
Selvin Xavier005d5692011-05-16 07:36:35 +00001999 if (!adapter->stats_cmd_sent) {
2000 if (lancer_chip(adapter))
2001 lancer_cmd_get_pport_stats(adapter,
2002 &adapter->stats_cmd);
2003 else
2004 be_cmd_get_stats(adapter, &adapter->stats_cmd);
2005 }
Sathya Perla3c8def92011-06-12 20:01:58 +00002006
Sathya Perla3abcded2010-10-03 22:12:27 -07002007 for_all_rx_queues(adapter, rxo, i) {
Sathya Perla3abcded2010-10-03 22:12:27 -07002008 be_rx_eqd_update(adapter, rxo);
2009
2010 if (rxo->rx_post_starved) {
2011 rxo->rx_post_starved = false;
Eric Dumazet1829b082011-03-01 05:48:12 +00002012 be_post_rx_frags(rxo, GFP_KERNEL);
Sathya Perla3abcded2010-10-03 22:12:27 -07002013 }
Sathya Perlaea1dae12009-03-19 23:56:20 -07002014 }
2015
Somnath Koturf203af72010-10-25 23:01:03 +00002016reschedule:
Ivan Vecerae74fbd032011-04-21 00:20:04 +00002017 adapter->work_counter++;
Sathya Perlaea1dae12009-03-19 23:56:20 -07002018 schedule_delayed_work(&adapter->work, msecs_to_jiffies(1000));
2019}
2020
Sathya Perla8d56ff12009-11-22 22:02:26 +00002021static void be_msix_disable(struct be_adapter *adapter)
2022{
Sathya Perlaac6a0c42011-03-21 20:49:25 +00002023 if (msix_enabled(adapter)) {
Sathya Perla8d56ff12009-11-22 22:02:26 +00002024 pci_disable_msix(adapter->pdev);
Sathya Perlaac6a0c42011-03-21 20:49:25 +00002025 adapter->num_msix_vec = 0;
Sathya Perla3abcded2010-10-03 22:12:27 -07002026 }
2027}
2028
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002029static void be_msix_enable(struct be_adapter *adapter)
2030{
Sathya Perla3abcded2010-10-03 22:12:27 -07002031#define BE_MIN_MSIX_VECTORS (1 + 1) /* Rx + Tx */
Sathya Perlaac6a0c42011-03-21 20:49:25 +00002032 int i, status, num_vec;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002033
Sathya Perlaac6a0c42011-03-21 20:49:25 +00002034 num_vec = be_num_rxqs_want(adapter) + 1;
Sathya Perla3abcded2010-10-03 22:12:27 -07002035
Sathya Perlaac6a0c42011-03-21 20:49:25 +00002036 for (i = 0; i < num_vec; i++)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002037 adapter->msix_entries[i].entry = i;
2038
Sathya Perlaac6a0c42011-03-21 20:49:25 +00002039 status = pci_enable_msix(adapter->pdev, adapter->msix_entries, num_vec);
Sathya Perla3abcded2010-10-03 22:12:27 -07002040 if (status == 0) {
2041 goto done;
2042 } else if (status >= BE_MIN_MSIX_VECTORS) {
Sathya Perlaac6a0c42011-03-21 20:49:25 +00002043 num_vec = status;
Sathya Perla3abcded2010-10-03 22:12:27 -07002044 if (pci_enable_msix(adapter->pdev, adapter->msix_entries,
Sathya Perlaac6a0c42011-03-21 20:49:25 +00002045 num_vec) == 0)
Sathya Perla3abcded2010-10-03 22:12:27 -07002046 goto done;
Sathya Perla3abcded2010-10-03 22:12:27 -07002047 }
2048 return;
2049done:
Sathya Perlaac6a0c42011-03-21 20:49:25 +00002050 adapter->num_msix_vec = num_vec;
2051 return;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002052}
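
/*
 * The fallback above relies on pci_enable_msix() returning the number of
 * vectors actually available when the full request cannot be met; a
 * generic shape of that "ask, then retry with what is on offer" pattern
 * (illustrative sketch, hypothetical helper):
 */
static int demo_enable_msix(struct pci_dev *pdev, struct msix_entry *entries,
		int want, int min)
{
	int rc = pci_enable_msix(pdev, entries, want);

	if (rc > 0 && rc >= min)
		rc = pci_enable_msix(pdev, entries, rc);

	return rc;	/* 0 on success; > 0 means fewer than 'min' left */
}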
2053
Sarveshwar Bandiba343c72010-03-31 02:56:12 +00002054static void be_sriov_enable(struct be_adapter *adapter)
2055{
Sarveshwar Bandi344dbf12010-07-09 01:43:55 +00002056 be_check_sriov_fn_type(adapter);
Ajit Khaparde6dedec82010-07-29 06:15:32 +00002057#ifdef CONFIG_PCI_IOV
Sarveshwar Bandiba343c72010-03-31 02:56:12 +00002058 if (be_physfn(adapter) && num_vfs) {
Ajit Khaparde81be8f02011-04-06 18:08:17 +00002059 int status, pos;
2060 u16 nvfs;
2061
2062 pos = pci_find_ext_capability(adapter->pdev,
2063 PCI_EXT_CAP_ID_SRIOV);
2064 pci_read_config_word(adapter->pdev,
2065 pos + PCI_SRIOV_TOTAL_VF, &nvfs);
2066
2067 if (num_vfs > nvfs) {
2068 dev_info(&adapter->pdev->dev,
2069 "Device supports %d VFs and not %d\n",
2070 nvfs, num_vfs);
2071 num_vfs = nvfs;
2072 }
Ajit Khaparde6dedec82010-07-29 06:15:32 +00002073
Sarveshwar Bandiba343c72010-03-31 02:56:12 +00002074 status = pci_enable_sriov(adapter->pdev, num_vfs);
	2075		adapter->sriov_enabled = !status;
2076 }
2077#endif
Sarveshwar Bandiba343c72010-03-31 02:56:12 +00002078}
2079
2080static void be_sriov_disable(struct be_adapter *adapter)
2081{
2082#ifdef CONFIG_PCI_IOV
2083 if (adapter->sriov_enabled) {
2084 pci_disable_sriov(adapter->pdev);
2085 adapter->sriov_enabled = false;
2086 }
2087#endif
2088}
2089
Sathya Perlafe6d2a32010-11-21 23:25:50 +00002090static inline int be_msix_vec_get(struct be_adapter *adapter,
2091 struct be_eq_obj *eq_obj)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002092{
Padmanabh Ratnakarecd62102011-04-03 01:54:11 +00002093 return adapter->msix_entries[eq_obj->eq_idx].vector;
Sathya Perlab628bde2009-08-17 00:58:26 +00002094}
2095
2096static int be_request_irq(struct be_adapter *adapter,
2097 struct be_eq_obj *eq_obj,
Sathya Perla3abcded2010-10-03 22:12:27 -07002098 void *handler, char *desc, void *context)
Sathya Perlab628bde2009-08-17 00:58:26 +00002099{
2100 struct net_device *netdev = adapter->netdev;
2101 int vec;
2102
2103 sprintf(eq_obj->desc, "%s-%s", netdev->name, desc);
Sathya Perlafe6d2a32010-11-21 23:25:50 +00002104 vec = be_msix_vec_get(adapter, eq_obj);
Sathya Perla3abcded2010-10-03 22:12:27 -07002105 return request_irq(vec, handler, 0, eq_obj->desc, context);
Sathya Perlab628bde2009-08-17 00:58:26 +00002106}
2107
Sathya Perla3abcded2010-10-03 22:12:27 -07002108static void be_free_irq(struct be_adapter *adapter, struct be_eq_obj *eq_obj,
2109 void *context)
Sathya Perlab628bde2009-08-17 00:58:26 +00002110{
Sathya Perlafe6d2a32010-11-21 23:25:50 +00002111 int vec = be_msix_vec_get(adapter, eq_obj);
Sathya Perla3abcded2010-10-03 22:12:27 -07002112 free_irq(vec, context);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002113}
2114
2115static int be_msix_register(struct be_adapter *adapter)
2116{
Sathya Perla3abcded2010-10-03 22:12:27 -07002117 struct be_rx_obj *rxo;
2118 int status, i;
2119 char qname[10];
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002120
Sathya Perla3abcded2010-10-03 22:12:27 -07002121 status = be_request_irq(adapter, &adapter->tx_eq, be_msix_tx_mcc, "tx",
2122 adapter);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002123 if (status)
2124 goto err;
2125
Sathya Perla3abcded2010-10-03 22:12:27 -07002126 for_all_rx_queues(adapter, rxo, i) {
2127 sprintf(qname, "rxq%d", i);
2128 status = be_request_irq(adapter, &rxo->rx_eq, be_msix_rx,
2129 qname, rxo);
2130 if (status)
2131 goto err_msix;
2132 }
Sathya Perlab628bde2009-08-17 00:58:26 +00002133
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002134 return 0;
Sathya Perlab628bde2009-08-17 00:58:26 +00002135
Sathya Perla3abcded2010-10-03 22:12:27 -07002136err_msix:
2137 be_free_irq(adapter, &adapter->tx_eq, adapter);
2138
2139 for (i--, rxo = &adapter->rx_obj[i]; i >= 0; i--, rxo--)
2140 be_free_irq(adapter, &rxo->rx_eq, rxo);
2141
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002142err:
2143 dev_warn(&adapter->pdev->dev,
2144 "MSIX Request IRQ failed - err %d\n", status);
Sathya Perlaac6a0c42011-03-21 20:49:25 +00002145 be_msix_disable(adapter);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002146 return status;
2147}
2148
2149static int be_irq_register(struct be_adapter *adapter)
2150{
2151 struct net_device *netdev = adapter->netdev;
2152 int status;
2153
Sathya Perlaac6a0c42011-03-21 20:49:25 +00002154 if (msix_enabled(adapter)) {
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002155 status = be_msix_register(adapter);
2156 if (status == 0)
2157 goto done;
Sarveshwar Bandiba343c72010-03-31 02:56:12 +00002158 /* INTx is not supported for VF */
2159 if (!be_physfn(adapter))
2160 return status;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002161 }
2162
2163 /* INTx */
2164 netdev->irq = adapter->pdev->irq;
2165 status = request_irq(netdev->irq, be_intx, IRQF_SHARED, netdev->name,
2166 adapter);
2167 if (status) {
2168 dev_err(&adapter->pdev->dev,
2169 "INTx request IRQ failed - err %d\n", status);
2170 return status;
2171 }
2172done:
2173 adapter->isr_registered = true;
2174 return 0;
2175}
2176
2177static void be_irq_unregister(struct be_adapter *adapter)
2178{
2179 struct net_device *netdev = adapter->netdev;
Sathya Perla3abcded2010-10-03 22:12:27 -07002180 struct be_rx_obj *rxo;
2181 int i;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002182
2183 if (!adapter->isr_registered)
2184 return;
2185
2186 /* INTx */
Sathya Perlaac6a0c42011-03-21 20:49:25 +00002187 if (!msix_enabled(adapter)) {
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002188 free_irq(netdev->irq, adapter);
2189 goto done;
2190 }
2191
2192 /* MSIx */
Sathya Perla3abcded2010-10-03 22:12:27 -07002193 be_free_irq(adapter, &adapter->tx_eq, adapter);
2194
2195 for_all_rx_queues(adapter, rxo, i)
2196 be_free_irq(adapter, &rxo->rx_eq, rxo);
2197
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002198done:
2199 adapter->isr_registered = false;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002200}
2201
Sathya Perla482c9e72011-06-29 23:33:17 +00002202static void be_rx_queues_clear(struct be_adapter *adapter)
2203{
2204 struct be_queue_info *q;
2205 struct be_rx_obj *rxo;
2206 int i;
2207
2208 for_all_rx_queues(adapter, rxo, i) {
2209 q = &rxo->q;
2210 if (q->created) {
2211 be_cmd_rxq_destroy(adapter, q);
2212 /* After the rxq is invalidated, wait for a grace time
	2213			 * of 1ms for all DMA to end and the flush compl to
2214 * arrive
2215 */
2216 mdelay(1);
2217 be_rx_q_clean(adapter, rxo);
2218 }
2219
2220 /* Clear any residual events */
2221 q = &rxo->rx_eq.q;
2222 if (q->created)
2223 be_eq_clean(adapter, &rxo->rx_eq);
2224 }
2225}
2226
Sathya Perla889cd4b2010-05-30 23:33:45 +00002227static int be_close(struct net_device *netdev)
2228{
2229 struct be_adapter *adapter = netdev_priv(netdev);
Sathya Perla3abcded2010-10-03 22:12:27 -07002230 struct be_rx_obj *rxo;
Sathya Perla3c8def92011-06-12 20:01:58 +00002231 struct be_tx_obj *txo;
Sathya Perla889cd4b2010-05-30 23:33:45 +00002232 struct be_eq_obj *tx_eq = &adapter->tx_eq;
Sathya Perla3abcded2010-10-03 22:12:27 -07002233 int vec, i;
Sathya Perla889cd4b2010-05-30 23:33:45 +00002234
Sathya Perla889cd4b2010-05-30 23:33:45 +00002235 be_async_mcc_disable(adapter);
2236
Sathya Perlafe6d2a32010-11-21 23:25:50 +00002237 if (!lancer_chip(adapter))
2238 be_intr_set(adapter, false);
Sathya Perla889cd4b2010-05-30 23:33:45 +00002239
Padmanabh Ratnakar63fcb272011-03-07 03:09:17 +00002240 for_all_rx_queues(adapter, rxo, i)
2241 napi_disable(&rxo->rx_eq.napi);
2242
2243 napi_disable(&tx_eq->napi);
2244
2245 if (lancer_chip(adapter)) {
Padmanabh Ratnakar63fcb272011-03-07 03:09:17 +00002246 be_cq_notify(adapter, adapter->mcc_obj.cq.id, false, 0);
2247 for_all_rx_queues(adapter, rxo, i)
2248 be_cq_notify(adapter, rxo->cq.id, false, 0);
Sathya Perla3c8def92011-06-12 20:01:58 +00002249 for_all_tx_queues(adapter, txo, i)
2250 be_cq_notify(adapter, txo->cq.id, false, 0);
Padmanabh Ratnakar63fcb272011-03-07 03:09:17 +00002251 }
2252
Sathya Perlaac6a0c42011-03-21 20:49:25 +00002253 if (msix_enabled(adapter)) {
Sathya Perlafe6d2a32010-11-21 23:25:50 +00002254 vec = be_msix_vec_get(adapter, tx_eq);
Sathya Perla889cd4b2010-05-30 23:33:45 +00002255 synchronize_irq(vec);
Sathya Perla3abcded2010-10-03 22:12:27 -07002256
2257 for_all_rx_queues(adapter, rxo, i) {
Sathya Perlafe6d2a32010-11-21 23:25:50 +00002258 vec = be_msix_vec_get(adapter, &rxo->rx_eq);
Sathya Perla3abcded2010-10-03 22:12:27 -07002259 synchronize_irq(vec);
2260 }
Sathya Perla889cd4b2010-05-30 23:33:45 +00002261 } else {
2262 synchronize_irq(netdev->irq);
2263 }
2264 be_irq_unregister(adapter);
2265
Sathya Perla889cd4b2010-05-30 23:33:45 +00002266 /* Wait for all pending tx completions to arrive so that
2267 * all tx skbs are freed.
2268 */
Sathya Perla3c8def92011-06-12 20:01:58 +00002269 for_all_tx_queues(adapter, txo, i)
2270 be_tx_compl_clean(adapter, txo);
Sathya Perla889cd4b2010-05-30 23:33:45 +00002271
Sathya Perla482c9e72011-06-29 23:33:17 +00002272 be_rx_queues_clear(adapter);
2273 return 0;
2274}
2275
2276static int be_rx_queues_setup(struct be_adapter *adapter)
2277{
2278 struct be_rx_obj *rxo;
2279 int rc, i;
2280 u8 rsstable[MAX_RSS_QS];
2281
2282 for_all_rx_queues(adapter, rxo, i) {
2283 rc = be_cmd_rxq_create(adapter, &rxo->q, rxo->cq.id,
2284 rx_frag_size, BE_MAX_JUMBO_FRAME_SIZE,
2285 adapter->if_handle,
	2286			(i > 0) ? 1 : 0 /* rss enable */, &rxo->rss_id);
2287 if (rc)
2288 return rc;
2289 }
2290
2291 if (be_multi_rxq(adapter)) {
2292 for_all_rss_queues(adapter, rxo, i)
2293 rsstable[i] = rxo->rss_id;
2294
2295 rc = be_cmd_rss_config(adapter, rsstable,
2296 adapter->num_rx_qs - 1);
2297 if (rc)
2298 return rc;
2299 }
2300
2301 /* First time posting */
2302 for_all_rx_queues(adapter, rxo, i) {
2303 be_post_rx_frags(rxo, GFP_KERNEL);
2304 napi_enable(&rxo->rx_eq.napi);
2305 }
Sathya Perla889cd4b2010-05-30 23:33:45 +00002306 return 0;
2307}
2308
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002309static int be_open(struct net_device *netdev)
2310{
2311 struct be_adapter *adapter = netdev_priv(netdev);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002312 struct be_eq_obj *tx_eq = &adapter->tx_eq;
Sathya Perla3abcded2010-10-03 22:12:27 -07002313 struct be_rx_obj *rxo;
Sathya Perla3abcded2010-10-03 22:12:27 -07002314 int status, i;
Sathya Perla5fb379e2009-06-18 00:02:59 +00002315
Sathya Perla482c9e72011-06-29 23:33:17 +00002316 status = be_rx_queues_setup(adapter);
2317 if (status)
2318 goto err;
2319
Sathya Perla5fb379e2009-06-18 00:02:59 +00002320 napi_enable(&tx_eq->napi);
2321
2322 be_irq_register(adapter);
2323
Sathya Perlafe6d2a32010-11-21 23:25:50 +00002324 if (!lancer_chip(adapter))
2325 be_intr_set(adapter, true);
Sathya Perla5fb379e2009-06-18 00:02:59 +00002326
2327 /* The evt queues are created in unarmed state; arm them */
Sathya Perla3abcded2010-10-03 22:12:27 -07002328 for_all_rx_queues(adapter, rxo, i) {
2329 be_eq_notify(adapter, rxo->rx_eq.q.id, true, false, 0);
2330 be_cq_notify(adapter, rxo->cq.id, true, 0);
2331 }
Sathya Perla8788fdc2009-07-27 22:52:03 +00002332 be_eq_notify(adapter, tx_eq->q.id, true, false, 0);
Sathya Perla5fb379e2009-06-18 00:02:59 +00002333
Sathya Perla7a1e9b22010-02-17 01:35:11 +00002334 /* Now that interrupts are on we can process async mcc */
2335 be_async_mcc_enable(adapter);
2336
Sarveshwar Bandiba343c72010-03-31 02:56:12 +00002337 if (be_physfn(adapter)) {
Ajit Khaparde1da87b72010-07-23 01:51:22 +00002338 status = be_vid_config(adapter, false, 0);
Sathya Perla889cd4b2010-05-30 23:33:45 +00002339 if (status)
2340 goto err;
2341
Sarveshwar Bandiba343c72010-03-31 02:56:12 +00002342 status = be_cmd_set_flow_control(adapter,
2343 adapter->tx_fc, adapter->rx_fc);
2344 if (status)
Sathya Perla889cd4b2010-05-30 23:33:45 +00002345 goto err;
Sarveshwar Bandiba343c72010-03-31 02:56:12 +00002346 }
Ajit Khaparde4f2aa892009-11-06 02:07:32 +00002347
Sathya Perla889cd4b2010-05-30 23:33:45 +00002348 return 0;
2349err:
2350 be_close(adapter->netdev);
2351 return -EIO;
Sathya Perla5fb379e2009-06-18 00:02:59 +00002352}
2353
Ajit Khaparde71d8d1b2009-12-03 06:16:59 +00002354static int be_setup_wol(struct be_adapter *adapter, bool enable)
2355{
2356 struct be_dma_mem cmd;
2357 int status = 0;
2358 u8 mac[ETH_ALEN];
2359
2360 memset(mac, 0, ETH_ALEN);
2361
2362 cmd.size = sizeof(struct be_cmd_req_acpi_wol_magic_config);
Ivan Vecera2b7bceb2011-02-02 08:05:12 +00002363 cmd.va = dma_alloc_coherent(&adapter->pdev->dev, cmd.size, &cmd.dma,
2364 GFP_KERNEL);
Ajit Khaparde71d8d1b2009-12-03 06:16:59 +00002365 if (cmd.va == NULL)
2366 return -1;
2367 memset(cmd.va, 0, cmd.size);
2368
2369 if (enable) {
2370 status = pci_write_config_dword(adapter->pdev,
2371 PCICFG_PM_CONTROL_OFFSET, PCICFG_PM_CONTROL_MASK);
2372 if (status) {
2373 dev_err(&adapter->pdev->dev,
Frans Pop2381a552010-03-24 07:57:36 +00002374 "Could not enable Wake-on-lan\n");
Ivan Vecera2b7bceb2011-02-02 08:05:12 +00002375 dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va,
2376 cmd.dma);
Ajit Khaparde71d8d1b2009-12-03 06:16:59 +00002377 return status;
2378 }
2379 status = be_cmd_enable_magic_wol(adapter,
2380 adapter->netdev->dev_addr, &cmd);
2381 pci_enable_wake(adapter->pdev, PCI_D3hot, 1);
2382 pci_enable_wake(adapter->pdev, PCI_D3cold, 1);
2383 } else {
2384 status = be_cmd_enable_magic_wol(adapter, mac, &cmd);
2385 pci_enable_wake(adapter->pdev, PCI_D3hot, 0);
2386 pci_enable_wake(adapter->pdev, PCI_D3cold, 0);
2387 }
2388
Ivan Vecera2b7bceb2011-02-02 08:05:12 +00002389 dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va, cmd.dma);
Ajit Khaparde71d8d1b2009-12-03 06:16:59 +00002390 return status;
2391}
2392
/*
 * Generate a seed MAC address from the PF MAC address using jhash.
 * MAC addresses for VFs are assigned incrementally starting from the seed.
 * These addresses are programmed in the ASIC by the PF and the VF driver
 * queries for the MAC address during its probe.
 */
static inline int be_vf_eth_addr_config(struct be_adapter *adapter)
{
	u32 vf = 0;
	int status = 0;
	u8 mac[ETH_ALEN];

	be_vf_eth_addr_generate(adapter, mac);

	for (vf = 0; vf < num_vfs; vf++) {
		status = be_cmd_pmac_add(adapter, mac,
					adapter->vf_cfg[vf].vf_if_handle,
					&adapter->vf_cfg[vf].vf_pmac_id,
					vf + 1);
		if (status)
			dev_err(&adapter->pdev->dev,
				"MAC address add failed for VF %d\n", vf);
		else
			memcpy(adapter->vf_cfg[vf].vf_mac_addr, mac, ETH_ALEN);

		mac[5] += 1;
	}
	return status;
}

static inline void be_vf_eth_addr_rem(struct be_adapter *adapter)
{
	u32 vf;

	for (vf = 0; vf < num_vfs; vf++) {
		if (adapter->vf_cfg[vf].vf_pmac_id != BE_INVALID_PMAC_ID)
			be_cmd_pmac_del(adapter,
					adapter->vf_cfg[vf].vf_if_handle,
					adapter->vf_cfg[vf].vf_pmac_id, vf + 1);
	}
}

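/*
 * One-time function setup: create the PF (and, with SR-IOV enabled, the
 * per-VF) interfaces, then the TX, RX and MCC queues, in that order. Each
 * step unwinds the earlier ones on failure via the labels at the bottom.
 */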
static int be_setup(struct be_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	u32 cap_flags, en_flags, vf = 0;
	int status;
	u8 mac[ETH_ALEN];

	be_cmd_req_native_mode(adapter);

	cap_flags = en_flags = BE_IF_FLAGS_UNTAGGED |
				BE_IF_FLAGS_BROADCAST |
				BE_IF_FLAGS_MULTICAST;

	if (be_physfn(adapter)) {
		cap_flags |= BE_IF_FLAGS_MCAST_PROMISCUOUS |
				BE_IF_FLAGS_PROMISCUOUS |
				BE_IF_FLAGS_PASS_L3L4_ERRORS;
		en_flags |= BE_IF_FLAGS_PASS_L3L4_ERRORS;

		if (adapter->function_caps & BE_FUNCTION_CAPS_RSS) {
			cap_flags |= BE_IF_FLAGS_RSS;
			en_flags |= BE_IF_FLAGS_RSS;
		}
	}

	status = be_cmd_if_create(adapter, cap_flags, en_flags,
			netdev->dev_addr, false /* pmac_invalid */,
			&adapter->if_handle, &adapter->pmac_id, 0);
	if (status != 0)
		goto do_none;

	if (be_physfn(adapter)) {
		if (adapter->sriov_enabled) {
			while (vf < num_vfs) {
				cap_flags = en_flags = BE_IF_FLAGS_UNTAGGED |
						BE_IF_FLAGS_BROADCAST;
				status = be_cmd_if_create(adapter, cap_flags,
					en_flags, mac, true,
					&adapter->vf_cfg[vf].vf_if_handle,
					NULL, vf + 1);
				if (status) {
					dev_err(&adapter->pdev->dev,
					"Interface Create failed for VF %d\n",
					vf);
					goto if_destroy;
				}
				adapter->vf_cfg[vf].vf_pmac_id =
						BE_INVALID_PMAC_ID;
				vf++;
			}
		}
	} else {
		status = be_cmd_mac_addr_query(adapter, mac,
			MAC_ADDRESS_TYPE_NETWORK, false, adapter->if_handle);
		if (!status) {
			memcpy(adapter->netdev->dev_addr, mac, ETH_ALEN);
			memcpy(adapter->netdev->perm_addr, mac, ETH_ALEN);
		}
	}

	status = be_tx_queues_create(adapter);
	if (status != 0)
		goto if_destroy;

	status = be_rx_queues_create(adapter);
	if (status != 0)
		goto tx_qs_destroy;

	/* Allow all priorities by default. A GRP5 evt may modify this */
	adapter->vlan_prio_bmap = 0xff;

	status = be_mcc_queues_create(adapter);
	if (status != 0)
		goto rx_qs_destroy;

	adapter->link_speed = -1;

	return 0;

rx_qs_destroy:
	be_rx_queues_destroy(adapter);
tx_qs_destroy:
	be_tx_queues_destroy(adapter);
if_destroy:
	if (be_physfn(adapter) && adapter->sriov_enabled)
		for (vf = 0; vf < num_vfs; vf++)
			if (adapter->vf_cfg[vf].vf_if_handle)
				be_cmd_if_destroy(adapter,
					adapter->vf_cfg[vf].vf_if_handle,
					vf + 1);
	be_cmd_if_destroy(adapter, adapter->if_handle, 0);
do_none:
	return status;
}

static int be_clear(struct be_adapter *adapter)
{
	int vf;

	if (be_physfn(adapter) && adapter->sriov_enabled)
		be_vf_eth_addr_rem(adapter);

	be_mcc_queues_destroy(adapter);
	be_rx_queues_destroy(adapter);
	be_tx_queues_destroy(adapter);
	adapter->eq_next_idx = 0;

	if (be_physfn(adapter) && adapter->sriov_enabled)
		for (vf = 0; vf < num_vfs; vf++)
			if (adapter->vf_cfg[vf].vf_if_handle)
				be_cmd_if_destroy(adapter,
					adapter->vf_cfg[vf].vf_if_handle,
					vf + 1);

	be_cmd_if_destroy(adapter, adapter->if_handle, 0);

	adapter->be3_native = 0;

	/* tell fw we're done with firing cmds */
	be_cmd_fw_clean(adapter);
	return 0;
}

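/*
 * Firmware flashing support. be_load_fw() below is the common entry
 * point; it dispatches either to the Lancer object-write path or to the
 * UFI-image path used by the BE2/BE3 generations.
 */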
#define FW_FILE_HDR_SIGN	"ServerEngines Corp. "
static bool be_flash_redboot(struct be_adapter *adapter,
			const u8 *p, u32 img_start, int image_size,
			int hdr_size)
{
	u32 crc_offset;
	u8 flashed_crc[4];
	int status;

	crc_offset = hdr_size + img_start + image_size - 4;

	p += crc_offset;

	status = be_cmd_get_flash_crc(adapter, flashed_crc,
			(image_size - 4));
	if (status) {
		dev_err(&adapter->pdev->dev,
			"could not get crc from flash, not flashing redboot\n");
		return false;
	}

	/* update redboot only if crc does not match */
	return memcmp(flashed_crc, p, 4) != 0;
}

static bool phy_flashing_required(struct be_adapter *adapter)
{
	int status = 0;
	struct be_phy_info phy_info;

	status = be_cmd_get_phy_info(adapter, &phy_info);
	if (status)
		return false;
	return (phy_info.phy_type == TN_8022) &&
		(phy_info.interface_type == PHY_TYPE_BASET_10GB);
}

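/*
 * Walk the per-generation component table and flash each section of the
 * UFI image in 32KB chunks. All but the last chunk of a component are sent
 * with a SAVE op; the final chunk uses a FLASH op to commit. NCSI images
 * are skipped on old FW, PHY images when no flashable PHY is present, and
 * redboot unless its CRC differs from what is already in flash.
 */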
static int be_flash_data(struct be_adapter *adapter,
			const struct firmware *fw,
			struct be_dma_mem *flash_cmd, int num_of_images)
{
	int status = 0, i, filehdr_size = 0;
	u32 total_bytes = 0, flash_op;
	int num_bytes;
	const u8 *p = fw->data;
	struct be_cmd_write_flashrom *req = flash_cmd->va;
	const struct flash_comp *pflashcomp;
	int num_comp;

	static const struct flash_comp gen3_flash_types[10] = {
		{ FLASH_iSCSI_PRIMARY_IMAGE_START_g3, IMG_TYPE_ISCSI_ACTIVE,
			FLASH_IMAGE_MAX_SIZE_g3},
		{ FLASH_REDBOOT_START_g3, IMG_TYPE_REDBOOT,
			FLASH_REDBOOT_IMAGE_MAX_SIZE_g3},
		{ FLASH_iSCSI_BIOS_START_g3, IMG_TYPE_BIOS,
			FLASH_BIOS_IMAGE_MAX_SIZE_g3},
		{ FLASH_PXE_BIOS_START_g3, IMG_TYPE_PXE_BIOS,
			FLASH_BIOS_IMAGE_MAX_SIZE_g3},
		{ FLASH_FCoE_BIOS_START_g3, IMG_TYPE_FCOE_BIOS,
			FLASH_BIOS_IMAGE_MAX_SIZE_g3},
		{ FLASH_iSCSI_BACKUP_IMAGE_START_g3, IMG_TYPE_ISCSI_BACKUP,
			FLASH_IMAGE_MAX_SIZE_g3},
		{ FLASH_FCoE_PRIMARY_IMAGE_START_g3, IMG_TYPE_FCOE_FW_ACTIVE,
			FLASH_IMAGE_MAX_SIZE_g3},
		{ FLASH_FCoE_BACKUP_IMAGE_START_g3, IMG_TYPE_FCOE_FW_BACKUP,
			FLASH_IMAGE_MAX_SIZE_g3},
		{ FLASH_NCSI_START_g3, IMG_TYPE_NCSI_FW,
			FLASH_NCSI_IMAGE_MAX_SIZE_g3},
		{ FLASH_PHY_FW_START_g3, IMG_TYPE_PHY_FW,
			FLASH_PHY_FW_IMAGE_MAX_SIZE_g3}
	};
	static const struct flash_comp gen2_flash_types[8] = {
		{ FLASH_iSCSI_PRIMARY_IMAGE_START_g2, IMG_TYPE_ISCSI_ACTIVE,
			FLASH_IMAGE_MAX_SIZE_g2},
		{ FLASH_REDBOOT_START_g2, IMG_TYPE_REDBOOT,
			FLASH_REDBOOT_IMAGE_MAX_SIZE_g2},
		{ FLASH_iSCSI_BIOS_START_g2, IMG_TYPE_BIOS,
			FLASH_BIOS_IMAGE_MAX_SIZE_g2},
		{ FLASH_PXE_BIOS_START_g2, IMG_TYPE_PXE_BIOS,
			FLASH_BIOS_IMAGE_MAX_SIZE_g2},
		{ FLASH_FCoE_BIOS_START_g2, IMG_TYPE_FCOE_BIOS,
			FLASH_BIOS_IMAGE_MAX_SIZE_g2},
		{ FLASH_iSCSI_BACKUP_IMAGE_START_g2, IMG_TYPE_ISCSI_BACKUP,
			FLASH_IMAGE_MAX_SIZE_g2},
		{ FLASH_FCoE_PRIMARY_IMAGE_START_g2, IMG_TYPE_FCOE_FW_ACTIVE,
			FLASH_IMAGE_MAX_SIZE_g2},
		{ FLASH_FCoE_BACKUP_IMAGE_START_g2, IMG_TYPE_FCOE_FW_BACKUP,
			FLASH_IMAGE_MAX_SIZE_g2}
	};

	if (adapter->generation == BE_GEN3) {
		pflashcomp = gen3_flash_types;
		filehdr_size = sizeof(struct flash_file_hdr_g3);
		num_comp = ARRAY_SIZE(gen3_flash_types);
	} else {
		pflashcomp = gen2_flash_types;
		filehdr_size = sizeof(struct flash_file_hdr_g2);
		num_comp = ARRAY_SIZE(gen2_flash_types);
	}
	for (i = 0; i < num_comp; i++) {
		if ((pflashcomp[i].optype == IMG_TYPE_NCSI_FW) &&
			memcmp(adapter->fw_ver, "3.102.148.0", 11) < 0)
			continue;
		if (pflashcomp[i].optype == IMG_TYPE_PHY_FW) {
			if (!phy_flashing_required(adapter))
				continue;
		}
		if ((pflashcomp[i].optype == IMG_TYPE_REDBOOT) &&
			(!be_flash_redboot(adapter, fw->data,
			pflashcomp[i].offset, pflashcomp[i].size, filehdr_size +
			(num_of_images * sizeof(struct image_hdr)))))
			continue;
		p = fw->data;
		p += filehdr_size + pflashcomp[i].offset
			+ (num_of_images * sizeof(struct image_hdr));
		if (p + pflashcomp[i].size > fw->data + fw->size)
			return -1;
		total_bytes = pflashcomp[i].size;
		while (total_bytes) {
			if (total_bytes > 32*1024)
				num_bytes = 32*1024;
			else
				num_bytes = total_bytes;
			total_bytes -= num_bytes;
			if (!total_bytes) {
				if (pflashcomp[i].optype == IMG_TYPE_PHY_FW)
					flash_op = FLASHROM_OPER_PHY_FLASH;
				else
					flash_op = FLASHROM_OPER_FLASH;
			} else {
				if (pflashcomp[i].optype == IMG_TYPE_PHY_FW)
					flash_op = FLASHROM_OPER_PHY_SAVE;
				else
					flash_op = FLASHROM_OPER_SAVE;
			}
			memcpy(req->params.data_buf, p, num_bytes);
			p += num_bytes;
			status = be_cmd_write_flashrom(adapter, flash_cmd,
				pflashcomp[i].optype, flash_op, num_bytes);
			if (status) {
				if ((status == ILLEGAL_IOCTL_REQ) &&
					(pflashcomp[i].optype ==
						IMG_TYPE_PHY_FW))
					break;
				dev_err(&adapter->pdev->dev,
					"cmd to write to flash rom failed.\n");
				return -1;
			}
		}
	}
	return 0;
}

static int get_ufigen_type(struct flash_file_hdr_g2 *fhdr)
{
	if (fhdr == NULL)
		return 0;
	if (fhdr->build[0] == '3')
		return BE_GEN3;
	else if (fhdr->build[0] == '2')
		return BE_GEN2;
	else
		return 0;
}

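/*
 * Lancer flashes via the FW write_object command: the image is streamed
 * to the "/prg" object in 32KB chunks, then committed with a final
 * zero-length write. The FW reports how much it consumed in data_written,
 * which drives the loop forward.
 */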
static int lancer_fw_download(struct be_adapter *adapter,
				const struct firmware *fw)
{
#define LANCER_FW_DOWNLOAD_CHUNK	(32 * 1024)
#define LANCER_FW_DOWNLOAD_LOCATION	"/prg"
	struct be_dma_mem flash_cmd;
	const u8 *data_ptr = NULL;
	u8 *dest_image_ptr = NULL;
	size_t image_size = 0;
	u32 chunk_size = 0;
	u32 data_written = 0;
	u32 offset = 0;
	int status = 0;
	u8 add_status = 0;

	if (!IS_ALIGNED(fw->size, sizeof(u32))) {
		dev_err(&adapter->pdev->dev,
			"FW image not properly aligned. "
			"Length must be 4-byte aligned.\n");
		status = -EINVAL;
		goto lancer_fw_exit;
	}

	flash_cmd.size = sizeof(struct lancer_cmd_req_write_object)
				+ LANCER_FW_DOWNLOAD_CHUNK;
	flash_cmd.va = dma_alloc_coherent(&adapter->pdev->dev, flash_cmd.size,
					  &flash_cmd.dma, GFP_KERNEL);
	if (!flash_cmd.va) {
		status = -ENOMEM;
		dev_err(&adapter->pdev->dev,
			"Memory allocation failure while flashing\n");
		goto lancer_fw_exit;
	}

	dest_image_ptr = flash_cmd.va +
				sizeof(struct lancer_cmd_req_write_object);
	image_size = fw->size;
	data_ptr = fw->data;

	while (image_size) {
		chunk_size = min_t(u32, image_size, LANCER_FW_DOWNLOAD_CHUNK);

		/* Copy the image chunk content. */
		memcpy(dest_image_ptr, data_ptr, chunk_size);

		status = lancer_cmd_write_object(adapter, &flash_cmd,
				chunk_size, offset, LANCER_FW_DOWNLOAD_LOCATION,
				&data_written, &add_status);

		if (status)
			break;

		offset += data_written;
		data_ptr += data_written;
		image_size -= data_written;
	}

	if (!status) {
		/* Commit the FW written */
		status = lancer_cmd_write_object(adapter, &flash_cmd,
				0, offset, LANCER_FW_DOWNLOAD_LOCATION,
				&data_written, &add_status);
	}

	dma_free_coherent(&adapter->pdev->dev, flash_cmd.size, flash_cmd.va,
			  flash_cmd.dma);
	if (status) {
		dev_err(&adapter->pdev->dev,
			"Firmware load error. "
			"Status code: 0x%x Additional Status: 0x%x\n",
			status, add_status);
		goto lancer_fw_exit;
	}

	dev_info(&adapter->pdev->dev, "Firmware flashed successfully\n");
lancer_fw_exit:
	return status;
}

static int be_fw_download(struct be_adapter *adapter,
				const struct firmware *fw)
{
	struct flash_file_hdr_g2 *fhdr;
	struct flash_file_hdr_g3 *fhdr3;
	struct image_hdr *img_hdr_ptr = NULL;
	struct be_dma_mem flash_cmd;
	const u8 *p;
	int status = 0, i = 0, num_imgs = 0;

	p = fw->data;
	fhdr = (struct flash_file_hdr_g2 *) p;

	flash_cmd.size = sizeof(struct be_cmd_write_flashrom) + 32*1024;
	flash_cmd.va = dma_alloc_coherent(&adapter->pdev->dev, flash_cmd.size,
					  &flash_cmd.dma, GFP_KERNEL);
	if (!flash_cmd.va) {
		status = -ENOMEM;
		dev_err(&adapter->pdev->dev,
			"Memory allocation failure while flashing\n");
		goto be_fw_exit;
	}

	if ((adapter->generation == BE_GEN3) &&
			(get_ufigen_type(fhdr) == BE_GEN3)) {
		fhdr3 = (struct flash_file_hdr_g3 *) fw->data;
		num_imgs = le32_to_cpu(fhdr3->num_imgs);
		for (i = 0; i < num_imgs; i++) {
			img_hdr_ptr = (struct image_hdr *) (fw->data +
					(sizeof(struct flash_file_hdr_g3) +
					 i * sizeof(struct image_hdr)));
			if (le32_to_cpu(img_hdr_ptr->imageid) == 1)
				status = be_flash_data(adapter, fw, &flash_cmd,
							num_imgs);
		}
	} else if ((adapter->generation == BE_GEN2) &&
			(get_ufigen_type(fhdr) == BE_GEN2)) {
		status = be_flash_data(adapter, fw, &flash_cmd, 0);
	} else {
		dev_err(&adapter->pdev->dev,
			"UFI and Interface are not compatible for flashing\n");
		status = -1;
	}

	dma_free_coherent(&adapter->pdev->dev, flash_cmd.size, flash_cmd.va,
			  flash_cmd.dma);
	if (status) {
		dev_err(&adapter->pdev->dev, "Firmware load error\n");
		goto be_fw_exit;
	}

	dev_info(&adapter->pdev->dev, "Firmware flashed successfully\n");

be_fw_exit:
	return status;
}

int be_load_fw(struct be_adapter *adapter, u8 *fw_file)
{
	const struct firmware *fw;
	int status;

	if (!netif_running(adapter->netdev)) {
		dev_err(&adapter->pdev->dev,
			"Firmware load not allowed (interface is down)\n");
		return -1;
	}

	status = request_firmware(&fw, fw_file, &adapter->pdev->dev);
	if (status)
		goto fw_exit;

	dev_info(&adapter->pdev->dev, "Flashing firmware file %s\n", fw_file);

	if (lancer_chip(adapter))
		status = lancer_fw_download(adapter, fw);
	else
		status = be_fw_download(adapter, fw);

fw_exit:
	release_firmware(fw);
	return status;
}
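
/*
 * Illustrative (not part of the driver): user space typically reaches
 * be_load_fw() through ethtool's flash-device operation, e.g.
 *
 *	ethtool -f eth0 be_fw.ufi
 *
 * where "eth0" and "be_fw.ufi" are placeholder names. As checked above,
 * the interface must be up for flashing to proceed.
 */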

static struct net_device_ops be_netdev_ops = {
	.ndo_open		= be_open,
	.ndo_stop		= be_close,
	.ndo_start_xmit		= be_xmit,
	.ndo_set_rx_mode	= be_set_multicast_list,
	.ndo_set_mac_address	= be_mac_addr_set,
	.ndo_change_mtu		= be_change_mtu,
	.ndo_get_stats64	= be_get_stats64,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_vlan_rx_add_vid	= be_vlan_add_vid,
	.ndo_vlan_rx_kill_vid	= be_vlan_rem_vid,
	.ndo_set_vf_mac		= be_set_vf_mac,
	.ndo_set_vf_vlan	= be_set_vf_vlan,
	.ndo_set_vf_tx_rate	= be_set_vf_tx_rate,
	.ndo_get_vf_config	= be_get_vf_config
};

static void be_netdev_init(struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_rx_obj *rxo;
	int i;

	netdev->hw_features |= NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6 |
		NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_RXCSUM |
		NETIF_F_HW_VLAN_TX;
	if (be_multi_rxq(adapter))
		netdev->hw_features |= NETIF_F_RXHASH;

	netdev->features |= netdev->hw_features |
		NETIF_F_HW_VLAN_RX | NETIF_F_HW_VLAN_FILTER;

	netdev->vlan_features |= NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6 |
		NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;

	netdev->flags |= IFF_MULTICAST;

	/* Default settings for Rx and Tx flow control */
	adapter->rx_fc = true;
	adapter->tx_fc = true;

	netif_set_gso_max_size(netdev, 65535);

	BE_SET_NETDEV_OPS(netdev, &be_netdev_ops);

	SET_ETHTOOL_OPS(netdev, &be_ethtool_ops);

	for_all_rx_queues(adapter, rxo, i)
		netif_napi_add(netdev, &rxo->rx_eq.napi, be_poll_rx,
			BE_NAPI_WEIGHT);

	netif_napi_add(netdev, &adapter->tx_eq.napi, be_poll_tx_mcc,
		BE_NAPI_WEIGHT);
}

static void be_unmap_pci_bars(struct be_adapter *adapter)
{
	if (adapter->csr)
		iounmap(adapter->csr);
	if (adapter->db)
		iounmap(adapter->db);
	if (adapter->pcicfg && be_physfn(adapter))
		iounmap(adapter->pcicfg);
}

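/*
 * BAR layout, as implied by the mapping logic below: Lancer exposes only
 * a doorbell BAR (BAR 0). On BE2 the PF maps CSR (BAR 2), pcicfg (BAR 1)
 * and doorbells (BAR 4); on BE3 pcicfg moves to BAR 0. VFs map only the
 * doorbell BAR, with the shadow pcicfg at a fixed offset inside it.
 */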
static int be_map_pci_bars(struct be_adapter *adapter)
{
	u8 __iomem *addr;
	int pcicfg_reg, db_reg;

	if (lancer_chip(adapter)) {
		addr = ioremap_nocache(pci_resource_start(adapter->pdev, 0),
			pci_resource_len(adapter->pdev, 0));
		if (addr == NULL)
			return -ENOMEM;
		adapter->db = addr;
		return 0;
	}

	if (be_physfn(adapter)) {
		addr = ioremap_nocache(pci_resource_start(adapter->pdev, 2),
				pci_resource_len(adapter->pdev, 2));
		if (addr == NULL)
			return -ENOMEM;
		adapter->csr = addr;
	}

	if (adapter->generation == BE_GEN2) {
		pcicfg_reg = 1;
		db_reg = 4;
	} else {
		pcicfg_reg = 0;
		if (be_physfn(adapter))
			db_reg = 4;
		else
			db_reg = 0;
	}
	addr = ioremap_nocache(pci_resource_start(adapter->pdev, db_reg),
				pci_resource_len(adapter->pdev, db_reg));
	if (addr == NULL)
		goto pci_map_err;
	adapter->db = addr;

	if (be_physfn(adapter)) {
		addr = ioremap_nocache(
				pci_resource_start(adapter->pdev, pcicfg_reg),
				pci_resource_len(adapter->pdev, pcicfg_reg));
		if (addr == NULL)
			goto pci_map_err;
		adapter->pcicfg = addr;
	} else
		adapter->pcicfg = adapter->db + SRIOV_VF_PCICFG_OFFSET;

	return 0;
pci_map_err:
	be_unmap_pci_bars(adapter);
	return -ENOMEM;
}

static void be_ctrl_cleanup(struct be_adapter *adapter)
{
	struct be_dma_mem *mem = &adapter->mbox_mem_alloced;

	be_unmap_pci_bars(adapter);

	if (mem->va)
		dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
				  mem->dma);

	mem = &adapter->rx_filter;
	if (mem->va)
		dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
				  mem->dma);
}

static int be_ctrl_init(struct be_adapter *adapter)
{
	struct be_dma_mem *mbox_mem_alloc = &adapter->mbox_mem_alloced;
	struct be_dma_mem *mbox_mem_align = &adapter->mbox_mem;
	struct be_dma_mem *rx_filter = &adapter->rx_filter;
	int status;

	status = be_map_pci_bars(adapter);
	if (status)
		goto done;

	mbox_mem_alloc->size = sizeof(struct be_mcc_mailbox) + 16;
	mbox_mem_alloc->va = dma_alloc_coherent(&adapter->pdev->dev,
						mbox_mem_alloc->size,
						&mbox_mem_alloc->dma,
						GFP_KERNEL);
	if (!mbox_mem_alloc->va) {
		status = -ENOMEM;
		goto unmap_pci_bars;
	}
	mbox_mem_align->size = sizeof(struct be_mcc_mailbox);
	mbox_mem_align->va = PTR_ALIGN(mbox_mem_alloc->va, 16);
	mbox_mem_align->dma = PTR_ALIGN(mbox_mem_alloc->dma, 16);
	memset(mbox_mem_align->va, 0, sizeof(struct be_mcc_mailbox));

	rx_filter->size = sizeof(struct be_cmd_req_rx_filter);
	rx_filter->va = dma_alloc_coherent(&adapter->pdev->dev, rx_filter->size,
					&rx_filter->dma, GFP_KERNEL);
	if (rx_filter->va == NULL) {
		status = -ENOMEM;
		goto free_mbox;
	}
	memset(rx_filter->va, 0, rx_filter->size);

	mutex_init(&adapter->mbox_lock);
	spin_lock_init(&adapter->mcc_lock);
	spin_lock_init(&adapter->mcc_cq_lock);

	init_completion(&adapter->flash_compl);
	pci_save_state(adapter->pdev);
	return 0;

free_mbox:
	dma_free_coherent(&adapter->pdev->dev, mbox_mem_alloc->size,
			  mbox_mem_alloc->va, mbox_mem_alloc->dma);

unmap_pci_bars:
	be_unmap_pci_bars(adapter);

done:
	return status;
}

static void be_stats_cleanup(struct be_adapter *adapter)
{
	struct be_dma_mem *cmd = &adapter->stats_cmd;

	if (cmd->va)
		dma_free_coherent(&adapter->pdev->dev, cmd->size,
				  cmd->va, cmd->dma);
}

static int be_stats_init(struct be_adapter *adapter)
{
	struct be_dma_mem *cmd = &adapter->stats_cmd;

	if (adapter->generation == BE_GEN2) {
		cmd->size = sizeof(struct be_cmd_req_get_stats_v0);
	} else {
		if (lancer_chip(adapter))
			cmd->size = sizeof(struct lancer_cmd_req_pport_stats);
		else
			cmd->size = sizeof(struct be_cmd_req_get_stats_v1);
	}
	cmd->va = dma_alloc_coherent(&adapter->pdev->dev, cmd->size, &cmd->dma,
				     GFP_KERNEL);
	if (cmd->va == NULL)
		return -1;
	memset(cmd->va, 0, cmd->size);
	return 0;
}

static void __devexit be_remove(struct pci_dev *pdev)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);

	if (!adapter)
		return;

	cancel_delayed_work_sync(&adapter->work);

	unregister_netdev(adapter->netdev);

	be_clear(adapter);

	be_stats_cleanup(adapter);

	be_ctrl_cleanup(adapter);

	kfree(adapter->vf_cfg);
	be_sriov_disable(adapter);

	be_msix_disable(adapter);

	pci_set_drvdata(pdev, NULL);
	pci_release_regions(pdev);
	pci_disable_device(pdev);

	free_netdev(adapter->netdev);
}

static int be_get_config(struct be_adapter *adapter)
{
	int status;
	u8 mac[ETH_ALEN];

	status = be_cmd_get_fw_ver(adapter, adapter->fw_ver);
	if (status)
		return status;

	status = be_cmd_query_fw_cfg(adapter, &adapter->port_num,
			&adapter->function_mode, &adapter->function_caps);
	if (status)
		return status;

	memset(mac, 0, ETH_ALEN);

	/* A default permanent address is given to each VF for Lancer */
	if (be_physfn(adapter) || lancer_chip(adapter)) {
		status = be_cmd_mac_addr_query(adapter, mac,
			MAC_ADDRESS_TYPE_NETWORK, true /* permanent */, 0);

		if (status)
			return status;

		if (!is_valid_ether_addr(mac))
			return -EADDRNOTAVAIL;

		memcpy(adapter->netdev->dev_addr, mac, ETH_ALEN);
		memcpy(adapter->netdev->perm_addr, mac, ETH_ALEN);
	}

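	/* 0x400 in function_mode appears to correspond to the FLEX10
	 * (multi-channel) mode bit, which quarters the usable VLAN table;
	 * later revisions of this driver name it FLEX10_MODE.
	 */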
	if (adapter->function_mode & 0x400)
		adapter->max_vlans = BE_NUM_VLANS_SUPPORTED / 4;
	else
		adapter->max_vlans = BE_NUM_VLANS_SUPPORTED;

	status = be_cmd_get_cntl_attributes(adapter);
	if (status)
		return status;

	if ((num_vfs && adapter->sriov_enabled) ||
		(adapter->function_mode & 0x400) ||
		lancer_chip(adapter) || !be_physfn(adapter)) {
		adapter->num_tx_qs = 1;
		netif_set_real_num_tx_queues(adapter->netdev,
			adapter->num_tx_qs);
	} else {
		adapter->num_tx_qs = MAX_TX_QS;
	}

	return 0;
}

static int be_dev_family_check(struct be_adapter *adapter)
{
	struct pci_dev *pdev = adapter->pdev;
	u32 sli_intf = 0, if_type;

	switch (pdev->device) {
	case BE_DEVICE_ID1:
	case OC_DEVICE_ID1:
		adapter->generation = BE_GEN2;
		break;
	case BE_DEVICE_ID2:
	case OC_DEVICE_ID2:
		adapter->generation = BE_GEN3;
		break;
	case OC_DEVICE_ID3:
	case OC_DEVICE_ID4:
		pci_read_config_dword(pdev, SLI_INTF_REG_OFFSET, &sli_intf);
		if_type = (sli_intf & SLI_INTF_IF_TYPE_MASK) >>
						SLI_INTF_IF_TYPE_SHIFT;

		if (((sli_intf & SLI_INTF_VALID_MASK) != SLI_INTF_VALID) ||
			if_type != 0x02) {
			dev_err(&pdev->dev, "SLI_INTF reg val is not valid\n");
			return -EINVAL;
		}
		adapter->sli_family = ((sli_intf & SLI_INTF_FAMILY_MASK) >>
					 SLI_INTF_FAMILY_SHIFT);
		adapter->generation = BE_GEN3;
		break;
	default:
		adapter->generation = 0;
	}
	return 0;
}

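/*
 * Poll SLIPORT_STATUS until the FW reports ready; with 500 iterations at
 * 20ms apiece this waits up to roughly 10 seconds before giving up.
 */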
static int lancer_wait_ready(struct be_adapter *adapter)
{
#define SLIPORT_READY_TIMEOUT 500
	u32 sliport_status;
	int status = 0, i;

	for (i = 0; i < SLIPORT_READY_TIMEOUT; i++) {
		sliport_status = ioread32(adapter->db + SLIPORT_STATUS_OFFSET);
		if (sliport_status & SLIPORT_STATUS_RDY_MASK)
			break;

		msleep(20);
	}

	if (i == SLIPORT_READY_TIMEOUT)
		status = -1;

	return status;
}

static int lancer_test_and_set_rdy_state(struct be_adapter *adapter)
{
	int status;
	u32 sliport_status, err, reset_needed;

	status = lancer_wait_ready(adapter);
	if (!status) {
		sliport_status = ioread32(adapter->db + SLIPORT_STATUS_OFFSET);
		err = sliport_status & SLIPORT_STATUS_ERR_MASK;
		reset_needed = sliport_status & SLIPORT_STATUS_RN_MASK;
		if (err && reset_needed) {
			iowrite32(SLI_PORT_CONTROL_IP_MASK,
				  adapter->db + SLIPORT_CONTROL_OFFSET);

			/* check if the adapter has corrected the error */
			status = lancer_wait_ready(adapter);
			sliport_status = ioread32(adapter->db +
						  SLIPORT_STATUS_OFFSET);
			sliport_status &= (SLIPORT_STATUS_ERR_MASK |
						SLIPORT_STATUS_RN_MASK);
			if (status || sliport_status)
				status = -1;
		} else if (err || reset_needed) {
			status = -1;
		}
	}
	return status;
}

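/*
 * PCI probe: allocate the netdev, map BARs, bring the FW to a known state
 * (POST/reset), then create queues via be_setup() before registering the
 * netdev. The error labels at the bottom unwind in reverse order.
 */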
static int __devinit be_probe(struct pci_dev *pdev,
			const struct pci_device_id *pdev_id)
{
	int status = 0;
	struct be_adapter *adapter;
	struct net_device *netdev;

	status = pci_enable_device(pdev);
	if (status)
		goto do_none;

	status = pci_request_regions(pdev, DRV_NAME);
	if (status)
		goto disable_dev;
	pci_set_master(pdev);

	netdev = alloc_etherdev_mq(sizeof(struct be_adapter), MAX_TX_QS);
	if (netdev == NULL) {
		status = -ENOMEM;
		goto rel_reg;
	}
	adapter = netdev_priv(netdev);
	adapter->pdev = pdev;
	pci_set_drvdata(pdev, adapter);

	status = be_dev_family_check(adapter);
	if (status)
		goto free_netdev;

	adapter->netdev = netdev;
	SET_NETDEV_DEV(netdev, &pdev->dev);

	status = dma_set_mask(&pdev->dev, DMA_BIT_MASK(64));
	if (!status) {
		netdev->features |= NETIF_F_HIGHDMA;
	} else {
		status = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
		if (status) {
			dev_err(&pdev->dev, "Could not set PCI DMA mask\n");
			goto free_netdev;
		}
	}

	be_sriov_enable(adapter);
	if (adapter->sriov_enabled) {
		adapter->vf_cfg = kcalloc(num_vfs,
			sizeof(struct be_vf_cfg), GFP_KERNEL);

		if (!adapter->vf_cfg)
			goto free_netdev;
	}

	status = be_ctrl_init(adapter);
	if (status)
		goto free_vf_cfg;

	if (lancer_chip(adapter)) {
		status = lancer_test_and_set_rdy_state(adapter);
		if (status) {
			dev_err(&pdev->dev, "Adapter in non-recoverable error\n");
			goto ctrl_clean;
		}
	}

	/* sync up with fw's ready state */
	if (be_physfn(adapter)) {
		status = be_cmd_POST(adapter);
		if (status)
			goto ctrl_clean;
	}

	/* tell fw we're ready to fire cmds */
	status = be_cmd_fw_init(adapter);
	if (status)
		goto ctrl_clean;

	status = be_cmd_reset_function(adapter);
	if (status)
		goto ctrl_clean;

	status = be_stats_init(adapter);
	if (status)
		goto ctrl_clean;

	status = be_get_config(adapter);
	if (status)
		goto stats_clean;

	/* The INTR bit may be set in the card when probed by a kdump kernel
	 * after a crash.
	 */
	if (!lancer_chip(adapter))
		be_intr_set(adapter, false);

	be_msix_enable(adapter);

	INIT_DELAYED_WORK(&adapter->work, be_worker);

	status = be_setup(adapter);
	if (status)
		goto msix_disable;

	be_netdev_init(netdev);
	status = register_netdev(netdev);
	if (status != 0)
		goto unsetup;

	if (be_physfn(adapter) && adapter->sriov_enabled) {
		u8 mac_speed;
		u16 vf, lnk_speed;

		if (!lancer_chip(adapter)) {
			status = be_vf_eth_addr_config(adapter);
			if (status)
				goto unreg_netdev;
		}

		for (vf = 0; vf < num_vfs; vf++) {
			status = be_cmd_link_status_query(adapter, &mac_speed,
						&lnk_speed, vf + 1);
			if (!status)
				adapter->vf_cfg[vf].vf_tx_rate = lnk_speed * 10;
			else
				goto unreg_netdev;
		}
	}

	dev_info(&pdev->dev, "%s port %d\n", nic_name(pdev), adapter->port_num);

	schedule_delayed_work(&adapter->work, msecs_to_jiffies(100));
	return 0;

unreg_netdev:
	unregister_netdev(netdev);
unsetup:
	be_clear(adapter);
msix_disable:
	be_msix_disable(adapter);
stats_clean:
	be_stats_cleanup(adapter);
ctrl_clean:
	be_ctrl_cleanup(adapter);
free_vf_cfg:
	kfree(adapter->vf_cfg);
free_netdev:
	be_sriov_disable(adapter);
	free_netdev(netdev);
	pci_set_drvdata(pdev, NULL);
rel_reg:
	pci_release_regions(pdev);
disable_dev:
	pci_disable_device(pdev);
do_none:
	dev_err(&pdev->dev, "%s initialization failed\n", nic_name(pdev));
	return status;
}

static int be_suspend(struct pci_dev *pdev, pm_message_t state)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);
	struct net_device *netdev = adapter->netdev;

	cancel_delayed_work_sync(&adapter->work);
	if (adapter->wol)
		be_setup_wol(adapter, true);

	netif_device_detach(netdev);
	if (netif_running(netdev)) {
		rtnl_lock();
		be_close(netdev);
		rtnl_unlock();
	}
	be_cmd_get_flow_control(adapter, &adapter->tx_fc, &adapter->rx_fc);
	be_clear(adapter);

	be_msix_disable(adapter);
	pci_save_state(pdev);
	pci_disable_device(pdev);
	pci_set_power_state(pdev, pci_choose_state(pdev, state));
	return 0;
}

static int be_resume(struct pci_dev *pdev)
{
	int status = 0;
	struct be_adapter *adapter = pci_get_drvdata(pdev);
	struct net_device *netdev = adapter->netdev;

	netif_device_detach(netdev);

	status = pci_enable_device(pdev);
	if (status)
		return status;

	pci_set_power_state(pdev, 0);
	pci_restore_state(pdev);

	be_msix_enable(adapter);
	/* tell fw we're ready to fire cmds */
	status = be_cmd_fw_init(adapter);
	if (status)
		return status;

	be_setup(adapter);
	if (netif_running(netdev)) {
		rtnl_lock();
		be_open(netdev);
		rtnl_unlock();
	}
	netif_device_attach(netdev);

	if (adapter->wol)
		be_setup_wol(adapter, false);

	schedule_delayed_work(&adapter->work, msecs_to_jiffies(100));
	return 0;
}

/*
 * An FLR will stop BE from DMAing any data.
 */
static void be_shutdown(struct pci_dev *pdev)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);

	if (!adapter)
		return;

	cancel_delayed_work_sync(&adapter->work);

	netif_device_detach(adapter->netdev);

	if (adapter->wol)
		be_setup_wol(adapter, true);

	be_cmd_reset_function(adapter);

	pci_disable_device(pdev);
}

static pci_ers_result_t be_eeh_err_detected(struct pci_dev *pdev,
				pci_channel_state_t state)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);
	struct net_device *netdev = adapter->netdev;

	dev_err(&adapter->pdev->dev, "EEH error detected\n");

	adapter->eeh_err = true;

	netif_device_detach(netdev);

	if (netif_running(netdev)) {
		rtnl_lock();
		be_close(netdev);
		rtnl_unlock();
	}
	be_clear(adapter);

	if (state == pci_channel_io_perm_failure)
		return PCI_ERS_RESULT_DISCONNECT;

	pci_disable_device(pdev);

	return PCI_ERS_RESULT_NEED_RESET;
}

static pci_ers_result_t be_eeh_reset(struct pci_dev *pdev)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);
	int status;

	dev_info(&adapter->pdev->dev, "EEH reset\n");
	adapter->eeh_err = false;

	status = pci_enable_device(pdev);
	if (status)
		return PCI_ERS_RESULT_DISCONNECT;

	pci_set_master(pdev);
	pci_set_power_state(pdev, 0);
	pci_restore_state(pdev);

	/* Check if card is ok and fw is ready */
	status = be_cmd_POST(adapter);
	if (status)
		return PCI_ERS_RESULT_DISCONNECT;

	return PCI_ERS_RESULT_RECOVERED;
}

static void be_eeh_resume(struct pci_dev *pdev)
{
	int status = 0;
	struct be_adapter *adapter = pci_get_drvdata(pdev);
	struct net_device *netdev = adapter->netdev;

	dev_info(&adapter->pdev->dev, "EEH resume\n");

	pci_save_state(pdev);

	/* tell fw we're ready to fire cmds */
	status = be_cmd_fw_init(adapter);
	if (status)
		goto err;

	status = be_setup(adapter);
	if (status)
		goto err;

	if (netif_running(netdev)) {
		status = be_open(netdev);
		if (status)
			goto err;
	}
	netif_device_attach(netdev);
	return;
err:
	dev_err(&adapter->pdev->dev, "EEH resume failed\n");
}

static struct pci_error_handlers be_eeh_handlers = {
	.error_detected = be_eeh_err_detected,
	.slot_reset = be_eeh_reset,
	.resume = be_eeh_resume,
};

static struct pci_driver be_driver = {
	.name = DRV_NAME,
	.id_table = be_dev_ids,
	.probe = be_probe,
	.remove = be_remove,
	.suspend = be_suspend,
	.resume = be_resume,
	.shutdown = be_shutdown,
	.err_handler = &be_eeh_handlers
};

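/*
 * Illustrative (values are examples only, module name assumed to match
 * DRV_NAME): the rx_frag_size and num_vfs module parameters can be set at
 * load time, e.g.
 *
 *	modprobe be2net rx_frag_size=4096 num_vfs=2
 *
 * rx_frag_size must be 2048, 4096 or 8192, as enforced below.
 */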
static int __init be_init_module(void)
{
	if (rx_frag_size != 8192 && rx_frag_size != 4096 &&
	    rx_frag_size != 2048) {
		printk(KERN_WARNING DRV_NAME
			" : Module param rx_frag_size must be 2048/4096/8192."
			" Using 2048\n");
		rx_frag_size = 2048;
	}

	return pci_register_driver(&be_driver);
}
module_init(be_init_module);

static void __exit be_exit_module(void)
{
	pci_unregister_driver(&be_driver);
}
module_exit(be_exit_module);