blob: 7c98d8e99508aaaa0592213f85d318b5f794eb34 [file] [log] [blame]
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001/*
Ajit Khaparded2145cd2011-03-16 08:20:46 +00002 * Copyright (C) 2005 - 2011 Emulex
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003 * All rights reserved.
4 *
5 * This program is free software; you can redistribute it and/or
6 * modify it under the terms of the GNU General Public License version 2
7 * as published by the Free Software Foundation. The full GNU General
8 * Public License is included in this distribution in the file called COPYING.
9 *
10 * Contact Information:
Ajit Khaparded2145cd2011-03-16 08:20:46 +000011 * linux-drivers@emulex.com
Sathya Perla6b7c5b92009-03-11 23:32:03 -070012 *
Ajit Khaparded2145cd2011-03-16 08:20:46 +000013 * Emulex
14 * 3333 Susan Street
15 * Costa Mesa, CA 92626
Sathya Perla6b7c5b92009-03-11 23:32:03 -070016 */
17
Paul Gortmaker70c71602011-05-22 16:47:17 -040018#include <linux/prefetch.h>
Sathya Perla6b7c5b92009-03-11 23:32:03 -070019#include "be.h"
Sathya Perla8788fdc2009-07-27 22:52:03 +000020#include "be_cmds.h"
Stephen Hemminger65f71b82009-03-27 00:25:24 -070021#include <asm/div64.h>
Sathya Perla6b7c5b92009-03-11 23:32:03 -070022
23MODULE_VERSION(DRV_VER);
24MODULE_DEVICE_TABLE(pci, be_dev_ids);
25MODULE_DESCRIPTION(DRV_DESC " " DRV_VER);
26MODULE_AUTHOR("ServerEngines Corporation");
27MODULE_LICENSE("GPL");
28
Sathya Perla2e588f82011-03-11 02:49:26 +000029static ushort rx_frag_size = 2048;
Sarveshwar Bandiba343c72010-03-31 02:56:12 +000030static unsigned int num_vfs;
Sathya Perla2e588f82011-03-11 02:49:26 +000031module_param(rx_frag_size, ushort, S_IRUGO);
Sarveshwar Bandiba343c72010-03-31 02:56:12 +000032module_param(num_vfs, uint, S_IRUGO);
Sathya Perla6b7c5b92009-03-11 23:32:03 -070033MODULE_PARM_DESC(rx_frag_size, "Size of a fragment that holds rcvd data.");
Sarveshwar Bandiba343c72010-03-31 02:56:12 +000034MODULE_PARM_DESC(num_vfs, "Number of PCI VFs to initialize");
Sathya Perla6b7c5b92009-03-11 23:32:03 -070035
Sathya Perla6b7c5b92009-03-11 23:32:03 -070036static DEFINE_PCI_DEVICE_TABLE(be_dev_ids) = {
Ajit Khapardec4ca2372009-05-18 15:38:55 -070037 { PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID1) },
Ajit Khaparde59fd5d82009-10-29 01:11:06 -070038 { PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID2) },
Ajit Khapardec4ca2372009-05-18 15:38:55 -070039 { PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID1) },
40 { PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID2) },
Sathya Perlafe6d2a32010-11-21 23:25:50 +000041 { PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID3)},
Mammatha Edhala12f4d0a2011-05-18 03:26:22 +000042 { PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID4)},
Sathya Perla6b7c5b92009-03-11 23:32:03 -070043 { 0 }
44};
45MODULE_DEVICE_TABLE(pci, be_dev_ids);
Ajit Khaparde7c185272010-07-29 06:16:33 +000046/* UE Status Low CSR */
/* Bit-position -> hardware-block name strings for decoding the UE
 * (Unrecoverable Error) Status Low CSR when dumping errors.
 * Index i names the block reported by bit i of the register.
 * NOTE(review): some entries carry trailing spaces as formatted by the
 * original author; preserved verbatim.
 */
static const char * const ue_status_low_desc[] = {
	"CEV",
	"CTX",
	"DBUF",
	"ERX",
	"Host",
	"MPU",
	"NDMA",
	"PTC ",
	"RDMA ",
	"RXF ",
	"RXIPS ",
	"RXULP0 ",
	"RXULP1 ",
	"RXULP2 ",
	"TIM ",
	"TPOST ",
	"TPRE ",
	"TXIPS ",
	"TXULP0 ",
	"TXULP1 ",
	"UC ",
	"WDMA ",
	"TXULP2 ",
	"HOST1 ",
	"P0_OB_LINK ",
	"P1_OB_LINK ",
	"HOST_GPIO ",
	"MBOX ",
	"AXGMAC0",
	"AXGMAC1",
	"JTAG",
	"MPU_INTPEND"
};
81/* UE Status High CSR */
/* Bit-position -> hardware-block name strings for decoding the UE
 * (Unrecoverable Error) Status High CSR; companion of
 * ue_status_low_desc above. Upper bits are reserved/"Unknown".
 */
static const char * const ue_status_hi_desc[] = {
	"LPCMEMHOST",
	"MGMT_MAC",
	"PCS0ONLINE",
	"MPU_IRAM",
	"PCS1ONLINE",
	"PCTL0",
	"PCTL1",
	"PMEM",
	"RR",
	"TXPB",
	"RXPP",
	"XAUI",
	"TXP",
	"ARM",
	"IPC",
	"HOST2",
	"HOST3",
	"HOST4",
	"HOST5",
	"HOST6",
	"HOST7",
	"HOST8",
	"HOST9",
	"NETC",
	"Unknown",
	"Unknown",
	"Unknown",
	"Unknown",
	"Unknown",
	"Unknown",
	"Unknown",
	"Unknown"
};
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700116
117static void be_queue_free(struct be_adapter *adapter, struct be_queue_info *q)
118{
119 struct be_dma_mem *mem = &q->dma_mem;
120 if (mem->va)
Ivan Vecera2b7bceb2011-02-02 08:05:12 +0000121 dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
122 mem->dma);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700123}
124
125static int be_queue_alloc(struct be_adapter *adapter, struct be_queue_info *q,
126 u16 len, u16 entry_size)
127{
128 struct be_dma_mem *mem = &q->dma_mem;
129
130 memset(q, 0, sizeof(*q));
131 q->len = len;
132 q->entry_size = entry_size;
133 mem->size = len * entry_size;
Ivan Vecera2b7bceb2011-02-02 08:05:12 +0000134 mem->va = dma_alloc_coherent(&adapter->pdev->dev, mem->size, &mem->dma,
135 GFP_KERNEL);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700136 if (!mem->va)
137 return -1;
138 memset(mem->va, 0, mem->size);
139 return 0;
140}
141
Sathya Perla8788fdc2009-07-27 22:52:03 +0000142static void be_intr_set(struct be_adapter *adapter, bool enable)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700143{
Sathya Perla8788fdc2009-07-27 22:52:03 +0000144 u8 __iomem *addr = adapter->pcicfg + PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700145 u32 reg = ioread32(addr);
146 u32 enabled = reg & MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
Sathya Perla5f0b8492009-07-27 22:52:56 +0000147
Sathya Perlacf588472010-02-14 21:22:01 +0000148 if (adapter->eeh_err)
149 return;
150
Sathya Perla5f0b8492009-07-27 22:52:56 +0000151 if (!enabled && enable)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700152 reg |= MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
Sathya Perla5f0b8492009-07-27 22:52:56 +0000153 else if (enabled && !enable)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700154 reg &= ~MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
Sathya Perla5f0b8492009-07-27 22:52:56 +0000155 else
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700156 return;
Sathya Perla5f0b8492009-07-27 22:52:56 +0000157
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700158 iowrite32(reg, addr);
159}
160
Sathya Perla8788fdc2009-07-27 22:52:03 +0000161static void be_rxq_notify(struct be_adapter *adapter, u16 qid, u16 posted)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700162{
163 u32 val = 0;
164 val |= qid & DB_RQ_RING_ID_MASK;
165 val |= posted << DB_RQ_NUM_POSTED_SHIFT;
Sathya Perlaf3eb62d2010-06-29 00:11:17 +0000166
167 wmb();
Sathya Perla8788fdc2009-07-27 22:52:03 +0000168 iowrite32(val, adapter->db + DB_RQ_OFFSET);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700169}
170
Sathya Perla8788fdc2009-07-27 22:52:03 +0000171static void be_txq_notify(struct be_adapter *adapter, u16 qid, u16 posted)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700172{
173 u32 val = 0;
174 val |= qid & DB_TXULP_RING_ID_MASK;
175 val |= (posted & DB_TXULP_NUM_POSTED_MASK) << DB_TXULP_NUM_POSTED_SHIFT;
Sathya Perlaf3eb62d2010-06-29 00:11:17 +0000176
177 wmb();
Sathya Perla8788fdc2009-07-27 22:52:03 +0000178 iowrite32(val, adapter->db + DB_TXULP1_OFFSET);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700179}
180
Sathya Perla8788fdc2009-07-27 22:52:03 +0000181static void be_eq_notify(struct be_adapter *adapter, u16 qid,
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700182 bool arm, bool clear_int, u16 num_popped)
183{
184 u32 val = 0;
185 val |= qid & DB_EQ_RING_ID_MASK;
Sathya Perlafe6d2a32010-11-21 23:25:50 +0000186 val |= ((qid & DB_EQ_RING_ID_EXT_MASK) <<
187 DB_EQ_RING_ID_EXT_MASK_SHIFT);
Sathya Perlacf588472010-02-14 21:22:01 +0000188
189 if (adapter->eeh_err)
190 return;
191
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700192 if (arm)
193 val |= 1 << DB_EQ_REARM_SHIFT;
194 if (clear_int)
195 val |= 1 << DB_EQ_CLR_SHIFT;
196 val |= 1 << DB_EQ_EVNT_SHIFT;
197 val |= num_popped << DB_EQ_NUM_POPPED_SHIFT;
Sathya Perla8788fdc2009-07-27 22:52:03 +0000198 iowrite32(val, adapter->db + DB_EQ_OFFSET);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700199}
200
Sathya Perla8788fdc2009-07-27 22:52:03 +0000201void be_cq_notify(struct be_adapter *adapter, u16 qid, bool arm, u16 num_popped)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700202{
203 u32 val = 0;
204 val |= qid & DB_CQ_RING_ID_MASK;
Sathya Perlafe6d2a32010-11-21 23:25:50 +0000205 val |= ((qid & DB_CQ_RING_ID_EXT_MASK) <<
206 DB_CQ_RING_ID_EXT_MASK_SHIFT);
Sathya Perlacf588472010-02-14 21:22:01 +0000207
208 if (adapter->eeh_err)
209 return;
210
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700211 if (arm)
212 val |= 1 << DB_CQ_REARM_SHIFT;
213 val |= num_popped << DB_CQ_NUM_POPPED_SHIFT;
Sathya Perla8788fdc2009-07-27 22:52:03 +0000214 iowrite32(val, adapter->db + DB_CQ_OFFSET);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700215}
216
/* ndo_set_mac_address handler: program a new station MAC address.
 * @p: struct sockaddr carrying the new address in sa_data.
 * Returns 0 on success, -EADDRNOTAVAIL for an invalid address, or the
 * firmware-command status on failure.
 */
static int be_mac_addr_set(struct net_device *netdev, void *p)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct sockaddr *addr = p;
	int status = 0;

	if (!is_valid_ether_addr(addr->sa_data))
		return -EADDRNOTAVAIL;

	/* MAC addr configuration will be done in hardware for VFs
	 * by their corresponding PFs. Just copy to netdev addr here
	 */
	if (!be_physfn(adapter))
		goto netdev_addr;

	/* Replace the currently-programmed pmac: delete the old entry
	 * first, then add the new one (pmac_id is refreshed by the add).
	 */
	status = be_cmd_pmac_del(adapter, adapter->if_handle,
			adapter->pmac_id, 0);
	if (status)
		return status;

	status = be_cmd_pmac_add(adapter, (u8 *)addr->sa_data,
			adapter->if_handle, &adapter->pmac_id, 0);
netdev_addr:
	/* Reflect the new address in the netdev only if HW accepted it
	 * (or for VFs, unconditionally) */
	if (!status)
		memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);

	return status;
}
245
/* Copy BE2 (v0 stats layout) firmware statistics into the driver's
 * chip-agnostic drv_stats structure. The hw stats buffer is converted
 * from LE to CPU endianness in place before the field-by-field copy.
 */
static void populate_be2_stats(struct be_adapter *adapter)
{
	struct be_hw_stats_v0 *hw_stats = hw_stats_from_cmd(adapter);
	struct be_pmem_stats *pmem_sts = &hw_stats->pmem;
	struct be_rxf_stats_v0 *rxf_stats = &hw_stats->rxf;
	/* per-port counters for the port this function owns */
	struct be_port_rxf_stats_v0 *port_stats =
		&rxf_stats->port[adapter->port_num];
	struct be_drv_stats *drvs = &adapter->drv_stats;

	be_dws_le_to_cpu(hw_stats, sizeof(*hw_stats));
	drvs->rx_pause_frames = port_stats->rx_pause_frames;
	drvs->rx_crc_errors = port_stats->rx_crc_errors;
	drvs->rx_control_frames = port_stats->rx_control_frames;
	drvs->rx_in_range_errors = port_stats->rx_in_range_errors;
	drvs->rx_frame_too_long = port_stats->rx_frame_too_long;
	drvs->rx_dropped_runt = port_stats->rx_dropped_runt;
	drvs->rx_ip_checksum_errs = port_stats->rx_ip_checksum_errs;
	drvs->rx_tcp_checksum_errs = port_stats->rx_tcp_checksum_errs;
	drvs->rx_udp_checksum_errs = port_stats->rx_udp_checksum_errs;
	drvs->rxpp_fifo_overflow_drop = port_stats->rx_fifo_overflow;
	drvs->rx_dropped_tcp_length = port_stats->rx_dropped_tcp_length;
	drvs->rx_dropped_too_small = port_stats->rx_dropped_too_small;
	drvs->rx_dropped_too_short = port_stats->rx_dropped_too_short;
	drvs->rx_out_range_errors = port_stats->rx_out_range_errors;
	drvs->rx_input_fifo_overflow_drop = port_stats->rx_input_fifo_overflow;
	drvs->rx_dropped_header_too_small =
		port_stats->rx_dropped_header_too_small;
	drvs->rx_address_match_errors = port_stats->rx_address_match_errors;
	drvs->rx_alignment_symbol_errors =
		port_stats->rx_alignment_symbol_errors;

	drvs->tx_pauseframes = port_stats->tx_pauseframes;
	drvs->tx_controlframes = port_stats->tx_controlframes;

	/* v0 layout keeps jabber counters per port in the rxf block */
	if (adapter->port_num)
		drvs->jabber_events = rxf_stats->port1_jabber_events;
	else
		drvs->jabber_events = rxf_stats->port0_jabber_events;
	drvs->rx_drops_no_pbuf = rxf_stats->rx_drops_no_pbuf;
	drvs->rx_drops_no_txpb = rxf_stats->rx_drops_no_txpb;
	drvs->rx_drops_no_erx_descr = rxf_stats->rx_drops_no_erx_descr;
	drvs->rx_drops_invalid_ring = rxf_stats->rx_drops_invalid_ring;
	drvs->forwarded_packets = rxf_stats->forwarded_packets;
	drvs->rx_drops_mtu = rxf_stats->rx_drops_mtu;
	drvs->rx_drops_no_tpre_descr = rxf_stats->rx_drops_no_tpre_descr;
	drvs->rx_drops_too_many_frags = rxf_stats->rx_drops_too_many_frags;
	adapter->drv_stats.eth_red_drops = pmem_sts->eth_red_drops;
}
294
/* Copy BE3 (v1 stats layout) firmware statistics into the driver's
 * chip-agnostic drv_stats structure. Mirrors populate_be2_stats() but
 * the v1 layout moves jabber/fifo counters into the per-port block.
 */
static void populate_be3_stats(struct be_adapter *adapter)
{
	struct be_hw_stats_v1 *hw_stats = hw_stats_from_cmd(adapter);
	struct be_pmem_stats *pmem_sts = &hw_stats->pmem;
	struct be_rxf_stats_v1 *rxf_stats = &hw_stats->rxf;
	/* per-port counters for the port this function owns */
	struct be_port_rxf_stats_v1 *port_stats =
		&rxf_stats->port[adapter->port_num];
	struct be_drv_stats *drvs = &adapter->drv_stats;

	be_dws_le_to_cpu(hw_stats, sizeof(*hw_stats));
	drvs->rx_pause_frames = port_stats->rx_pause_frames;
	drvs->rx_crc_errors = port_stats->rx_crc_errors;
	drvs->rx_control_frames = port_stats->rx_control_frames;
	drvs->rx_in_range_errors = port_stats->rx_in_range_errors;
	drvs->rx_frame_too_long = port_stats->rx_frame_too_long;
	drvs->rx_dropped_runt = port_stats->rx_dropped_runt;
	drvs->rx_ip_checksum_errs = port_stats->rx_ip_checksum_errs;
	drvs->rx_tcp_checksum_errs = port_stats->rx_tcp_checksum_errs;
	drvs->rx_udp_checksum_errs = port_stats->rx_udp_checksum_errs;
	drvs->rx_dropped_tcp_length = port_stats->rx_dropped_tcp_length;
	drvs->rx_dropped_too_small = port_stats->rx_dropped_too_small;
	drvs->rx_dropped_too_short = port_stats->rx_dropped_too_short;
	drvs->rx_out_range_errors = port_stats->rx_out_range_errors;
	drvs->rx_dropped_header_too_small =
		port_stats->rx_dropped_header_too_small;
	drvs->rx_input_fifo_overflow_drop =
		port_stats->rx_input_fifo_overflow_drop;
	drvs->rx_address_match_errors = port_stats->rx_address_match_errors;
	drvs->rx_alignment_symbol_errors =
		port_stats->rx_alignment_symbol_errors;
	drvs->rxpp_fifo_overflow_drop = port_stats->rxpp_fifo_overflow_drop;
	drvs->tx_pauseframes = port_stats->tx_pauseframes;
	drvs->tx_controlframes = port_stats->tx_controlframes;
	drvs->jabber_events = port_stats->jabber_events;
	drvs->rx_drops_no_pbuf = rxf_stats->rx_drops_no_pbuf;
	drvs->rx_drops_no_txpb = rxf_stats->rx_drops_no_txpb;
	drvs->rx_drops_no_erx_descr = rxf_stats->rx_drops_no_erx_descr;
	drvs->rx_drops_invalid_ring = rxf_stats->rx_drops_invalid_ring;
	drvs->forwarded_packets = rxf_stats->forwarded_packets;
	drvs->rx_drops_mtu = rxf_stats->rx_drops_mtu;
	drvs->rx_drops_no_tpre_descr = rxf_stats->rx_drops_no_tpre_descr;
	drvs->rx_drops_too_many_frags = rxf_stats->rx_drops_too_many_frags;
	adapter->drv_stats.eth_red_drops = pmem_sts->eth_red_drops;
}
339
/* Copy Lancer pport firmware statistics into the driver's chip-agnostic
 * drv_stats structure. Lancer reports 64-bit counters split into
 * _lo/_hi halves; only the low 32 bits are consumed here, matching the
 * width of the drv_stats fields.
 */
static void populate_lancer_stats(struct be_adapter *adapter)
{

	struct be_drv_stats *drvs = &adapter->drv_stats;
	struct lancer_pport_stats *pport_stats =
					pport_stats_from_cmd(adapter);

	be_dws_le_to_cpu(pport_stats, sizeof(*pport_stats));
	drvs->rx_pause_frames = pport_stats->rx_pause_frames_lo;
	drvs->rx_crc_errors = pport_stats->rx_crc_errors_lo;
	drvs->rx_control_frames = pport_stats->rx_control_frames_lo;
	drvs->rx_in_range_errors = pport_stats->rx_in_range_errors;
	drvs->rx_frame_too_long = pport_stats->rx_frames_too_long_lo;
	drvs->rx_dropped_runt = pport_stats->rx_dropped_runt;
	drvs->rx_ip_checksum_errs = pport_stats->rx_ip_checksum_errors;
	drvs->rx_tcp_checksum_errs = pport_stats->rx_tcp_checksum_errors;
	drvs->rx_udp_checksum_errs = pport_stats->rx_udp_checksum_errors;
	drvs->rx_dropped_tcp_length =
				pport_stats->rx_dropped_invalid_tcp_length;
	drvs->rx_dropped_too_small = pport_stats->rx_dropped_too_small;
	drvs->rx_dropped_too_short = pport_stats->rx_dropped_too_short;
	drvs->rx_out_range_errors = pport_stats->rx_out_of_range_errors;
	drvs->rx_dropped_header_too_small =
				pport_stats->rx_dropped_header_too_small;
	/* Lancer has a single fifo-overflow counter; feed both drv fields */
	drvs->rx_input_fifo_overflow_drop = pport_stats->rx_fifo_overflow;
	drvs->rx_address_match_errors = pport_stats->rx_address_match_errors;
	drvs->rx_alignment_symbol_errors = pport_stats->rx_symbol_errors_lo;
	drvs->rxpp_fifo_overflow_drop = pport_stats->rx_fifo_overflow;
	drvs->tx_pauseframes = pport_stats->tx_pause_frames_lo;
	drvs->tx_controlframes = pport_stats->tx_control_frames_lo;
	drvs->jabber_events = pport_stats->rx_jabbers;
	drvs->rx_drops_invalid_ring = pport_stats->rx_drops_invalid_queue;
	drvs->forwarded_packets = pport_stats->num_forwards_lo;
	drvs->rx_drops_mtu = pport_stats->rx_drops_mtu_lo;
	drvs->rx_drops_too_many_frags =
				pport_stats->rx_drops_too_many_frags_lo;
}
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000377
378void be_parse_stats(struct be_adapter *adapter)
379{
Sathya Perlaac124ff2011-07-25 19:10:14 +0000380 struct be_erx_stats_v1 *erx = be_erx_stats_from_cmd(adapter);
381 struct be_rx_obj *rxo;
382 int i;
383
Selvin Xavier005d5692011-05-16 07:36:35 +0000384 if (adapter->generation == BE_GEN3) {
385 if (lancer_chip(adapter))
386 populate_lancer_stats(adapter);
387 else
388 populate_be3_stats(adapter);
389 } else {
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000390 populate_be2_stats(adapter);
Selvin Xavier005d5692011-05-16 07:36:35 +0000391 }
Sathya Perlaac124ff2011-07-25 19:10:14 +0000392
393 /* as erx_v1 is longer than v0, ok to use v1 defn for v0 access */
394 for_all_rx_queues(adapter, rxo, i)
395 rx_stats(rxo)->rx_drops_no_frags =
396 erx->rx_drops_no_fragments[rxo->q.id];
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000397}
398
/* ndo_get_stats64 handler: aggregate per-queue driver counters and the
 * firmware-derived error counters into @stats. The per-queue 64-bit
 * counters are read under their u64_stats seqcount so a consistent
 * pkts/bytes pair is obtained even on 32-bit hosts.
 */
static struct rtnl_link_stats64 *be_get_stats64(struct net_device *netdev,
					struct rtnl_link_stats64 *stats)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_drv_stats *drvs = &adapter->drv_stats;
	struct be_rx_obj *rxo;
	struct be_tx_obj *txo;
	u64 pkts, bytes;
	unsigned int start;
	int i;

	for_all_rx_queues(adapter, rxo, i) {
		const struct be_rx_stats *rx_stats = rx_stats(rxo);
		/* retry until a write didn't race with this read */
		do {
			start = u64_stats_fetch_begin_bh(&rx_stats->sync);
			pkts = rx_stats(rxo)->rx_pkts;
			bytes = rx_stats(rxo)->rx_bytes;
		} while (u64_stats_fetch_retry_bh(&rx_stats->sync, start));
		stats->rx_packets += pkts;
		stats->rx_bytes += bytes;
		stats->multicast += rx_stats(rxo)->rx_mcast_pkts;
		stats->rx_dropped += rx_stats(rxo)->rx_drops_no_skbs +
					rx_stats(rxo)->rx_drops_no_frags;
	}

	for_all_tx_queues(adapter, txo, i) {
		const struct be_tx_stats *tx_stats = tx_stats(txo);
		/* retry until a write didn't race with this read */
		do {
			start = u64_stats_fetch_begin_bh(&tx_stats->sync);
			pkts = tx_stats(txo)->tx_pkts;
			bytes = tx_stats(txo)->tx_bytes;
		} while (u64_stats_fetch_retry_bh(&tx_stats->sync, start));
		stats->tx_packets += pkts;
		stats->tx_bytes += bytes;
	}

	/* bad pkts received */
	stats->rx_errors = drvs->rx_crc_errors +
		drvs->rx_alignment_symbol_errors +
		drvs->rx_in_range_errors +
		drvs->rx_out_range_errors +
		drvs->rx_frame_too_long +
		drvs->rx_dropped_too_small +
		drvs->rx_dropped_too_short +
		drvs->rx_dropped_header_too_small +
		drvs->rx_dropped_tcp_length +
		drvs->rx_dropped_runt;

	/* detailed rx errors */
	stats->rx_length_errors = drvs->rx_in_range_errors +
		drvs->rx_out_range_errors +
		drvs->rx_frame_too_long;

	stats->rx_crc_errors = drvs->rx_crc_errors;

	/* frame alignment errors */
	stats->rx_frame_errors = drvs->rx_alignment_symbol_errors;

	/* receiver fifo overrun */
	/* drops_no_pbuf is no per i/f, it's per BE card */
	stats->rx_fifo_errors = drvs->rxpp_fifo_overflow_drop +
				drvs->rx_input_fifo_overflow_drop +
				drvs->rx_drops_no_pbuf;
	return stats;
}
464
Sathya Perlaea172a02011-08-02 19:57:42 +0000465void be_link_status_update(struct be_adapter *adapter, u32 link_status)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700466{
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700467 struct net_device *netdev = adapter->netdev;
468
Sathya Perlaea172a02011-08-02 19:57:42 +0000469 /* when link status changes, link speed must be re-queried from card */
470 adapter->link_speed = -1;
471 if ((link_status & LINK_STATUS_MASK) == LINK_UP) {
472 netif_carrier_on(netdev);
473 dev_info(&adapter->pdev->dev, "%s: Link up\n", netdev->name);
474 } else {
475 netif_carrier_off(netdev);
476 dev_info(&adapter->pdev->dev, "%s: Link down\n", netdev->name);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700477 }
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700478}
479
Sathya Perla3c8def92011-06-12 20:01:58 +0000480static void be_tx_stats_update(struct be_tx_obj *txo,
Ajit Khaparde91992e42010-02-19 13:57:12 +0000481 u32 wrb_cnt, u32 copied, u32 gso_segs, bool stopped)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700482{
Sathya Perla3c8def92011-06-12 20:01:58 +0000483 struct be_tx_stats *stats = tx_stats(txo);
484
Sathya Perlaab1594e2011-07-25 19:10:15 +0000485 u64_stats_update_begin(&stats->sync);
Sathya Perlaac124ff2011-07-25 19:10:14 +0000486 stats->tx_reqs++;
487 stats->tx_wrbs += wrb_cnt;
488 stats->tx_bytes += copied;
489 stats->tx_pkts += (gso_segs ? gso_segs : 1);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700490 if (stopped)
Sathya Perlaac124ff2011-07-25 19:10:14 +0000491 stats->tx_stops++;
Sathya Perlaab1594e2011-07-25 19:10:15 +0000492 u64_stats_update_end(&stats->sync);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700493}
494
495/* Determine number of WRB entries needed to xmit data in an skb */
Sathya Perlafe6d2a32010-11-21 23:25:50 +0000496static u32 wrb_cnt_for_skb(struct be_adapter *adapter, struct sk_buff *skb,
497 bool *dummy)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700498{
David S. Millerebc8d2a2009-06-09 01:01:31 -0700499 int cnt = (skb->len > skb->data_len);
500
501 cnt += skb_shinfo(skb)->nr_frags;
502
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700503 /* to account for hdr wrb */
504 cnt++;
Sathya Perlafe6d2a32010-11-21 23:25:50 +0000505 if (lancer_chip(adapter) || !(cnt & 1)) {
506 *dummy = false;
507 } else {
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700508 /* add a dummy to make it an even num */
509 cnt++;
510 *dummy = true;
Sathya Perlafe6d2a32010-11-21 23:25:50 +0000511 }
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700512 BUG_ON(cnt > BE_MAX_TX_FRAG_COUNT);
513 return cnt;
514}
515
516static inline void wrb_fill(struct be_eth_wrb *wrb, u64 addr, int len)
517{
518 wrb->frag_pa_hi = upper_32_bits(addr);
519 wrb->frag_pa_lo = addr & 0xFFFFFFFF;
520 wrb->frag_len = len & ETH_WRB_FRAG_LEN_MASK;
521}
522
/* Program the header WRB that precedes a transmit's fragment WRBs:
 * LSO/checksum-offload flags, VLAN tag insertion, total WRB count and
 * total payload length. @wrb_cnt and @len describe the whole request.
 */
static void wrb_fill_hdr(struct be_adapter *adapter, struct be_eth_hdr_wrb *hdr,
		struct sk_buff *skb, u32 wrb_cnt, u32 len)
{
	u8 vlan_prio = 0;
	u16 vlan_tag = 0;

	memset(hdr, 0, sizeof(*hdr));

	AMAP_SET_BITS(struct amap_eth_hdr_wrb, crc, hdr, 1);

	if (skb_is_gso(skb)) {
		AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso, hdr, 1);
		AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso_mss,
			hdr, skb_shinfo(skb)->gso_size);
		/* lso6 is not supported on Lancer */
		if (skb_is_gso_v6(skb) && !lancer_chip(adapter))
			AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso6, hdr, 1);
		/* Lancer A0 requires explicit csum flags even with LSO */
		if (lancer_chip(adapter) && adapter->sli_family ==
			LANCER_A0_SLI_FAMILY) {
			AMAP_SET_BITS(struct amap_eth_hdr_wrb, ipcs, hdr, 1);
			if (is_tcp_pkt(skb))
				AMAP_SET_BITS(struct amap_eth_hdr_wrb,
								tcpcs, hdr, 1);
			else if (is_udp_pkt(skb))
				AMAP_SET_BITS(struct amap_eth_hdr_wrb,
								udpcs, hdr, 1);
		}
	} else if (skb->ip_summed == CHECKSUM_PARTIAL) {
		if (is_tcp_pkt(skb))
			AMAP_SET_BITS(struct amap_eth_hdr_wrb, tcpcs, hdr, 1);
		else if (is_udp_pkt(skb))
			AMAP_SET_BITS(struct amap_eth_hdr_wrb, udpcs, hdr, 1);
	}

	if (vlan_tx_tag_present(skb)) {
		AMAP_SET_BITS(struct amap_eth_hdr_wrb, vlan, hdr, 1);
		vlan_tag = vlan_tx_tag_get(skb);
		vlan_prio = (vlan_tag & VLAN_PRIO_MASK) >> VLAN_PRIO_SHIFT;
		/* If vlan priority provided by OS is NOT in available bmap */
		if (!(adapter->vlan_prio_bmap & (1 << vlan_prio)))
			vlan_tag = (vlan_tag & ~VLAN_PRIO_MASK) |
					adapter->recommended_prio;
		AMAP_SET_BITS(struct amap_eth_hdr_wrb, vlan_tag, hdr, vlan_tag);
	}

	AMAP_SET_BITS(struct amap_eth_hdr_wrb, event, hdr, 1);
	AMAP_SET_BITS(struct amap_eth_hdr_wrb, complete, hdr, 1);
	AMAP_SET_BITS(struct amap_eth_hdr_wrb, num_wrb, hdr, wrb_cnt);
	AMAP_SET_BITS(struct amap_eth_hdr_wrb, len, hdr, len);
}
572
/* DMA-unmap the buffer described by one TX WRB.
 * @unmap_single: true for the skb linear head (dma_map_single),
 *                false for page fragments (dma_map_page).
 * The WRB is converted back to CPU endianness to recover the address
 * and length; a zero frag_len (dummy/header WRB) is skipped.
 */
static void unmap_tx_frag(struct device *dev, struct be_eth_wrb *wrb,
		bool unmap_single)
{
	dma_addr_t dma;

	be_dws_le_to_cpu(wrb, sizeof(*wrb));

	dma = (u64)wrb->frag_pa_hi << 32 | (u64)wrb->frag_pa_lo;
	if (wrb->frag_len) {
		if (unmap_single)
			dma_unmap_single(dev, dma, wrb->frag_len,
					 DMA_TO_DEVICE);
		else
			dma_unmap_page(dev, dma, wrb->frag_len, DMA_TO_DEVICE);
	}
}
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700589
/* DMA-map @skb and post its WRBs (header + linear head + fragments +
 * optional pad) onto @txq. @wrb_cnt and @dummy_wrb come from
 * wrb_cnt_for_skb().
 * Returns the number of payload bytes mapped, or 0 on a DMA mapping
 * failure, in which case all WRBs mapped so far are unwound and the
 * queue head is restored.
 */
static int make_tx_wrbs(struct be_adapter *adapter, struct be_queue_info *txq,
		struct sk_buff *skb, u32 wrb_cnt, bool dummy_wrb)
{
	dma_addr_t busaddr;
	int i, copied = 0;
	struct device *dev = &adapter->pdev->dev;
	struct sk_buff *first_skb = skb;
	struct be_eth_wrb *wrb;
	struct be_eth_hdr_wrb *hdr;
	bool map_single = false;
	u16 map_head;

	/* Reserve the header WRB slot first; it is filled in last, once
	 * the total copied length is known.
	 */
	hdr = queue_head_node(txq);
	queue_head_inc(txq);
	map_head = txq->head;	/* rewind point for the error path */

	/* Map the linear portion of the skb, if any */
	if (skb->len > skb->data_len) {
		int len = skb_headlen(skb);
		busaddr = dma_map_single(dev, skb->data, len, DMA_TO_DEVICE);
		if (dma_mapping_error(dev, busaddr))
			goto dma_err;
		map_single = true;
		wrb = queue_head_node(txq);
		wrb_fill(wrb, busaddr, len);
		be_dws_cpu_to_le(wrb, sizeof(*wrb));
		queue_head_inc(txq);
		copied += len;
	}

	/* Map each page fragment */
	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		struct skb_frag_struct *frag =
			&skb_shinfo(skb)->frags[i];
		busaddr = dma_map_page(dev, frag->page, frag->page_offset,
				       frag->size, DMA_TO_DEVICE);
		if (dma_mapping_error(dev, busaddr))
			goto dma_err;
		wrb = queue_head_node(txq);
		wrb_fill(wrb, busaddr, frag->size);
		be_dws_cpu_to_le(wrb, sizeof(*wrb));
		queue_head_inc(txq);
		copied += frag->size;
	}

	/* Pad WRB so the HW sees an even WRB count (non-Lancer) */
	if (dummy_wrb) {
		wrb = queue_head_node(txq);
		wrb_fill(wrb, 0, 0);
		be_dws_cpu_to_le(wrb, sizeof(*wrb));
		queue_head_inc(txq);
	}

	wrb_fill_hdr(adapter, hdr, first_skb, wrb_cnt, copied);
	be_dws_cpu_to_le(hdr, sizeof(*hdr));

	return copied;
dma_err:
	/* Unwind: restore the head and unmap everything posted so far.
	 * Only the first WRB was dma_map_single-mapped (map_single is
	 * cleared after the first unmap).
	 */
	txq->head = map_head;
	while (copied) {
		wrb = queue_head_node(txq);
		unmap_tx_frag(dev, wrb, map_single);
		map_single = false;
		copied -= wrb->frag_len;
		queue_head_inc(txq);
	}
	return 0;
}
655
/* ndo_start_xmit handler: map the skb into TX WRBs, account queue usage,
 * and ring the TX doorbell.  Always consumes the skb (NETDEV_TX_OK). */
static netdev_tx_t be_xmit(struct sk_buff *skb,
			struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_tx_obj *txo = &adapter->tx_obj[skb_get_queue_mapping(skb)];
	struct be_queue_info *txq = &txo->q;
	u32 wrb_cnt = 0, copied = 0;
	u32 start = txq->head;	/* remembered so we can roll back on failure */
	bool dummy_wrb, stopped = false;

	wrb_cnt = wrb_cnt_for_skb(adapter, skb, &dummy_wrb);

	/* copied == 0 means DMA mapping failed inside make_tx_wrbs */
	copied = make_tx_wrbs(adapter, txq, skb, wrb_cnt, dummy_wrb);
	if (copied) {
		/* record the sent skb in the sent_skb table */
		BUG_ON(txo->sent_skb_list[start]);
		txo->sent_skb_list[start] = skb;

		/* Ensure txq has space for the next skb; Else stop the queue
		 * *BEFORE* ringing the tx doorbell, so that we serialze the
		 * tx compls of the current transmit which'll wake up the queue
		 */
		atomic_add(wrb_cnt, &txq->used);
		if ((BE_MAX_TX_FRAG_COUNT + atomic_read(&txq->used)) >=
								txq->len) {
			netif_stop_subqueue(netdev, skb_get_queue_mapping(skb));
			stopped = true;
		}

		be_txq_notify(adapter, txq->id, wrb_cnt);

		be_tx_stats_update(txo, wrb_cnt, copied,
				skb_shinfo(skb)->gso_segs, stopped);
	} else {
		/* mapping failed: undo the head advance and drop the skb */
		txq->head = start;
		dev_kfree_skb_any(skb);
	}
	return NETDEV_TX_OK;
}
695
696static int be_change_mtu(struct net_device *netdev, int new_mtu)
697{
698 struct be_adapter *adapter = netdev_priv(netdev);
699 if (new_mtu < BE_MIN_MTU ||
Ajit Khaparde34a89b82010-02-09 01:32:43 +0000700 new_mtu > (BE_MAX_JUMBO_FRAME_SIZE -
701 (ETH_HLEN + ETH_FCS_LEN))) {
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700702 dev_info(&adapter->pdev->dev,
703 "MTU must be between %d and %d bytes\n",
Ajit Khaparde34a89b82010-02-09 01:32:43 +0000704 BE_MIN_MTU,
705 (BE_MAX_JUMBO_FRAME_SIZE - (ETH_HLEN + ETH_FCS_LEN)));
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700706 return -EINVAL;
707 }
708 dev_info(&adapter->pdev->dev, "MTU changed from %d to %d bytes\n",
709 netdev->mtu, new_mtu);
710 netdev->mtu = new_mtu;
711 return 0;
712}
713
/*
 * A max of 64 (BE_NUM_VLANS_SUPPORTED) vlans can be configured in BE.
 * If the user configures more, place BE in vlan promiscuous mode.
 */
static int be_vid_config(struct be_adapter *adapter, bool vf, u32 vf_num)
{
	u16 vtag[BE_NUM_VLANS_SUPPORTED];
	u16 ntags = 0, i;
	int status = 0;
	u32 if_handle;

	/* For a VF, program only its single transparent vlan tag on the
	 * VF's own interface handle; then fall through to refresh the PF
	 * vlan table as well. */
	if (vf) {
		if_handle = adapter->vf_cfg[vf_num].vf_if_handle;
		vtag[0] = cpu_to_le16(adapter->vf_cfg[vf_num].vf_vlan_tag);
		status = be_cmd_vlan_config(adapter, if_handle, vtag, 1, 1, 0);
	}

	/* No need to further configure vids if in promiscuous mode */
	if (adapter->promiscuous)
		return 0;

	if (adapter->vlans_added <= adapter->max_vlans) {
		/* Construct VLAN Table to give to HW */
		for (i = 0; i < VLAN_N_VID; i++) {
			if (adapter->vlan_tag[i]) {
				vtag[ntags] = cpu_to_le16(i);
				ntags++;
			}
		}
		status = be_cmd_vlan_config(adapter, adapter->if_handle,
					vtag, ntags, 1, 0);
	} else {
		/* Too many vids for the HW table: vlan promiscuous mode */
		status = be_cmd_vlan_config(adapter, adapter->if_handle,
					NULL, 0, 1, 1);
	}

	return status;
}
752
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700753static void be_vlan_add_vid(struct net_device *netdev, u16 vid)
754{
755 struct be_adapter *adapter = netdev_priv(netdev);
756
Ajit Khaparde1da87b72010-07-23 01:51:22 +0000757 adapter->vlans_added++;
Sarveshwar Bandiba343c72010-03-31 02:56:12 +0000758 if (!be_physfn(adapter))
759 return;
760
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700761 adapter->vlan_tag[vid] = 1;
Ajit Khaparde82903e42010-02-09 01:34:57 +0000762 if (adapter->vlans_added <= (adapter->max_vlans + 1))
Ajit Khaparde1da87b72010-07-23 01:51:22 +0000763 be_vid_config(adapter, false, 0);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700764}
765
766static void be_vlan_rem_vid(struct net_device *netdev, u16 vid)
767{
768 struct be_adapter *adapter = netdev_priv(netdev);
769
Ajit Khaparde1da87b72010-07-23 01:51:22 +0000770 adapter->vlans_added--;
Ajit Khaparde1da87b72010-07-23 01:51:22 +0000771
Sarveshwar Bandiba343c72010-03-31 02:56:12 +0000772 if (!be_physfn(adapter))
773 return;
774
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700775 adapter->vlan_tag[vid] = 0;
Ajit Khaparde82903e42010-02-09 01:34:57 +0000776 if (adapter->vlans_added <= adapter->max_vlans)
Ajit Khaparde1da87b72010-07-23 01:51:22 +0000777 be_vid_config(adapter, false, 0);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700778}
779
/* ndo_set_multicast_list handler: program the HW rx filters to match
 * the netdev flags (promisc / allmulti / exact multicast list). */
static void be_set_multicast_list(struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);

	if (netdev->flags & IFF_PROMISC) {
		be_cmd_rx_filter(adapter, IFF_PROMISC, ON);
		adapter->promiscuous = true;
		goto done;
	}

	/* BE was previously in promiscuous mode; disable it */
	if (adapter->promiscuous) {
		adapter->promiscuous = false;
		be_cmd_rx_filter(adapter, IFF_PROMISC, OFF);

		/* vid programming was skipped while promiscuous (see
		 * be_vid_config); re-program now */
		if (adapter->vlans_added)
			be_vid_config(adapter, false, 0);
	}

	/* Enable multicast promisc if num configured exceeds what we support */
	if (netdev->flags & IFF_ALLMULTI ||
		netdev_mc_count(netdev) > BE_MAX_MC) {
		be_cmd_rx_filter(adapter, IFF_ALLMULTI, ON);
		goto done;
	}

	be_cmd_rx_filter(adapter, IFF_MULTICAST, ON);
done:
	return;
}
810
/* ndo_set_vf_mac handler: replace the unicast MAC programmed for a VF.
 * Returns 0 on success or a negative errno. */
static int be_set_vf_mac(struct net_device *netdev, int vf, u8 *mac)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	int status;

	if (!adapter->sriov_enabled)
		return -EPERM;

	if (!is_valid_ether_addr(mac) || (vf >= num_vfs))
		return -EINVAL;

	/* Delete the currently programmed pmac, if any, before adding the
	 * new one.  NOTE(review): a pmac_del failure is silently ignored
	 * here -- status is overwritten by the pmac_add below. */
	if (adapter->vf_cfg[vf].vf_pmac_id != BE_INVALID_PMAC_ID)
		status = be_cmd_pmac_del(adapter,
				adapter->vf_cfg[vf].vf_if_handle,
				adapter->vf_cfg[vf].vf_pmac_id, vf + 1);

	status = be_cmd_pmac_add(adapter, mac,
			adapter->vf_cfg[vf].vf_if_handle,
			&adapter->vf_cfg[vf].vf_pmac_id, vf + 1);

	if (status)
		dev_err(&adapter->pdev->dev, "MAC %pM set on VF %d Failed\n",
				mac, vf);
	else
		/* cache the MAC only after HW accepted it */
		memcpy(adapter->vf_cfg[vf].vf_mac_addr, mac, ETH_ALEN);

	return status;
}
839
Ajit Khaparde64600ea2010-07-23 01:50:34 +0000840static int be_get_vf_config(struct net_device *netdev, int vf,
841 struct ifla_vf_info *vi)
842{
843 struct be_adapter *adapter = netdev_priv(netdev);
844
845 if (!adapter->sriov_enabled)
846 return -EPERM;
847
848 if (vf >= num_vfs)
849 return -EINVAL;
850
851 vi->vf = vf;
Ajit Khapardee1d18732010-07-23 01:52:13 +0000852 vi->tx_rate = adapter->vf_cfg[vf].vf_tx_rate;
Ajit Khaparde1da87b72010-07-23 01:51:22 +0000853 vi->vlan = adapter->vf_cfg[vf].vf_vlan_tag;
Ajit Khaparde64600ea2010-07-23 01:50:34 +0000854 vi->qos = 0;
855 memcpy(&vi->mac, adapter->vf_cfg[vf].vf_mac_addr, ETH_ALEN);
856
857 return 0;
858}
859
Ajit Khaparde1da87b72010-07-23 01:51:22 +0000860static int be_set_vf_vlan(struct net_device *netdev,
861 int vf, u16 vlan, u8 qos)
862{
863 struct be_adapter *adapter = netdev_priv(netdev);
864 int status = 0;
865
866 if (!adapter->sriov_enabled)
867 return -EPERM;
868
869 if ((vf >= num_vfs) || (vlan > 4095))
870 return -EINVAL;
871
872 if (vlan) {
873 adapter->vf_cfg[vf].vf_vlan_tag = vlan;
874 adapter->vlans_added++;
875 } else {
876 adapter->vf_cfg[vf].vf_vlan_tag = 0;
877 adapter->vlans_added--;
878 }
879
880 status = be_vid_config(adapter, true, vf);
881
882 if (status)
883 dev_info(&adapter->pdev->dev,
884 "VLAN %d config on VF %d failed\n", vlan, vf);
885 return status;
886}
887
Ajit Khapardee1d18732010-07-23 01:52:13 +0000888static int be_set_vf_tx_rate(struct net_device *netdev,
889 int vf, int rate)
890{
891 struct be_adapter *adapter = netdev_priv(netdev);
892 int status = 0;
893
894 if (!adapter->sriov_enabled)
895 return -EPERM;
896
897 if ((vf >= num_vfs) || (rate < 0))
898 return -EINVAL;
899
900 if (rate > 10000)
901 rate = 10000;
902
903 adapter->vf_cfg[vf].vf_tx_rate = rate;
Ajit Khaparde856c4012011-02-11 13:32:32 +0000904 status = be_cmd_set_qos(adapter, rate / 10, vf + 1);
Ajit Khapardee1d18732010-07-23 01:52:13 +0000905
906 if (status)
907 dev_info(&adapter->pdev->dev,
908 "tx rate %d on VF %d failed\n", rate, vf);
909 return status;
910}
911
/* Adaptive interrupt coalescing: once a second, derive an event-queue
 * delay from the observed rx packet rate and reprogram the EQ if the
 * value changed. */
static void be_rx_eqd_update(struct be_adapter *adapter, struct be_rx_obj *rxo)
{
	struct be_eq_obj *rx_eq = &rxo->rx_eq;
	struct be_rx_stats *stats = rx_stats(rxo);
	ulong now = jiffies;
	ulong delta = now - stats->rx_jiffies;
	u64 pkts;
	unsigned int start, eqd;

	if (!rx_eq->enable_aic)
		return;

	/* Wrapped around */
	if (time_before(now, stats->rx_jiffies)) {
		stats->rx_jiffies = now;
		return;
	}

	/* Update once a second */
	if (delta < HZ)
		return;

	/* snapshot the 64-bit packet counter consistently (seqcount loop
	 * protects 32-bit hosts against torn reads) */
	do {
		start = u64_stats_fetch_begin_bh(&stats->sync);
		pkts = stats->rx_pkts;
	} while (u64_stats_fetch_retry_bh(&stats->sync, start));

	stats->rx_pps = (unsigned long)(pkts - stats->rx_pkts_prev) / (delta / HZ);
	stats->rx_pkts_prev = pkts;
	stats->rx_jiffies = now;
	/* heuristic pps -> eqd mapping, then clamp to the EQ's HW limits;
	 * very low rates disable coalescing entirely (eqd = 0) */
	eqd = stats->rx_pps / 110000;
	eqd = eqd << 3;
	if (eqd > rx_eq->max_eqd)
		eqd = rx_eq->max_eqd;
	if (eqd < rx_eq->min_eqd)
		eqd = rx_eq->min_eqd;
	if (eqd < 10)
		eqd = 0;
	if (eqd != rx_eq->cur_eqd) {
		be_cmd_modify_eqd(adapter, rx_eq->q.id, eqd);
		rx_eq->cur_eqd = eqd;
	}
}
955
/* Account one rx completion in the per-ring stats; writes are bracketed
 * by u64_stats_update_begin/end for consistent 64-bit reads elsewhere. */
static void be_rx_stats_update(struct be_rx_obj *rxo,
		struct be_rx_compl_info *rxcp)
{
	struct be_rx_stats *stats = rx_stats(rxo);

	u64_stats_update_begin(&stats->sync);
	stats->rx_compl++;
	stats->rx_bytes += rxcp->pkt_size;
	stats->rx_pkts++;
	if (rxcp->pkt_type == BE_MULTICAST_PACKET)
		stats->rx_mcast_pkts++;
	if (rxcp->err)
		stats->rx_compl_err++;
	u64_stats_update_end(&stats->sync);
}
971
Sathya Perla2e588f82011-03-11 02:49:26 +0000972static inline bool csum_passed(struct be_rx_compl_info *rxcp)
Ajit Khaparde728a9972009-04-13 15:41:22 -0700973{
Padmanabh Ratnakar19fad862011-03-07 03:08:16 +0000974 /* L4 checksum is not reliable for non TCP/UDP packets.
975 * Also ignore ipcksm for ipv6 pkts */
Sathya Perla2e588f82011-03-11 02:49:26 +0000976 return (rxcp->tcpf || rxcp->udpf) && rxcp->l4_csum &&
977 (rxcp->ip_csum || rxcp->ipv6);
Ajit Khaparde728a9972009-04-13 15:41:22 -0700978}
979
/* Fetch the page_info backing the rx frag at frag_idx and account it as
 * consumed.  A big page is shared by several frags; it is DMA-unmapped
 * only when its last user frag is taken. */
static struct be_rx_page_info *
get_rx_page_info(struct be_adapter *adapter,
		struct be_rx_obj *rxo,
		u16 frag_idx)
{
	struct be_rx_page_info *rx_page_info;
	struct be_queue_info *rxq = &rxo->q;

	rx_page_info = &rxo->page_info_tbl[frag_idx];
	BUG_ON(!rx_page_info->page);

	if (rx_page_info->last_page_user) {
		dma_unmap_page(&adapter->pdev->dev,
			dma_unmap_addr(rx_page_info, bus),
			adapter->big_page_size, DMA_FROM_DEVICE);
		rx_page_info->last_page_user = false;
	}

	/* one fewer posted rx buffer outstanding */
	atomic_dec(&rxq->used);
	return rx_page_info;
}
1001
1002/* Throwaway the data in the Rx completion */
1003static void be_rx_compl_discard(struct be_adapter *adapter,
Sathya Perla3abcded2010-10-03 22:12:27 -07001004 struct be_rx_obj *rxo,
Sathya Perla2e588f82011-03-11 02:49:26 +00001005 struct be_rx_compl_info *rxcp)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001006{
Sathya Perla3abcded2010-10-03 22:12:27 -07001007 struct be_queue_info *rxq = &rxo->q;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001008 struct be_rx_page_info *page_info;
Sathya Perla2e588f82011-03-11 02:49:26 +00001009 u16 i, num_rcvd = rxcp->num_rcvd;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001010
Padmanabh Ratnakare80d9da2011-03-07 03:07:58 +00001011 for (i = 0; i < num_rcvd; i++) {
Sathya Perla2e588f82011-03-11 02:49:26 +00001012 page_info = get_rx_page_info(adapter, rxo, rxcp->rxq_idx);
Padmanabh Ratnakare80d9da2011-03-07 03:07:58 +00001013 put_page(page_info->page);
1014 memset(page_info, 0, sizeof(*page_info));
Sathya Perla2e588f82011-03-11 02:49:26 +00001015 index_inc(&rxcp->rxq_idx, rxq->len);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001016 }
1017}
1018
/*
 * skb_fill_rx_data forms a complete skb for an ether frame
 * indicated by rxcp.
 */
static void skb_fill_rx_data(struct be_adapter *adapter, struct be_rx_obj *rxo,
		struct sk_buff *skb, struct be_rx_compl_info *rxcp)
{
	struct be_queue_info *rxq = &rxo->q;
	struct be_rx_page_info *page_info;
	u16 i, j;
	u16 hdr_len, curr_frag_len, remaining;
	u8 *start;

	page_info = get_rx_page_info(adapter, rxo, rxcp->rxq_idx);
	start = page_address(page_info->page) + page_info->page_offset;
	prefetch(start);

	/* Copy data in the first descriptor of this completion */
	curr_frag_len = min(rxcp->pkt_size, rx_frag_size);

	/* Copy the header portion into skb_data */
	hdr_len = min(BE_HDR_LEN, curr_frag_len);
	memcpy(skb->data, start, hdr_len);
	skb->len = curr_frag_len;
	if (curr_frag_len <= BE_HDR_LEN) { /* tiny packet */
		/* Complete packet has now been moved to data */
		put_page(page_info->page);
		skb->data_len = 0;
		skb->tail += curr_frag_len;
	} else {
		/* header was copied; hang the rest of the first frag off
		 * the skb as frag[0] */
		skb_shinfo(skb)->nr_frags = 1;
		skb_shinfo(skb)->frags[0].page = page_info->page;
		skb_shinfo(skb)->frags[0].page_offset =
					page_info->page_offset + hdr_len;
		skb_shinfo(skb)->frags[0].size = curr_frag_len - hdr_len;
		skb->data_len = curr_frag_len - hdr_len;
		skb->tail += hdr_len;
	}
	page_info->page = NULL;	/* ownership moved to the skb (or dropped) */

	if (rxcp->pkt_size <= rx_frag_size) {
		BUG_ON(rxcp->num_rcvd != 1);
		return;
	}

	/* More frags present for this completion */
	index_inc(&rxcp->rxq_idx, rxq->len);
	remaining = rxcp->pkt_size - curr_frag_len;
	for (i = 1, j = 0; i < rxcp->num_rcvd; i++) {
		page_info = get_rx_page_info(adapter, rxo, rxcp->rxq_idx);
		curr_frag_len = min(remaining, rx_frag_size);

		/* Coalesce all frags from the same physical page in one slot */
		if (page_info->page_offset == 0) {
			/* Fresh page */
			j++;
			skb_shinfo(skb)->frags[j].page = page_info->page;
			skb_shinfo(skb)->frags[j].page_offset =
							page_info->page_offset;
			skb_shinfo(skb)->frags[j].size = 0;
			skb_shinfo(skb)->nr_frags++;
		} else {
			/* same page as previous frag: drop the extra ref */
			put_page(page_info->page);
		}

		skb_shinfo(skb)->frags[j].size += curr_frag_len;
		skb->len += curr_frag_len;
		skb->data_len += curr_frag_len;

		remaining -= curr_frag_len;
		index_inc(&rxcp->rxq_idx, rxq->len);
		page_info->page = NULL;
	}
	BUG_ON(j > MAX_SKB_FRAGS);
}
1094
Ajit Khaparde5be93b92009-07-21 12:36:19 -07001095/* Process the RX completion indicated by rxcp when GRO is disabled */
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001096static void be_rx_compl_process(struct be_adapter *adapter,
Sathya Perla3abcded2010-10-03 22:12:27 -07001097 struct be_rx_obj *rxo,
Sathya Perla2e588f82011-03-11 02:49:26 +00001098 struct be_rx_compl_info *rxcp)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001099{
Michał Mirosław6332c8d2011-04-07 02:43:48 +00001100 struct net_device *netdev = adapter->netdev;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001101 struct sk_buff *skb;
Sathya Perla89420422010-02-17 01:35:26 +00001102
Michał Mirosław6332c8d2011-04-07 02:43:48 +00001103 skb = netdev_alloc_skb_ip_align(netdev, BE_HDR_LEN);
Sathya Perlaa058a632010-02-17 01:34:22 +00001104 if (unlikely(!skb)) {
Sathya Perlaac124ff2011-07-25 19:10:14 +00001105 rx_stats(rxo)->rx_drops_no_skbs++;
Sathya Perla3abcded2010-10-03 22:12:27 -07001106 be_rx_compl_discard(adapter, rxo, rxcp);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001107 return;
1108 }
1109
Sathya Perla2e588f82011-03-11 02:49:26 +00001110 skb_fill_rx_data(adapter, rxo, skb, rxcp);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001111
Michał Mirosław6332c8d2011-04-07 02:43:48 +00001112 if (likely((netdev->features & NETIF_F_RXCSUM) && csum_passed(rxcp)))
Ajit Khaparde728a9972009-04-13 15:41:22 -07001113 skb->ip_summed = CHECKSUM_UNNECESSARY;
Somnath Koturc6ce2f42010-10-25 01:11:58 +00001114 else
1115 skb_checksum_none_assert(skb);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001116
1117 skb->truesize = skb->len + sizeof(struct sk_buff);
Michał Mirosław6332c8d2011-04-07 02:43:48 +00001118 skb->protocol = eth_type_trans(skb, netdev);
Ajit Khaparde4b972912011-04-06 18:07:43 +00001119 if (adapter->netdev->features & NETIF_F_RXHASH)
1120 skb->rxhash = rxcp->rss_hash;
1121
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001122
Ajit Khaparde4c5102f2011-07-12 22:10:01 -07001123 if (unlikely(rxcp->vlanf))
1124 __vlan_hwaccel_put_tag(skb, rxcp->vlan_tag);
1125
1126 netif_receive_skb(skb);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001127}
1128
/* Process the RX completion indicated by rxcp when GRO is enabled */
static void be_rx_compl_process_gro(struct be_adapter *adapter,
		struct be_rx_obj *rxo,
		struct be_rx_compl_info *rxcp)
{
	struct be_rx_page_info *page_info;
	struct sk_buff *skb = NULL;
	struct be_queue_info *rxq = &rxo->q;
	struct be_eq_obj *eq_obj = &rxo->rx_eq;
	u16 remaining, curr_frag_len;
	u16 i, j;

	skb = napi_get_frags(&eq_obj->napi);
	if (!skb) {
		/* no skb: release the posted rx frags of this completion */
		be_rx_compl_discard(adapter, rxo, rxcp);
		return;
	}

	remaining = rxcp->pkt_size;
	/* j starts at -1 (wraps in u16) so the first iteration's j++
	 * lands on frag slot 0 */
	for (i = 0, j = -1; i < rxcp->num_rcvd; i++) {
		page_info = get_rx_page_info(adapter, rxo, rxcp->rxq_idx);

		curr_frag_len = min(remaining, rx_frag_size);

		/* Coalesce all frags from the same physical page in one slot */
		if (i == 0 || page_info->page_offset == 0) {
			/* First frag or Fresh page */
			j++;
			skb_shinfo(skb)->frags[j].page = page_info->page;
			skb_shinfo(skb)->frags[j].page_offset =
							page_info->page_offset;
			skb_shinfo(skb)->frags[j].size = 0;
		} else {
			/* same page as previous frag: drop the extra ref */
			put_page(page_info->page);
		}
		skb_shinfo(skb)->frags[j].size += curr_frag_len;

		remaining -= curr_frag_len;
		index_inc(&rxcp->rxq_idx, rxq->len);
		memset(page_info, 0, sizeof(*page_info));
	}
	BUG_ON(j > MAX_SKB_FRAGS);

	skb_shinfo(skb)->nr_frags = j + 1;
	skb->len = rxcp->pkt_size;
	skb->data_len = rxcp->pkt_size;
	skb->truesize += rxcp->pkt_size;
	/* GRO path is taken only for frames the HW checksummed */
	skb->ip_summed = CHECKSUM_UNNECESSARY;
	if (adapter->netdev->features & NETIF_F_RXHASH)
		skb->rxhash = rxcp->rss_hash;

	if (unlikely(rxcp->vlanf))
		__vlan_hwaccel_put_tag(skb, rxcp->vlan_tag);

	napi_gro_frags(&eq_obj->napi);
}
1185
Sathya Perla2e588f82011-03-11 02:49:26 +00001186static void be_parse_rx_compl_v1(struct be_adapter *adapter,
1187 struct be_eth_rx_compl *compl,
1188 struct be_rx_compl_info *rxcp)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001189{
Sathya Perla2e588f82011-03-11 02:49:26 +00001190 rxcp->pkt_size =
1191 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, pktsize, compl);
1192 rxcp->vlanf = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, vtp, compl);
1193 rxcp->err = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, err, compl);
1194 rxcp->tcpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, tcpf, compl);
Padmanabh Ratnakar9ecb42f2011-03-15 14:57:09 -07001195 rxcp->udpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, udpf, compl);
Sathya Perla2e588f82011-03-11 02:49:26 +00001196 rxcp->ip_csum =
1197 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, ipcksm, compl);
1198 rxcp->l4_csum =
1199 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, l4_cksm, compl);
1200 rxcp->ipv6 =
1201 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, ip_version, compl);
1202 rxcp->rxq_idx =
1203 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, fragndx, compl);
1204 rxcp->num_rcvd =
1205 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, numfrags, compl);
1206 rxcp->pkt_type =
1207 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, cast_enc, compl);
Ajit Khaparde4b972912011-04-06 18:07:43 +00001208 rxcp->rss_hash =
1209 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, rsshash, rxcp);
Sathya Perla15d72182011-03-21 20:49:26 +00001210 if (rxcp->vlanf) {
1211 rxcp->vtm = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, vtm,
David S. Miller3c709f82011-05-11 14:26:15 -04001212 compl);
1213 rxcp->vlan_tag = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, vlan_tag,
1214 compl);
Sathya Perla15d72182011-03-21 20:49:26 +00001215 }
Sathya Perla12004ae2011-08-02 19:57:46 +00001216 rxcp->port = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, port, compl);
Sathya Perla2e588f82011-03-11 02:49:26 +00001217}
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001218
Sathya Perla2e588f82011-03-11 02:49:26 +00001219static void be_parse_rx_compl_v0(struct be_adapter *adapter,
1220 struct be_eth_rx_compl *compl,
1221 struct be_rx_compl_info *rxcp)
1222{
1223 rxcp->pkt_size =
1224 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, pktsize, compl);
1225 rxcp->vlanf = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, vtp, compl);
1226 rxcp->err = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, err, compl);
1227 rxcp->tcpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, tcpf, compl);
Padmanabh Ratnakar9ecb42f2011-03-15 14:57:09 -07001228 rxcp->udpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, udpf, compl);
Sathya Perla2e588f82011-03-11 02:49:26 +00001229 rxcp->ip_csum =
1230 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, ipcksm, compl);
1231 rxcp->l4_csum =
1232 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, l4_cksm, compl);
1233 rxcp->ipv6 =
1234 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, ip_version, compl);
1235 rxcp->rxq_idx =
1236 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, fragndx, compl);
1237 rxcp->num_rcvd =
1238 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, numfrags, compl);
1239 rxcp->pkt_type =
1240 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, cast_enc, compl);
Ajit Khaparde4b972912011-04-06 18:07:43 +00001241 rxcp->rss_hash =
1242 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, rsshash, rxcp);
Sathya Perla15d72182011-03-21 20:49:26 +00001243 if (rxcp->vlanf) {
1244 rxcp->vtm = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, vtm,
David S. Miller3c709f82011-05-11 14:26:15 -04001245 compl);
1246 rxcp->vlan_tag = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, vlan_tag,
1247 compl);
Sathya Perla15d72182011-03-21 20:49:26 +00001248 }
Sathya Perla12004ae2011-08-02 19:57:46 +00001249 rxcp->port = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, port, compl);
Sathya Perla2e588f82011-03-11 02:49:26 +00001250}
1251
/* Pop the next valid rx completion from rxo's CQ, parse it into
 * rxo->rxcp and return it; NULL when the CQ has no new entry. */
static struct be_rx_compl_info *be_rx_compl_get(struct be_rx_obj *rxo)
{
	struct be_eth_rx_compl *compl = queue_tail_node(&rxo->cq);
	struct be_rx_compl_info *rxcp = &rxo->rxcp;
	struct be_adapter *adapter = rxo->adapter;

	/* For checking the valid bit it is Ok to use either definition as the
	 * valid bit is at the same position in both v0 and v1 Rx compl */
	if (compl->dw[offsetof(struct amap_eth_rx_compl_v1, valid) / 32] == 0)
		return NULL;

	/* read the compl body only after the valid bit is observed */
	rmb();
	be_dws_le_to_cpu(compl, sizeof(*compl));

	if (adapter->be3_native)
		be_parse_rx_compl_v1(adapter, compl, rxcp);
	else
		be_parse_rx_compl_v0(adapter, compl, rxcp);

	if (rxcp->vlanf) {
		/* vlanf could be wrongly set in some cards.
		 * ignore if vtm is not set */
		/* NOTE(review): 0x400 is an unnamed function_mode flag bit;
		 * confirm against be_hw definitions and use the named
		 * constant if one exists */
		if ((adapter->function_mode & 0x400) && !rxcp->vtm)
			rxcp->vlanf = 0;

		if (!lancer_chip(adapter))
			rxcp->vlan_tag = swab16(rxcp->vlan_tag);

		/* treat as untagged when the tag equals the port's pvid and
		 * the vid was not configured by the user */
		if (((adapter->pvid & VLAN_VID_MASK) ==
			(rxcp->vlan_tag & VLAN_VID_MASK)) &&
			!adapter->vlan_tag[rxcp->vlan_tag])
			rxcp->vlanf = 0;
	}

	/* As the compl has been parsed, reset it; we wont touch it again */
	compl->dw[offsetof(struct amap_eth_rx_compl_v1, valid) / 32] = 0;

	queue_tail_inc(&rxo->cq);
	return rxcp;
}
1292
Eric Dumazet1829b082011-03-01 05:48:12 +00001293static inline struct page *be_alloc_pages(u32 size, gfp_t gfp)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001294{
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001295 u32 order = get_order(size);
Eric Dumazet1829b082011-03-01 05:48:12 +00001296
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001297 if (order > 0)
Eric Dumazet1829b082011-03-01 05:48:12 +00001298 gfp |= __GFP_COMP;
1299 return alloc_pages(gfp, order);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001300}
1301
1302/*
1303 * Allocate a page, split it to fragments of size rx_frag_size and post as
1304 * receive buffers to BE
1305 */
Eric Dumazet1829b082011-03-01 05:48:12 +00001306static void be_post_rx_frags(struct be_rx_obj *rxo, gfp_t gfp)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001307{
Sathya Perla3abcded2010-10-03 22:12:27 -07001308 struct be_adapter *adapter = rxo->adapter;
1309 struct be_rx_page_info *page_info_tbl = rxo->page_info_tbl;
Sathya Perla26d92f92010-01-21 22:52:08 -08001310 struct be_rx_page_info *page_info = NULL, *prev_page_info = NULL;
Sathya Perla3abcded2010-10-03 22:12:27 -07001311 struct be_queue_info *rxq = &rxo->q;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001312 struct page *pagep = NULL;
1313 struct be_eth_rx_d *rxd;
1314 u64 page_dmaaddr = 0, frag_dmaaddr;
1315 u32 posted, page_offset = 0;
1316
Sathya Perla3abcded2010-10-03 22:12:27 -07001317 page_info = &rxo->page_info_tbl[rxq->head];
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001318 for (posted = 0; posted < MAX_RX_POST && !page_info->page; posted++) {
1319 if (!pagep) {
Eric Dumazet1829b082011-03-01 05:48:12 +00001320 pagep = be_alloc_pages(adapter->big_page_size, gfp);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001321 if (unlikely(!pagep)) {
Sathya Perlaac124ff2011-07-25 19:10:14 +00001322 rx_stats(rxo)->rx_post_fail++;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001323 break;
1324 }
Ivan Vecera2b7bceb2011-02-02 08:05:12 +00001325 page_dmaaddr = dma_map_page(&adapter->pdev->dev, pagep,
1326 0, adapter->big_page_size,
1327 DMA_FROM_DEVICE);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001328 page_info->page_offset = 0;
1329 } else {
1330 get_page(pagep);
1331 page_info->page_offset = page_offset + rx_frag_size;
1332 }
1333 page_offset = page_info->page_offset;
1334 page_info->page = pagep;
FUJITA Tomonorifac6da52010-04-01 16:53:22 +00001335 dma_unmap_addr_set(page_info, bus, page_dmaaddr);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001336 frag_dmaaddr = page_dmaaddr + page_info->page_offset;
1337
1338 rxd = queue_head_node(rxq);
1339 rxd->fragpa_lo = cpu_to_le32(frag_dmaaddr & 0xFFFFFFFF);
1340 rxd->fragpa_hi = cpu_to_le32(upper_32_bits(frag_dmaaddr));
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001341
1342 /* Any space left in the current big page for another frag? */
1343 if ((page_offset + rx_frag_size + rx_frag_size) >
1344 adapter->big_page_size) {
1345 pagep = NULL;
1346 page_info->last_page_user = true;
1347 }
Sathya Perla26d92f92010-01-21 22:52:08 -08001348
1349 prev_page_info = page_info;
1350 queue_head_inc(rxq);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001351 page_info = &page_info_tbl[rxq->head];
1352 }
1353 if (pagep)
Sathya Perla26d92f92010-01-21 22:52:08 -08001354 prev_page_info->last_page_user = true;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001355
1356 if (posted) {
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001357 atomic_add(posted, &rxq->used);
Sathya Perla8788fdc2009-07-27 22:52:03 +00001358 be_rxq_notify(adapter, rxq->id, posted);
Sathya Perlaea1dae12009-03-19 23:56:20 -07001359 } else if (atomic_read(&rxq->used) == 0) {
1360 /* Let be_worker replenish when memory is available */
Sathya Perla3abcded2010-10-03 22:12:27 -07001361 rxo->rx_post_starved = true;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001362 }
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001363}
1364
/* Fetch the next valid TX completion from tx_cq, or NULL if none is
 * pending. The entry is converted to CPU endianness, its valid bit
 * cleared and the CQ tail advanced, so it is consumed exactly once.
 */
static struct be_eth_tx_compl *be_tx_compl_get(struct be_queue_info *tx_cq)
{
	struct be_eth_tx_compl *txcp = queue_tail_node(tx_cq);

	if (txcp->dw[offsetof(struct amap_eth_tx_compl, valid) / 32] == 0)
		return NULL;

	/* Do not read the rest of the compl before the valid bit is seen set */
	rmb();
	be_dws_le_to_cpu(txcp, sizeof(*txcp));

	/* Invalidate the entry so it is not picked up again */
	txcp->dw[offsetof(struct amap_eth_tx_compl, valid) / 32] = 0;

	queue_tail_inc(tx_cq);
	return txcp;
}
1380
/* Reclaim the skb whose wrbs end at last_index on the TX queue: unmap
 * all of its fragments and free the skb. Returns the number of wrbs
 * consumed (including the header wrb) so the caller can adjust
 * txq->used. Advances txq->tail past all of the skb's wrbs.
 */
static u16 be_tx_compl_process(struct be_adapter *adapter,
		struct be_tx_obj *txo, u16 last_index)
{
	struct be_queue_info *txq = &txo->q;
	struct be_eth_wrb *wrb;
	struct sk_buff **sent_skbs = txo->sent_skb_list;
	struct sk_buff *sent_skb;
	u16 cur_index, num_wrbs = 1; /* account for hdr wrb */
	bool unmap_skb_hdr = true;

	sent_skb = sent_skbs[txq->tail];
	BUG_ON(!sent_skb);
	sent_skbs[txq->tail] = NULL;

	/* skip header wrb */
	queue_tail_inc(txq);

	do {
		cur_index = txq->tail;
		wrb = queue_tail_node(txq);
		/* the skb's linear header is unmapped only on the first
		 * data wrb (and only if the skb has linear data) */
		unmap_tx_frag(&adapter->pdev->dev, wrb,
			      (unmap_skb_hdr && skb_headlen(sent_skb)));
		unmap_skb_hdr = false;

		num_wrbs++;
		queue_tail_inc(txq);
	} while (cur_index != last_index);

	kfree_skb(sent_skb);
	return num_wrbs;
}
1412
/* Fetch the next posted event queue entry, or NULL if none is pending.
 * Converts the evt word to CPU endianness and advances the EQ tail;
 * callers zero eqe->evt after consuming the entry.
 */
static inline struct be_eq_entry *event_get(struct be_eq_obj *eq_obj)
{
	struct be_eq_entry *eqe = queue_tail_node(&eq_obj->q);

	if (!eqe->evt)
		return NULL;

	/* Do not read the entry contents before evt is seen non-zero */
	rmb();
	eqe->evt = le32_to_cpu(eqe->evt);
	queue_tail_inc(&eq_obj->q);
	return eqe;
}
1425
1426static int event_handle(struct be_adapter *adapter,
Sathya Perla3c8def92011-06-12 20:01:58 +00001427 struct be_eq_obj *eq_obj,
1428 bool rearm)
Sathya Perla859b1e42009-08-10 03:43:51 +00001429{
1430 struct be_eq_entry *eqe;
1431 u16 num = 0;
1432
1433 while ((eqe = event_get(eq_obj)) != NULL) {
1434 eqe->evt = 0;
1435 num++;
1436 }
1437
1438 /* Deal with any spurious interrupts that come
1439 * without events
1440 */
Sathya Perla3c8def92011-06-12 20:01:58 +00001441 if (!num)
1442 rearm = true;
1443
1444 be_eq_notify(adapter, eq_obj->q.id, rearm, true, num);
Sathya Perla859b1e42009-08-10 03:43:51 +00001445 if (num)
1446 napi_schedule(&eq_obj->napi);
1447
1448 return num;
1449}
1450
1451/* Just read and notify events without processing them.
1452 * Used at the time of destroying event queues */
1453static void be_eq_clean(struct be_adapter *adapter,
1454 struct be_eq_obj *eq_obj)
1455{
1456 struct be_eq_entry *eqe;
1457 u16 num = 0;
1458
1459 while ((eqe = event_get(eq_obj)) != NULL) {
1460 eqe->evt = 0;
1461 num++;
1462 }
1463
1464 if (num)
1465 be_eq_notify(adapter, eq_obj->q.id, false, true, num);
1466}
1467
/* Flush the RX path on teardown: discard all pending RX completions,
 * then release every posted receive buffer the hw never consumed.
 * On return the RX queue is empty with head/tail reset to 0.
 */
static void be_rx_q_clean(struct be_adapter *adapter, struct be_rx_obj *rxo)
{
	struct be_rx_page_info *page_info;
	struct be_queue_info *rxq = &rxo->q;
	struct be_queue_info *rx_cq = &rxo->cq;
	struct be_rx_compl_info *rxcp;
	u16 tail;

	/* First cleanup pending rx completions */
	while ((rxcp = be_rx_compl_get(rxo)) != NULL) {
		be_rx_compl_discard(adapter, rxo, rxcp);
		be_cq_notify(adapter, rx_cq->id, false, 1);
	}

	/* Then free posted rx buffers that were not used */
	/* oldest posted-but-unconsumed entry sits 'used' slots behind head */
	tail = (rxq->head + rxq->len - atomic_read(&rxq->used)) % rxq->len;
	for (; atomic_read(&rxq->used) > 0; index_inc(&tail, rxq->len)) {
		page_info = get_rx_page_info(adapter, rxo, tail);
		put_page(page_info->page);
		memset(page_info, 0, sizeof(*page_info));
	}
	BUG_ON(atomic_read(&rxq->used));
	rxq->tail = rxq->head = 0;
}
1492
/* Drain the TX path on teardown: poll (up to ~200ms) for outstanding
 * TX completions and reclaim their wrbs; then forcibly free any posted
 * skbs whose completions never arrived, so no buffers are leaked.
 */
static void be_tx_compl_clean(struct be_adapter *adapter,
		struct be_tx_obj *txo)
{
	struct be_queue_info *tx_cq = &txo->cq;
	struct be_queue_info *txq = &txo->q;
	struct be_eth_tx_compl *txcp;
	u16 end_idx, cmpl = 0, timeo = 0, num_wrbs = 0;
	struct sk_buff **sent_skbs = txo->sent_skb_list;
	struct sk_buff *sent_skb;
	bool dummy_wrb;

	/* Wait for a max of 200ms for all the tx-completions to arrive. */
	do {
		while ((txcp = be_tx_compl_get(tx_cq))) {
			end_idx = AMAP_GET_BITS(struct amap_eth_tx_compl,
					wrb_index, txcp);
			num_wrbs += be_tx_compl_process(adapter, txo, end_idx);
			cmpl++;
		}
		if (cmpl) {
			be_cq_notify(adapter, tx_cq->id, false, cmpl);
			atomic_sub(num_wrbs, &txq->used);
			cmpl = 0;
			num_wrbs = 0;
		}

		if (atomic_read(&txq->used) == 0 || ++timeo > 200)
			break;

		mdelay(1);
	} while (true);

	if (atomic_read(&txq->used))
		dev_err(&adapter->pdev->dev, "%d pending tx-completions\n",
			atomic_read(&txq->used));

	/* free posted tx for which compls will never arrive */
	while (atomic_read(&txq->used)) {
		sent_skb = sent_skbs[txq->tail];
		end_idx = txq->tail;
		/* advance to the last wrb of this skb before reclaiming it */
		index_adv(&end_idx,
			wrb_cnt_for_skb(adapter, sent_skb, &dummy_wrb) - 1,
			txq->len);
		num_wrbs = be_tx_compl_process(adapter, txo, end_idx);
		atomic_sub(num_wrbs, &txq->used);
	}
}
1540
/* Destroy the MCC queue and then its completion queue (reverse of the
 * create order); each hw queue is destroyed only if it was created,
 * and its memory is freed unconditionally.
 */
static void be_mcc_queues_destroy(struct be_adapter *adapter)
{
	struct be_queue_info *q;

	q = &adapter->mcc_obj.q;
	if (q->created)
		be_cmd_q_destroy(adapter, q, QTYPE_MCCQ);
	be_queue_free(adapter, q);

	q = &adapter->mcc_obj.cq;
	if (q->created)
		be_cmd_q_destroy(adapter, q, QTYPE_CQ);
	be_queue_free(adapter, q);
}
1555
/* Must be called only after TX qs are created as MCC shares TX EQ.
 * Creates the MCC completion queue and the MCC queue on top of it.
 * Returns 0 on success, -1 on failure (partially created queues are
 * unwound via the goto chain below, in reverse order of creation).
 */
static int be_mcc_queues_create(struct be_adapter *adapter)
{
	struct be_queue_info *q, *cq;

	/* Alloc MCC compl queue */
	cq = &adapter->mcc_obj.cq;
	if (be_queue_alloc(adapter, cq, MCC_CQ_LEN,
			sizeof(struct be_mcc_compl)))
		goto err;

	/* Ask BE to create MCC compl queue; share TX's eq */
	if (be_cmd_cq_create(adapter, cq, &adapter->tx_eq.q, false, true, 0))
		goto mcc_cq_free;

	/* Alloc MCC queue */
	q = &adapter->mcc_obj.q;
	if (be_queue_alloc(adapter, q, MCC_Q_LEN, sizeof(struct be_mcc_wrb)))
		goto mcc_cq_destroy;

	/* Ask BE to create MCC queue */
	if (be_cmd_mccq_create(adapter, q, cq))
		goto mcc_q_free;

	return 0;

mcc_q_free:
	be_queue_free(adapter, q);
mcc_cq_destroy:
	be_cmd_q_destroy(adapter, cq, QTYPE_CQ);
mcc_cq_free:
	be_queue_free(adapter, cq);
err:
	return -1;
}
1591
/* Destroy each TX queue and its CQ, drain residual events from the
 * shared TX EQ, then destroy the EQ itself (reverse of create order).
 */
static void be_tx_queues_destroy(struct be_adapter *adapter)
{
	struct be_queue_info *q;
	struct be_tx_obj *txo;
	u8 i;

	for_all_tx_queues(adapter, txo, i) {
		q = &txo->q;
		if (q->created)
			be_cmd_q_destroy(adapter, q, QTYPE_TXQ);
		be_queue_free(adapter, q);

		q = &txo->cq;
		if (q->created)
			be_cmd_q_destroy(adapter, q, QTYPE_CQ);
		be_queue_free(adapter, q);
	}

	/* Clear any residual events */
	be_eq_clean(adapter, &adapter->tx_eq);

	q = &adapter->tx_eq.q;
	if (q->created)
		be_cmd_q_destroy(adapter, q, QTYPE_EQ);
	be_queue_free(adapter, q);
}
1618
/* One TX event queue is shared by all TX compl qs.
 * Creates the shared TX EQ, then a CQ and TX queue per tx-obj.
 * Returns 0 on success, -1 on failure (everything created so far is
 * torn down via be_tx_queues_destroy).
 */
static int be_tx_queues_create(struct be_adapter *adapter)
{
	struct be_queue_info *eq, *q, *cq;
	struct be_tx_obj *txo;
	u8 i;

	/* TX EQ uses a fixed eqd of 96; adaptive coalescing is off */
	adapter->tx_eq.max_eqd = 0;
	adapter->tx_eq.min_eqd = 0;
	adapter->tx_eq.cur_eqd = 96;
	adapter->tx_eq.enable_aic = false;

	eq = &adapter->tx_eq.q;
	if (be_queue_alloc(adapter, eq, EVNT_Q_LEN,
		sizeof(struct be_eq_entry)))
		return -1;

	if (be_cmd_eq_create(adapter, eq, adapter->tx_eq.cur_eqd))
		goto err;
	adapter->tx_eq.eq_idx = adapter->eq_next_idx++;

	for_all_tx_queues(adapter, txo, i) {
		cq = &txo->cq;
		if (be_queue_alloc(adapter, cq, TX_CQ_LEN,
			sizeof(struct be_eth_tx_compl)))
			goto err;

		/* CQ rides on the shared TX EQ */
		if (be_cmd_cq_create(adapter, cq, eq, false, false, 3))
			goto err;

		q = &txo->q;
		if (be_queue_alloc(adapter, q, TX_Q_LEN,
			sizeof(struct be_eth_wrb)))
			goto err;

		if (be_cmd_txq_create(adapter, q, cq))
			goto err;
	}
	return 0;

err:
	be_tx_queues_destroy(adapter);
	return -1;
}
1663
/* Tear down per-RX-object resources: free the RX queue memory (its hw
 * queue is not destroyed here), destroy and free the CQ, then destroy
 * and free the EQ.
 */
static void be_rx_queues_destroy(struct be_adapter *adapter)
{
	struct be_queue_info *q;
	struct be_rx_obj *rxo;
	int i;

	for_all_rx_queues(adapter, rxo, i) {
		be_queue_free(adapter, &rxo->q);

		q = &rxo->cq;
		if (q->created)
			be_cmd_q_destroy(adapter, q, QTYPE_CQ);
		be_queue_free(adapter, q);

		q = &rxo->rx_eq.q;
		if (q->created)
			be_cmd_q_destroy(adapter, q, QTYPE_EQ);
		be_queue_free(adapter, q);
	}
}
1684
Sathya Perlaac6a0c42011-03-21 20:49:25 +00001685static u32 be_num_rxqs_want(struct be_adapter *adapter)
1686{
Sathya Perlac814fd32011-06-26 20:41:25 +00001687 if ((adapter->function_caps & BE_FUNCTION_CAPS_RSS) &&
Sathya Perlaac6a0c42011-03-21 20:49:25 +00001688 !adapter->sriov_enabled && !(adapter->function_mode & 0x400)) {
1689 return 1 + MAX_RSS_QS; /* one default non-RSS queue */
1690 } else {
1691 dev_warn(&adapter->pdev->dev,
1692 "No support for multiple RX queues\n");
1693 return 1;
1694 }
1695}
1696
/* Create the per-RX-object EQ and CQ, and allocate (but do not create
 * in hw) the RX queue. The RX queue count is capped by the available
 * MSI-x vectors (one vector is kept for TX/MCC). Returns 0 on success;
 * on failure everything created so far is destroyed and -1 returned.
 */
static int be_rx_queues_create(struct be_adapter *adapter)
{
	struct be_queue_info *eq, *q, *cq;
	struct be_rx_obj *rxo;
	int rc, i;

	adapter->num_rx_qs = min(be_num_rxqs_want(adapter),
				msix_enabled(adapter) ?
					adapter->num_msix_vec - 1 : 1);
	if (adapter->num_rx_qs != MAX_RX_QS)
		dev_warn(&adapter->pdev->dev,
			"Can create only %d RX queues", adapter->num_rx_qs);

	adapter->big_page_size = (1 << get_order(rx_frag_size)) * PAGE_SIZE;
	for_all_rx_queues(adapter, rxo, i) {
		rxo->adapter = adapter;
		/* RX uses adaptive interrupt coalescing, unlike TX */
		rxo->rx_eq.max_eqd = BE_MAX_EQD;
		rxo->rx_eq.enable_aic = true;

		/* EQ */
		eq = &rxo->rx_eq.q;
		rc = be_queue_alloc(adapter, eq, EVNT_Q_LEN,
					sizeof(struct be_eq_entry));
		if (rc)
			goto err;

		rc = be_cmd_eq_create(adapter, eq, rxo->rx_eq.cur_eqd);
		if (rc)
			goto err;

		rxo->rx_eq.eq_idx = adapter->eq_next_idx++;

		/* CQ */
		cq = &rxo->cq;
		rc = be_queue_alloc(adapter, cq, RX_CQ_LEN,
				sizeof(struct be_eth_rx_compl));
		if (rc)
			goto err;

		rc = be_cmd_cq_create(adapter, cq, eq, false, false, 3);
		if (rc)
			goto err;

		/* Rx Q - will be created in be_open() */
		q = &rxo->q;
		rc = be_queue_alloc(adapter, q, RX_Q_LEN,
				sizeof(struct be_eth_rx_d));
		if (rc)
			goto err;

	}

	return 0;
err:
	be_rx_queues_destroy(adapter);
	return -1;
}
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001754
Sathya Perlafe6d2a32010-11-21 23:25:50 +00001755static bool event_peek(struct be_eq_obj *eq_obj)
Sathya Perlab628bde2009-08-17 00:58:26 +00001756{
Sathya Perlafe6d2a32010-11-21 23:25:50 +00001757 struct be_eq_entry *eqe = queue_tail_node(&eq_obj->q);
1758 if (!eqe->evt)
1759 return false;
1760 else
1761 return true;
Sathya Perlab628bde2009-08-17 00:58:26 +00001762}
1763
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001764static irqreturn_t be_intx(int irq, void *dev)
1765{
1766 struct be_adapter *adapter = dev;
Sathya Perla3abcded2010-10-03 22:12:27 -07001767 struct be_rx_obj *rxo;
Sathya Perlafe6d2a32010-11-21 23:25:50 +00001768 int isr, i, tx = 0 , rx = 0;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001769
Sathya Perlafe6d2a32010-11-21 23:25:50 +00001770 if (lancer_chip(adapter)) {
1771 if (event_peek(&adapter->tx_eq))
Sathya Perla3c8def92011-06-12 20:01:58 +00001772 tx = event_handle(adapter, &adapter->tx_eq, false);
Sathya Perlafe6d2a32010-11-21 23:25:50 +00001773 for_all_rx_queues(adapter, rxo, i) {
1774 if (event_peek(&rxo->rx_eq))
Sathya Perla3c8def92011-06-12 20:01:58 +00001775 rx |= event_handle(adapter, &rxo->rx_eq, true);
Sathya Perlafe6d2a32010-11-21 23:25:50 +00001776 }
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001777
Sathya Perlafe6d2a32010-11-21 23:25:50 +00001778 if (!(tx || rx))
1779 return IRQ_NONE;
Sathya Perla3abcded2010-10-03 22:12:27 -07001780
Sathya Perlafe6d2a32010-11-21 23:25:50 +00001781 } else {
1782 isr = ioread32(adapter->csr + CEV_ISR0_OFFSET +
1783 (adapter->tx_eq.q.id / 8) * CEV_ISR_SIZE);
1784 if (!isr)
1785 return IRQ_NONE;
1786
Padmanabh Ratnakarecd62102011-04-03 01:54:11 +00001787 if ((1 << adapter->tx_eq.eq_idx & isr))
Sathya Perla3c8def92011-06-12 20:01:58 +00001788 event_handle(adapter, &adapter->tx_eq, false);
Sathya Perlafe6d2a32010-11-21 23:25:50 +00001789
1790 for_all_rx_queues(adapter, rxo, i) {
Padmanabh Ratnakarecd62102011-04-03 01:54:11 +00001791 if ((1 << rxo->rx_eq.eq_idx & isr))
Sathya Perla3c8def92011-06-12 20:01:58 +00001792 event_handle(adapter, &rxo->rx_eq, true);
Sathya Perlafe6d2a32010-11-21 23:25:50 +00001793 }
Sathya Perla3abcded2010-10-03 22:12:27 -07001794 }
Sathya Perlac001c212009-07-01 01:06:07 +00001795
Sathya Perla8788fdc2009-07-27 22:52:03 +00001796 return IRQ_HANDLED;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001797}
1798
1799static irqreturn_t be_msix_rx(int irq, void *dev)
1800{
Sathya Perla3abcded2010-10-03 22:12:27 -07001801 struct be_rx_obj *rxo = dev;
1802 struct be_adapter *adapter = rxo->adapter;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001803
Sathya Perla3c8def92011-06-12 20:01:58 +00001804 event_handle(adapter, &rxo->rx_eq, true);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001805
1806 return IRQ_HANDLED;
1807}
1808
Sathya Perla5fb379e2009-06-18 00:02:59 +00001809static irqreturn_t be_msix_tx_mcc(int irq, void *dev)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001810{
1811 struct be_adapter *adapter = dev;
1812
Sathya Perla3c8def92011-06-12 20:01:58 +00001813 event_handle(adapter, &adapter->tx_eq, false);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001814
1815 return IRQ_HANDLED;
1816}
1817
Sathya Perla2e588f82011-03-11 02:49:26 +00001818static inline bool do_gro(struct be_rx_compl_info *rxcp)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001819{
Sathya Perla2e588f82011-03-11 02:49:26 +00001820 return (rxcp->tcpf && !rxcp->err) ? true : false;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001821}
1822
/* NAPI poll handler for an RX queue: reap up to 'budget' completions,
 * hand frames to the stack (via GRO when applicable), refill the RX
 * queue when it runs low, and re-arm the CQ only when all pending work
 * has been consumed.
 */
static int be_poll_rx(struct napi_struct *napi, int budget)
{
	struct be_eq_obj *rx_eq = container_of(napi, struct be_eq_obj, napi);
	struct be_rx_obj *rxo = container_of(rx_eq, struct be_rx_obj, rx_eq);
	struct be_adapter *adapter = rxo->adapter;
	struct be_queue_info *rx_cq = &rxo->cq;
	struct be_rx_compl_info *rxcp;
	u32 work_done;

	rx_stats(rxo)->rx_polls++;
	for (work_done = 0; work_done < budget; work_done++) {
		rxcp = be_rx_compl_get(rxo);
		if (!rxcp)
			break;

		/* Is it a flush compl that has no data */
		if (unlikely(rxcp->num_rcvd == 0))
			goto loop_continue;

		/* Discard compl with partial DMA Lancer B0 */
		if (unlikely(!rxcp->pkt_size)) {
			be_rx_compl_discard(adapter, rxo, rxcp);
			goto loop_continue;
		}

		/* On BE drop pkts that arrive due to imperfect filtering in
		 * promiscuous mode on some skews
		 */
		if (unlikely(rxcp->port != adapter->port_num &&
				!lancer_chip(adapter))) {
			be_rx_compl_discard(adapter, rxo, rxcp);
			goto loop_continue;
		}

		if (do_gro(rxcp))
			be_rx_compl_process_gro(adapter, rxo, rxcp);
		else
			be_rx_compl_process(adapter, rxo, rxcp);
loop_continue:
		be_rx_stats_update(rxo, rxcp);
	}

	/* Refill the queue */
	if (atomic_read(&rxo->q.used) < RX_FRAGS_REFILL_WM)
		be_post_rx_frags(rxo, GFP_ATOMIC);

	/* All consumed */
	if (work_done < budget) {
		napi_complete(napi);
		be_cq_notify(adapter, rx_cq->id, true, work_done);
	} else {
		/* More to be consumed; continue with interrupts disabled */
		be_cq_notify(adapter, rx_cq->id, false, work_done);
	}
	return work_done;
}
1879
/* As TX and MCC share the same EQ check for both TX and MCC completions.
 * For TX/MCC we don't honour budget; consume everything.
 * Always returns 1 and completes napi in one shot.
 */
static int be_poll_tx_mcc(struct napi_struct *napi, int budget)
{
	struct be_eq_obj *tx_eq = container_of(napi, struct be_eq_obj, napi);
	struct be_adapter *adapter =
		container_of(tx_eq, struct be_adapter, tx_eq);
	struct be_tx_obj *txo;
	struct be_eth_tx_compl *txcp;
	int tx_compl, mcc_compl, status = 0;
	u8 i;
	u16 num_wrbs;

	/* Reap TX completions on every TX queue */
	for_all_tx_queues(adapter, txo, i) {
		tx_compl = 0;
		num_wrbs = 0;
		while ((txcp = be_tx_compl_get(&txo->cq))) {
			num_wrbs += be_tx_compl_process(adapter, txo,
				AMAP_GET_BITS(struct amap_eth_tx_compl,
					wrb_index, txcp));
			tx_compl++;
		}
		if (tx_compl) {
			be_cq_notify(adapter, txo->cq.id, true, tx_compl);

			atomic_sub(num_wrbs, &txo->q.used);

			/* As Tx wrbs have been freed up, wake up netdev queue
			 * if it was stopped due to lack of tx wrbs. */
			if (__netif_subqueue_stopped(adapter->netdev, i) &&
				atomic_read(&txo->q.used) < txo->q.len / 2) {
				netif_wake_subqueue(adapter->netdev, i);
			}

			u64_stats_update_begin(&tx_stats(txo)->sync_compl);
			tx_stats(txo)->tx_compl += tx_compl;
			u64_stats_update_end(&tx_stats(txo)->sync_compl);
		}
	}

	/* MCC completions ride the same EQ; reap them too */
	mcc_compl = be_process_mcc(adapter, &status);

	if (mcc_compl) {
		struct be_mcc_obj *mcc_obj = &adapter->mcc_obj;
		be_cq_notify(adapter, mcc_obj->cq.id, true, mcc_compl);
	}

	napi_complete(napi);

	be_eq_notify(adapter, tx_eq->q.id, true, false, 0);
	adapter->drv_stats.tx_events++;
	return 1;
}
1934
Ajit Khaparded053de92010-09-03 06:23:30 +00001935void be_detect_dump_ue(struct be_adapter *adapter)
Ajit Khaparde7c185272010-07-29 06:16:33 +00001936{
1937 u32 ue_status_lo, ue_status_hi, ue_status_lo_mask, ue_status_hi_mask;
1938 u32 i;
1939
1940 pci_read_config_dword(adapter->pdev,
1941 PCICFG_UE_STATUS_LOW, &ue_status_lo);
1942 pci_read_config_dword(adapter->pdev,
1943 PCICFG_UE_STATUS_HIGH, &ue_status_hi);
1944 pci_read_config_dword(adapter->pdev,
1945 PCICFG_UE_STATUS_LOW_MASK, &ue_status_lo_mask);
1946 pci_read_config_dword(adapter->pdev,
1947 PCICFG_UE_STATUS_HI_MASK, &ue_status_hi_mask);
1948
1949 ue_status_lo = (ue_status_lo & (~ue_status_lo_mask));
1950 ue_status_hi = (ue_status_hi & (~ue_status_hi_mask));
1951
Ajit Khaparded053de92010-09-03 06:23:30 +00001952 if (ue_status_lo || ue_status_hi) {
1953 adapter->ue_detected = true;
Ajit Khaparde7acc2082011-02-11 13:38:17 +00001954 adapter->eeh_err = true;
Ajit Khaparded053de92010-09-03 06:23:30 +00001955 dev_err(&adapter->pdev->dev, "UE Detected!!\n");
1956 }
1957
Ajit Khaparde7c185272010-07-29 06:16:33 +00001958 if (ue_status_lo) {
1959 for (i = 0; ue_status_lo; ue_status_lo >>= 1, i++) {
1960 if (ue_status_lo & 1)
1961 dev_err(&adapter->pdev->dev,
1962 "UE: %s bit set\n", ue_status_low_desc[i]);
1963 }
1964 }
1965 if (ue_status_hi) {
1966 for (i = 0; ue_status_hi; ue_status_hi >>= 1, i++) {
1967 if (ue_status_hi & 1)
1968 dev_err(&adapter->pdev->dev,
1969 "UE: %s bit set\n", ue_status_hi_desc[i]);
1970 }
1971 }
1972
1973}
1974
Sathya Perlaea1dae12009-03-19 23:56:20 -07001975static void be_worker(struct work_struct *work)
1976{
1977 struct be_adapter *adapter =
1978 container_of(work, struct be_adapter, work.work);
Sathya Perla3abcded2010-10-03 22:12:27 -07001979 struct be_rx_obj *rxo;
1980 int i;
Sathya Perlaea1dae12009-03-19 23:56:20 -07001981
Sathya Perla16da8252011-03-21 20:49:27 +00001982 if (!adapter->ue_detected && !lancer_chip(adapter))
1983 be_detect_dump_ue(adapter);
1984
Somnath Koturf203af72010-10-25 23:01:03 +00001985 /* when interrupts are not yet enabled, just reap any pending
1986 * mcc completions */
1987 if (!netif_running(adapter->netdev)) {
1988 int mcc_compl, status = 0;
1989
1990 mcc_compl = be_process_mcc(adapter, &status);
1991
1992 if (mcc_compl) {
1993 struct be_mcc_obj *mcc_obj = &adapter->mcc_obj;
1994 be_cq_notify(adapter, mcc_obj->cq.id, false, mcc_compl);
1995 }
Ajit Khaparde9b037f32011-02-11 13:38:29 +00001996
Somnath Koturf203af72010-10-25 23:01:03 +00001997 goto reschedule;
1998 }
1999
Selvin Xavier005d5692011-05-16 07:36:35 +00002000 if (!adapter->stats_cmd_sent) {
2001 if (lancer_chip(adapter))
2002 lancer_cmd_get_pport_stats(adapter,
2003 &adapter->stats_cmd);
2004 else
2005 be_cmd_get_stats(adapter, &adapter->stats_cmd);
2006 }
Sathya Perla3c8def92011-06-12 20:01:58 +00002007
Sathya Perla3abcded2010-10-03 22:12:27 -07002008 for_all_rx_queues(adapter, rxo, i) {
Sathya Perla3abcded2010-10-03 22:12:27 -07002009 be_rx_eqd_update(adapter, rxo);
2010
2011 if (rxo->rx_post_starved) {
2012 rxo->rx_post_starved = false;
Eric Dumazet1829b082011-03-01 05:48:12 +00002013 be_post_rx_frags(rxo, GFP_KERNEL);
Sathya Perla3abcded2010-10-03 22:12:27 -07002014 }
Sathya Perlaea1dae12009-03-19 23:56:20 -07002015 }
2016
Somnath Koturf203af72010-10-25 23:01:03 +00002017reschedule:
Ivan Vecerae74fbd032011-04-21 00:20:04 +00002018 adapter->work_counter++;
Sathya Perlaea1dae12009-03-19 23:56:20 -07002019 schedule_delayed_work(&adapter->work, msecs_to_jiffies(1000));
2020}
2021
Sathya Perla8d56ff12009-11-22 22:02:26 +00002022static void be_msix_disable(struct be_adapter *adapter)
2023{
Sathya Perlaac6a0c42011-03-21 20:49:25 +00002024 if (msix_enabled(adapter)) {
Sathya Perla8d56ff12009-11-22 22:02:26 +00002025 pci_disable_msix(adapter->pdev);
Sathya Perlaac6a0c42011-03-21 20:49:25 +00002026 adapter->num_msix_vec = 0;
Sathya Perla3abcded2010-10-03 22:12:27 -07002027 }
2028}
2029
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002030static void be_msix_enable(struct be_adapter *adapter)
2031{
Sathya Perla3abcded2010-10-03 22:12:27 -07002032#define BE_MIN_MSIX_VECTORS (1 + 1) /* Rx + Tx */
Sathya Perlaac6a0c42011-03-21 20:49:25 +00002033 int i, status, num_vec;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002034
Sathya Perlaac6a0c42011-03-21 20:49:25 +00002035 num_vec = be_num_rxqs_want(adapter) + 1;
Sathya Perla3abcded2010-10-03 22:12:27 -07002036
Sathya Perlaac6a0c42011-03-21 20:49:25 +00002037 for (i = 0; i < num_vec; i++)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002038 adapter->msix_entries[i].entry = i;
2039
Sathya Perlaac6a0c42011-03-21 20:49:25 +00002040 status = pci_enable_msix(adapter->pdev, adapter->msix_entries, num_vec);
Sathya Perla3abcded2010-10-03 22:12:27 -07002041 if (status == 0) {
2042 goto done;
2043 } else if (status >= BE_MIN_MSIX_VECTORS) {
Sathya Perlaac6a0c42011-03-21 20:49:25 +00002044 num_vec = status;
Sathya Perla3abcded2010-10-03 22:12:27 -07002045 if (pci_enable_msix(adapter->pdev, adapter->msix_entries,
Sathya Perlaac6a0c42011-03-21 20:49:25 +00002046 num_vec) == 0)
Sathya Perla3abcded2010-10-03 22:12:27 -07002047 goto done;
Sathya Perla3abcded2010-10-03 22:12:27 -07002048 }
2049 return;
2050done:
Sathya Perlaac6a0c42011-03-21 20:49:25 +00002051 adapter->num_msix_vec = num_vec;
2052 return;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002053}
2054
/* Enable SR-IOV on the PF if the user asked for VFs (num_vfs module param).
 * Caps num_vfs to what the device advertises. Sets adapter->sriov_enabled
 * on success; a failure leaves it false and the driver runs without VFs.
 */
static void be_sriov_enable(struct be_adapter *adapter)
{
	be_check_sriov_fn_type(adapter);
#ifdef CONFIG_PCI_IOV
	if (be_physfn(adapter) && num_vfs) {
		int status, pos;
		u16 nvfs;

		pos = pci_find_ext_capability(adapter->pdev,
						PCI_EXT_CAP_ID_SRIOV);
		/* Fix: pci_find_ext_capability() returns 0 when the SR-IOV
		 * capability is absent; the old code read config space at a
		 * bogus offset in that case.
		 */
		if (!pos) {
			dev_info(&adapter->pdev->dev,
				"SR-IOV capability not found\n");
			return;
		}
		pci_read_config_word(adapter->pdev,
					pos + PCI_SRIOV_TOTAL_VF, &nvfs);

		/* Clamp the request to the device's TotalVFs */
		if (num_vfs > nvfs) {
			dev_info(&adapter->pdev->dev,
				"Device supports %d VFs and not %d\n",
				nvfs, num_vfs);
			num_vfs = nvfs;
		}

		status = pci_enable_sriov(adapter->pdev, num_vfs);
		adapter->sriov_enabled = status ? false : true;
	}
#endif
}
2080
/* Tear down SR-IOV if it was enabled by be_sriov_enable(). */
static void be_sriov_disable(struct be_adapter *adapter)
{
#ifdef CONFIG_PCI_IOV
	if (adapter->sriov_enabled) {
		pci_disable_sriov(adapter->pdev);
		adapter->sriov_enabled = false;
	}
#endif
}
2090
Sathya Perlafe6d2a32010-11-21 23:25:50 +00002091static inline int be_msix_vec_get(struct be_adapter *adapter,
2092 struct be_eq_obj *eq_obj)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002093{
Padmanabh Ratnakarecd62102011-04-03 01:54:11 +00002094 return adapter->msix_entries[eq_obj->eq_idx].vector;
Sathya Perlab628bde2009-08-17 00:58:26 +00002095}
2096
2097static int be_request_irq(struct be_adapter *adapter,
2098 struct be_eq_obj *eq_obj,
Sathya Perla3abcded2010-10-03 22:12:27 -07002099 void *handler, char *desc, void *context)
Sathya Perlab628bde2009-08-17 00:58:26 +00002100{
2101 struct net_device *netdev = adapter->netdev;
2102 int vec;
2103
2104 sprintf(eq_obj->desc, "%s-%s", netdev->name, desc);
Sathya Perlafe6d2a32010-11-21 23:25:50 +00002105 vec = be_msix_vec_get(adapter, eq_obj);
Sathya Perla3abcded2010-10-03 22:12:27 -07002106 return request_irq(vec, handler, 0, eq_obj->desc, context);
Sathya Perlab628bde2009-08-17 00:58:26 +00002107}
2108
/* Free the MSI-X vector that was requested for this EQ object. */
static void be_free_irq(struct be_adapter *adapter, struct be_eq_obj *eq_obj,
			void *context)
{
	free_irq(be_msix_vec_get(adapter, eq_obj), context);
}
2115
2116static int be_msix_register(struct be_adapter *adapter)
2117{
Sathya Perla3abcded2010-10-03 22:12:27 -07002118 struct be_rx_obj *rxo;
2119 int status, i;
2120 char qname[10];
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002121
Sathya Perla3abcded2010-10-03 22:12:27 -07002122 status = be_request_irq(adapter, &adapter->tx_eq, be_msix_tx_mcc, "tx",
2123 adapter);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002124 if (status)
2125 goto err;
2126
Sathya Perla3abcded2010-10-03 22:12:27 -07002127 for_all_rx_queues(adapter, rxo, i) {
2128 sprintf(qname, "rxq%d", i);
2129 status = be_request_irq(adapter, &rxo->rx_eq, be_msix_rx,
2130 qname, rxo);
2131 if (status)
2132 goto err_msix;
2133 }
Sathya Perlab628bde2009-08-17 00:58:26 +00002134
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002135 return 0;
Sathya Perlab628bde2009-08-17 00:58:26 +00002136
Sathya Perla3abcded2010-10-03 22:12:27 -07002137err_msix:
2138 be_free_irq(adapter, &adapter->tx_eq, adapter);
2139
2140 for (i--, rxo = &adapter->rx_obj[i]; i >= 0; i--, rxo--)
2141 be_free_irq(adapter, &rxo->rx_eq, rxo);
2142
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002143err:
2144 dev_warn(&adapter->pdev->dev,
2145 "MSIX Request IRQ failed - err %d\n", status);
Sathya Perlaac6a0c42011-03-21 20:49:25 +00002146 be_msix_disable(adapter);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002147 return status;
2148}
2149
2150static int be_irq_register(struct be_adapter *adapter)
2151{
2152 struct net_device *netdev = adapter->netdev;
2153 int status;
2154
Sathya Perlaac6a0c42011-03-21 20:49:25 +00002155 if (msix_enabled(adapter)) {
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002156 status = be_msix_register(adapter);
2157 if (status == 0)
2158 goto done;
Sarveshwar Bandiba343c72010-03-31 02:56:12 +00002159 /* INTx is not supported for VF */
2160 if (!be_physfn(adapter))
2161 return status;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002162 }
2163
2164 /* INTx */
2165 netdev->irq = adapter->pdev->irq;
2166 status = request_irq(netdev->irq, be_intx, IRQF_SHARED, netdev->name,
2167 adapter);
2168 if (status) {
2169 dev_err(&adapter->pdev->dev,
2170 "INTx request IRQ failed - err %d\n", status);
2171 return status;
2172 }
2173done:
2174 adapter->isr_registered = true;
2175 return 0;
2176}
2177
2178static void be_irq_unregister(struct be_adapter *adapter)
2179{
2180 struct net_device *netdev = adapter->netdev;
Sathya Perla3abcded2010-10-03 22:12:27 -07002181 struct be_rx_obj *rxo;
2182 int i;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002183
2184 if (!adapter->isr_registered)
2185 return;
2186
2187 /* INTx */
Sathya Perlaac6a0c42011-03-21 20:49:25 +00002188 if (!msix_enabled(adapter)) {
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002189 free_irq(netdev->irq, adapter);
2190 goto done;
2191 }
2192
2193 /* MSIx */
Sathya Perla3abcded2010-10-03 22:12:27 -07002194 be_free_irq(adapter, &adapter->tx_eq, adapter);
2195
2196 for_all_rx_queues(adapter, rxo, i)
2197 be_free_irq(adapter, &rxo->rx_eq, rxo);
2198
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002199done:
2200 adapter->isr_registered = false;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002201}
2202
Sathya Perla482c9e72011-06-29 23:33:17 +00002203static void be_rx_queues_clear(struct be_adapter *adapter)
2204{
2205 struct be_queue_info *q;
2206 struct be_rx_obj *rxo;
2207 int i;
2208
2209 for_all_rx_queues(adapter, rxo, i) {
2210 q = &rxo->q;
2211 if (q->created) {
2212 be_cmd_rxq_destroy(adapter, q);
2213 /* After the rxq is invalidated, wait for a grace time
2214 * of 1ms for all dma to end and the flush compl to
2215 * arrive
2216 */
2217 mdelay(1);
2218 be_rx_q_clean(adapter, rxo);
2219 }
2220
2221 /* Clear any residual events */
2222 q = &rxo->rx_eq.q;
2223 if (q->created)
2224 be_eq_clean(adapter, &rxo->rx_eq);
2225 }
2226}
2227
Sathya Perla889cd4b2010-05-30 23:33:45 +00002228static int be_close(struct net_device *netdev)
2229{
2230 struct be_adapter *adapter = netdev_priv(netdev);
Sathya Perla3abcded2010-10-03 22:12:27 -07002231 struct be_rx_obj *rxo;
Sathya Perla3c8def92011-06-12 20:01:58 +00002232 struct be_tx_obj *txo;
Sathya Perla889cd4b2010-05-30 23:33:45 +00002233 struct be_eq_obj *tx_eq = &adapter->tx_eq;
Sathya Perla3abcded2010-10-03 22:12:27 -07002234 int vec, i;
Sathya Perla889cd4b2010-05-30 23:33:45 +00002235
Sathya Perla889cd4b2010-05-30 23:33:45 +00002236 be_async_mcc_disable(adapter);
2237
Sathya Perlafe6d2a32010-11-21 23:25:50 +00002238 if (!lancer_chip(adapter))
2239 be_intr_set(adapter, false);
Sathya Perla889cd4b2010-05-30 23:33:45 +00002240
Padmanabh Ratnakar63fcb272011-03-07 03:09:17 +00002241 for_all_rx_queues(adapter, rxo, i)
2242 napi_disable(&rxo->rx_eq.napi);
2243
2244 napi_disable(&tx_eq->napi);
2245
2246 if (lancer_chip(adapter)) {
Padmanabh Ratnakar63fcb272011-03-07 03:09:17 +00002247 be_cq_notify(adapter, adapter->mcc_obj.cq.id, false, 0);
2248 for_all_rx_queues(adapter, rxo, i)
2249 be_cq_notify(adapter, rxo->cq.id, false, 0);
Sathya Perla3c8def92011-06-12 20:01:58 +00002250 for_all_tx_queues(adapter, txo, i)
2251 be_cq_notify(adapter, txo->cq.id, false, 0);
Padmanabh Ratnakar63fcb272011-03-07 03:09:17 +00002252 }
2253
Sathya Perlaac6a0c42011-03-21 20:49:25 +00002254 if (msix_enabled(adapter)) {
Sathya Perlafe6d2a32010-11-21 23:25:50 +00002255 vec = be_msix_vec_get(adapter, tx_eq);
Sathya Perla889cd4b2010-05-30 23:33:45 +00002256 synchronize_irq(vec);
Sathya Perla3abcded2010-10-03 22:12:27 -07002257
2258 for_all_rx_queues(adapter, rxo, i) {
Sathya Perlafe6d2a32010-11-21 23:25:50 +00002259 vec = be_msix_vec_get(adapter, &rxo->rx_eq);
Sathya Perla3abcded2010-10-03 22:12:27 -07002260 synchronize_irq(vec);
2261 }
Sathya Perla889cd4b2010-05-30 23:33:45 +00002262 } else {
2263 synchronize_irq(netdev->irq);
2264 }
2265 be_irq_unregister(adapter);
2266
Sathya Perla889cd4b2010-05-30 23:33:45 +00002267 /* Wait for all pending tx completions to arrive so that
2268 * all tx skbs are freed.
2269 */
Sathya Perla3c8def92011-06-12 20:01:58 +00002270 for_all_tx_queues(adapter, txo, i)
2271 be_tx_compl_clean(adapter, txo);
Sathya Perla889cd4b2010-05-30 23:33:45 +00002272
Sathya Perla482c9e72011-06-29 23:33:17 +00002273 be_rx_queues_clear(adapter);
2274 return 0;
2275}
2276
2277static int be_rx_queues_setup(struct be_adapter *adapter)
2278{
2279 struct be_rx_obj *rxo;
2280 int rc, i;
2281 u8 rsstable[MAX_RSS_QS];
2282
2283 for_all_rx_queues(adapter, rxo, i) {
2284 rc = be_cmd_rxq_create(adapter, &rxo->q, rxo->cq.id,
2285 rx_frag_size, BE_MAX_JUMBO_FRAME_SIZE,
2286 adapter->if_handle,
2287 (i > 0) ? 1 : 0/* rss enable */, &rxo->rss_id);
2288 if (rc)
2289 return rc;
2290 }
2291
2292 if (be_multi_rxq(adapter)) {
2293 for_all_rss_queues(adapter, rxo, i)
2294 rsstable[i] = rxo->rss_id;
2295
2296 rc = be_cmd_rss_config(adapter, rsstable,
2297 adapter->num_rx_qs - 1);
2298 if (rc)
2299 return rc;
2300 }
2301
2302 /* First time posting */
2303 for_all_rx_queues(adapter, rxo, i) {
2304 be_post_rx_frags(rxo, GFP_KERNEL);
2305 napi_enable(&rxo->rx_eq.napi);
2306 }
Sathya Perla889cd4b2010-05-30 23:33:45 +00002307 return 0;
2308}
2309
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002310static int be_open(struct net_device *netdev)
2311{
2312 struct be_adapter *adapter = netdev_priv(netdev);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002313 struct be_eq_obj *tx_eq = &adapter->tx_eq;
Sathya Perla3abcded2010-10-03 22:12:27 -07002314 struct be_rx_obj *rxo;
Sathya Perla3abcded2010-10-03 22:12:27 -07002315 int status, i;
Sathya Perla5fb379e2009-06-18 00:02:59 +00002316
Sathya Perla482c9e72011-06-29 23:33:17 +00002317 status = be_rx_queues_setup(adapter);
2318 if (status)
2319 goto err;
2320
Sathya Perla5fb379e2009-06-18 00:02:59 +00002321 napi_enable(&tx_eq->napi);
2322
2323 be_irq_register(adapter);
2324
Sathya Perlafe6d2a32010-11-21 23:25:50 +00002325 if (!lancer_chip(adapter))
2326 be_intr_set(adapter, true);
Sathya Perla5fb379e2009-06-18 00:02:59 +00002327
2328 /* The evt queues are created in unarmed state; arm them */
Sathya Perla3abcded2010-10-03 22:12:27 -07002329 for_all_rx_queues(adapter, rxo, i) {
2330 be_eq_notify(adapter, rxo->rx_eq.q.id, true, false, 0);
2331 be_cq_notify(adapter, rxo->cq.id, true, 0);
2332 }
Sathya Perla8788fdc2009-07-27 22:52:03 +00002333 be_eq_notify(adapter, tx_eq->q.id, true, false, 0);
Sathya Perla5fb379e2009-06-18 00:02:59 +00002334
Sathya Perla7a1e9b22010-02-17 01:35:11 +00002335 /* Now that interrupts are on we can process async mcc */
2336 be_async_mcc_enable(adapter);
2337
Sarveshwar Bandiba343c72010-03-31 02:56:12 +00002338 if (be_physfn(adapter)) {
Ajit Khaparde1da87b72010-07-23 01:51:22 +00002339 status = be_vid_config(adapter, false, 0);
Sathya Perla889cd4b2010-05-30 23:33:45 +00002340 if (status)
2341 goto err;
2342
Sarveshwar Bandiba343c72010-03-31 02:56:12 +00002343 status = be_cmd_set_flow_control(adapter,
2344 adapter->tx_fc, adapter->rx_fc);
2345 if (status)
Sathya Perla889cd4b2010-05-30 23:33:45 +00002346 goto err;
Sarveshwar Bandiba343c72010-03-31 02:56:12 +00002347 }
Ajit Khaparde4f2aa892009-11-06 02:07:32 +00002348
Sathya Perla889cd4b2010-05-30 23:33:45 +00002349 return 0;
2350err:
2351 be_close(adapter->netdev);
2352 return -EIO;
Sathya Perla5fb379e2009-06-18 00:02:59 +00002353}
2354
Ajit Khaparde71d8d1b2009-12-03 06:16:59 +00002355static int be_setup_wol(struct be_adapter *adapter, bool enable)
2356{
2357 struct be_dma_mem cmd;
2358 int status = 0;
2359 u8 mac[ETH_ALEN];
2360
2361 memset(mac, 0, ETH_ALEN);
2362
2363 cmd.size = sizeof(struct be_cmd_req_acpi_wol_magic_config);
Ivan Vecera2b7bceb2011-02-02 08:05:12 +00002364 cmd.va = dma_alloc_coherent(&adapter->pdev->dev, cmd.size, &cmd.dma,
2365 GFP_KERNEL);
Ajit Khaparde71d8d1b2009-12-03 06:16:59 +00002366 if (cmd.va == NULL)
2367 return -1;
2368 memset(cmd.va, 0, cmd.size);
2369
2370 if (enable) {
2371 status = pci_write_config_dword(adapter->pdev,
2372 PCICFG_PM_CONTROL_OFFSET, PCICFG_PM_CONTROL_MASK);
2373 if (status) {
2374 dev_err(&adapter->pdev->dev,
Frans Pop2381a552010-03-24 07:57:36 +00002375 "Could not enable Wake-on-lan\n");
Ivan Vecera2b7bceb2011-02-02 08:05:12 +00002376 dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va,
2377 cmd.dma);
Ajit Khaparde71d8d1b2009-12-03 06:16:59 +00002378 return status;
2379 }
2380 status = be_cmd_enable_magic_wol(adapter,
2381 adapter->netdev->dev_addr, &cmd);
2382 pci_enable_wake(adapter->pdev, PCI_D3hot, 1);
2383 pci_enable_wake(adapter->pdev, PCI_D3cold, 1);
2384 } else {
2385 status = be_cmd_enable_magic_wol(adapter, mac, &cmd);
2386 pci_enable_wake(adapter->pdev, PCI_D3hot, 0);
2387 pci_enable_wake(adapter->pdev, PCI_D3cold, 0);
2388 }
2389
Ivan Vecera2b7bceb2011-02-02 08:05:12 +00002390 dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va, cmd.dma);
Ajit Khaparde71d8d1b2009-12-03 06:16:59 +00002391 return status;
2392}
2393
Ajit Khaparde6d87f5c2010-08-25 00:32:33 +00002394/*
2395 * Generate a seed MAC address from the PF MAC Address using jhash.
2396 * MAC Address for VFs are assigned incrementally starting from the seed.
2397 * These addresses are programmed in the ASIC by the PF and the VF driver
2398 * queries for the MAC address during its probe.
2399 */
2400static inline int be_vf_eth_addr_config(struct be_adapter *adapter)
2401{
2402 u32 vf = 0;
Sathya Perla3abcded2010-10-03 22:12:27 -07002403 int status = 0;
Ajit Khaparde6d87f5c2010-08-25 00:32:33 +00002404 u8 mac[ETH_ALEN];
2405
2406 be_vf_eth_addr_generate(adapter, mac);
2407
2408 for (vf = 0; vf < num_vfs; vf++) {
2409 status = be_cmd_pmac_add(adapter, mac,
2410 adapter->vf_cfg[vf].vf_if_handle,
Ajit Khapardef8617e02011-02-11 13:36:37 +00002411 &adapter->vf_cfg[vf].vf_pmac_id,
2412 vf + 1);
Ajit Khaparde6d87f5c2010-08-25 00:32:33 +00002413 if (status)
2414 dev_err(&adapter->pdev->dev,
2415 "Mac address add failed for VF %d\n", vf);
2416 else
2417 memcpy(adapter->vf_cfg[vf].vf_mac_addr, mac, ETH_ALEN);
2418
2419 mac[5] += 1;
2420 }
2421 return status;
2422}
2423
2424static inline void be_vf_eth_addr_rem(struct be_adapter *adapter)
2425{
2426 u32 vf;
2427
2428 for (vf = 0; vf < num_vfs; vf++) {
2429 if (adapter->vf_cfg[vf].vf_pmac_id != BE_INVALID_PMAC_ID)
2430 be_cmd_pmac_del(adapter,
2431 adapter->vf_cfg[vf].vf_if_handle,
Ajit Khapardef8617e02011-02-11 13:36:37 +00002432 adapter->vf_cfg[vf].vf_pmac_id, vf + 1);
Ajit Khaparde6d87f5c2010-08-25 00:32:33 +00002433 }
2434}
2435
/* One-time adapter bring-up: create the PF interface (and one interface per
 * VF when SR-IOV is on), then the tx, rx and mcc queue sets. On failure the
 * already-created objects are unwound in reverse order via the goto chain.
 * Returns 0 on success or the first failing command's status.
 */
static int be_setup(struct be_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	u32 cap_flags, en_flags, vf = 0;
	int status;
	u8 mac[ETH_ALEN];

	be_cmd_req_native_mode(adapter);

	/* Baseline capabilities/enables common to PF and VF interfaces */
	cap_flags = en_flags = BE_IF_FLAGS_UNTAGGED |
				BE_IF_FLAGS_BROADCAST |
				BE_IF_FLAGS_MULTICAST;

	if (be_physfn(adapter)) {
		/* PF additionally gets promiscuous modes and L3/L4 error
		 * passing; RSS only if the function advertises it */
		cap_flags |= BE_IF_FLAGS_MCAST_PROMISCUOUS |
				BE_IF_FLAGS_PROMISCUOUS |
				BE_IF_FLAGS_PASS_L3L4_ERRORS;
		en_flags |= BE_IF_FLAGS_PASS_L3L4_ERRORS;

		if (adapter->function_caps & BE_FUNCTION_CAPS_RSS) {
			cap_flags |= BE_IF_FLAGS_RSS;
			en_flags |= BE_IF_FLAGS_RSS;
		}
	}

	status = be_cmd_if_create(adapter, cap_flags, en_flags,
			netdev->dev_addr, false/* pmac_invalid */,
			&adapter->if_handle, &adapter->pmac_id, 0);
	if (status != 0)
		goto do_none;

	if (be_physfn(adapter)) {
		if (adapter->sriov_enabled) {
			/* Create one minimal interface per VF; the MAC is
			 * marked invalid (true) and programmed later by
			 * be_vf_eth_addr_config() */
			while (vf < num_vfs) {
				cap_flags = en_flags = BE_IF_FLAGS_UNTAGGED |
							BE_IF_FLAGS_BROADCAST;
				status = be_cmd_if_create(adapter, cap_flags,
					en_flags, mac, true,
					&adapter->vf_cfg[vf].vf_if_handle,
					NULL, vf+1);
				if (status) {
					dev_err(&adapter->pdev->dev,
					"Interface Create failed for VF %d\n",
					vf);
					goto if_destroy;
				}
				adapter->vf_cfg[vf].vf_pmac_id =
							BE_INVALID_PMAC_ID;
				vf++;
			}
		}
	} else {
		/* VF: fetch the MAC the PF assigned us and adopt it */
		status = be_cmd_mac_addr_query(adapter, mac,
			MAC_ADDRESS_TYPE_NETWORK, false, adapter->if_handle);
		if (!status) {
			memcpy(adapter->netdev->dev_addr, mac, ETH_ALEN);
			memcpy(adapter->netdev->perm_addr, mac, ETH_ALEN);
		}
	}

	status = be_tx_queues_create(adapter);
	if (status != 0)
		goto if_destroy;

	status = be_rx_queues_create(adapter);
	if (status != 0)
		goto tx_qs_destroy;

	/* Allow all priorities by default. A GRP5 evt may modify this */
	adapter->vlan_prio_bmap = 0xff;

	status = be_mcc_queues_create(adapter);
	if (status != 0)
		goto rx_qs_destroy;

	adapter->link_speed = -1;

	return 0;

	/* Error unwind: destroy in the reverse order of creation */
rx_qs_destroy:
	be_rx_queues_destroy(adapter);
tx_qs_destroy:
	be_tx_queues_destroy(adapter);
if_destroy:
	if (be_physfn(adapter) && adapter->sriov_enabled)
		for (vf = 0; vf < num_vfs; vf++)
			if (adapter->vf_cfg[vf].vf_if_handle)
				be_cmd_if_destroy(adapter,
					adapter->vf_cfg[vf].vf_if_handle,
					vf + 1);
	be_cmd_if_destroy(adapter, adapter->if_handle, 0);
do_none:
	return status;
}
2530
Sathya Perla5fb379e2009-06-18 00:02:59 +00002531static int be_clear(struct be_adapter *adapter)
2532{
Ajit Khaparde7ab8b0b2011-02-11 13:35:56 +00002533 int vf;
2534
Ajit Khapardec99ac3e2011-02-11 13:35:02 +00002535 if (be_physfn(adapter) && adapter->sriov_enabled)
Ajit Khaparde6d87f5c2010-08-25 00:32:33 +00002536 be_vf_eth_addr_rem(adapter);
2537
Sathya Perla1a8887d2009-08-17 00:58:41 +00002538 be_mcc_queues_destroy(adapter);
Sathya Perla5fb379e2009-06-18 00:02:59 +00002539 be_rx_queues_destroy(adapter);
2540 be_tx_queues_destroy(adapter);
Padmanabh Ratnakar1f5db832011-04-03 01:54:39 +00002541 adapter->eq_next_idx = 0;
Sathya Perla5fb379e2009-06-18 00:02:59 +00002542
Ajit Khaparde7ab8b0b2011-02-11 13:35:56 +00002543 if (be_physfn(adapter) && adapter->sriov_enabled)
2544 for (vf = 0; vf < num_vfs; vf++)
2545 if (adapter->vf_cfg[vf].vf_if_handle)
2546 be_cmd_if_destroy(adapter,
2547 adapter->vf_cfg[vf].vf_if_handle,
2548 vf + 1);
2549
Ajit Khaparde658681f2011-02-11 13:34:46 +00002550 be_cmd_if_destroy(adapter, adapter->if_handle, 0);
Sathya Perla5fb379e2009-06-18 00:02:59 +00002551
Sathya Perla2dc1deb2011-07-19 19:52:33 +00002552 adapter->be3_native = 0;
2553
Sathya Perla2243e2e2009-11-22 22:02:03 +00002554 /* tell fw we're done with firing cmds */
2555 be_cmd_fw_clean(adapter);
Sathya Perla5fb379e2009-06-18 00:02:59 +00002556 return 0;
2557}
2558
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002559
Ajit Khaparde84517482009-09-04 03:12:16 +00002560#define FW_FILE_HDR_SIGN "ServerEngines Corp. "
Sarveshwar Bandifa9a6fe2009-11-20 14:23:47 -08002561static bool be_flash_redboot(struct be_adapter *adapter,
Ajit Khaparde3f0d4562010-02-09 01:30:35 +00002562 const u8 *p, u32 img_start, int image_size,
2563 int hdr_size)
Sarveshwar Bandifa9a6fe2009-11-20 14:23:47 -08002564{
2565 u32 crc_offset;
2566 u8 flashed_crc[4];
2567 int status;
Ajit Khaparde3f0d4562010-02-09 01:30:35 +00002568
2569 crc_offset = hdr_size + img_start + image_size - 4;
2570
Sarveshwar Bandifa9a6fe2009-11-20 14:23:47 -08002571 p += crc_offset;
Ajit Khaparde3f0d4562010-02-09 01:30:35 +00002572
2573 status = be_cmd_get_flash_crc(adapter, flashed_crc,
Ajit Khapardef510fc62010-03-31 01:47:45 +00002574 (image_size - 4));
Sarveshwar Bandifa9a6fe2009-11-20 14:23:47 -08002575 if (status) {
2576 dev_err(&adapter->pdev->dev,
2577 "could not get crc from flash, not flashing redboot\n");
2578 return false;
2579 }
2580
2581 /*update redboot only if crc does not match*/
2582 if (!memcmp(flashed_crc, p, 4))
2583 return false;
2584 else
2585 return true;
Sarveshwar Bandifa9a6fe2009-11-20 14:23:47 -08002586}
2587
Sathya Perla306f1342011-08-02 19:57:45 +00002588static bool phy_flashing_required(struct be_adapter *adapter)
2589{
2590 int status = 0;
2591 struct be_phy_info phy_info;
2592
2593 status = be_cmd_get_phy_info(adapter, &phy_info);
2594 if (status)
2595 return false;
2596 if ((phy_info.phy_type == TN_8022) &&
2597 (phy_info.interface_type == PHY_TYPE_BASET_10GB)) {
2598 return true;
2599 }
2600 return false;
2601}
2602
/* Flash each firmware component found in the UFI image to the adapter.
 * The per-generation tables map each component type to its flash offset
 * and maximum size; components are streamed to the firmware in 32KB
 * chunks via the pre-allocated flash_cmd DMA buffer. Returns 0 on
 * success, -1 on a bounds or flash-write failure.
 */
static int be_flash_data(struct be_adapter *adapter,
			const struct firmware *fw,
			struct be_dma_mem *flash_cmd, int num_of_images)

{
	int status = 0, i, filehdr_size = 0;
	u32 total_bytes = 0, flash_op;
	int num_bytes;
	const u8 *p = fw->data;
	struct be_cmd_write_flashrom *req = flash_cmd->va;
	const struct flash_comp *pflashcomp;
	int num_comp;

	/* Flash layout for BE3 (gen3) controllers */
	static const struct flash_comp gen3_flash_types[10] = {
		{ FLASH_iSCSI_PRIMARY_IMAGE_START_g3, IMG_TYPE_ISCSI_ACTIVE,
			FLASH_IMAGE_MAX_SIZE_g3},
		{ FLASH_REDBOOT_START_g3, IMG_TYPE_REDBOOT,
			FLASH_REDBOOT_IMAGE_MAX_SIZE_g3},
		{ FLASH_iSCSI_BIOS_START_g3, IMG_TYPE_BIOS,
			FLASH_BIOS_IMAGE_MAX_SIZE_g3},
		{ FLASH_PXE_BIOS_START_g3, IMG_TYPE_PXE_BIOS,
			FLASH_BIOS_IMAGE_MAX_SIZE_g3},
		{ FLASH_FCoE_BIOS_START_g3, IMG_TYPE_FCOE_BIOS,
			FLASH_BIOS_IMAGE_MAX_SIZE_g3},
		{ FLASH_iSCSI_BACKUP_IMAGE_START_g3, IMG_TYPE_ISCSI_BACKUP,
			FLASH_IMAGE_MAX_SIZE_g3},
		{ FLASH_FCoE_PRIMARY_IMAGE_START_g3, IMG_TYPE_FCOE_FW_ACTIVE,
			FLASH_IMAGE_MAX_SIZE_g3},
		{ FLASH_FCoE_BACKUP_IMAGE_START_g3, IMG_TYPE_FCOE_FW_BACKUP,
			FLASH_IMAGE_MAX_SIZE_g3},
		{ FLASH_NCSI_START_g3, IMG_TYPE_NCSI_FW,
			FLASH_NCSI_IMAGE_MAX_SIZE_g3},
		{ FLASH_PHY_FW_START_g3, IMG_TYPE_PHY_FW,
			FLASH_PHY_FW_IMAGE_MAX_SIZE_g3}
	};
	/* Flash layout for BE2 (gen2) controllers (no NCSI/PHY sections) */
	static const struct flash_comp gen2_flash_types[8] = {
		{ FLASH_iSCSI_PRIMARY_IMAGE_START_g2, IMG_TYPE_ISCSI_ACTIVE,
			FLASH_IMAGE_MAX_SIZE_g2},
		{ FLASH_REDBOOT_START_g2, IMG_TYPE_REDBOOT,
			FLASH_REDBOOT_IMAGE_MAX_SIZE_g2},
		{ FLASH_iSCSI_BIOS_START_g2, IMG_TYPE_BIOS,
			FLASH_BIOS_IMAGE_MAX_SIZE_g2},
		{ FLASH_PXE_BIOS_START_g2, IMG_TYPE_PXE_BIOS,
			FLASH_BIOS_IMAGE_MAX_SIZE_g2},
		{ FLASH_FCoE_BIOS_START_g2, IMG_TYPE_FCOE_BIOS,
			FLASH_BIOS_IMAGE_MAX_SIZE_g2},
		{ FLASH_iSCSI_BACKUP_IMAGE_START_g2, IMG_TYPE_ISCSI_BACKUP,
			FLASH_IMAGE_MAX_SIZE_g2},
		{ FLASH_FCoE_PRIMARY_IMAGE_START_g2, IMG_TYPE_FCOE_FW_ACTIVE,
			FLASH_IMAGE_MAX_SIZE_g2},
		{ FLASH_FCoE_BACKUP_IMAGE_START_g2, IMG_TYPE_FCOE_FW_BACKUP,
			FLASH_IMAGE_MAX_SIZE_g2}
	};

	if (adapter->generation == BE_GEN3) {
		pflashcomp = gen3_flash_types;
		filehdr_size = sizeof(struct flash_file_hdr_g3);
		num_comp = ARRAY_SIZE(gen3_flash_types);
	} else {
		pflashcomp = gen2_flash_types;
		filehdr_size = sizeof(struct flash_file_hdr_g2);
		num_comp = ARRAY_SIZE(gen2_flash_types);
	}
	for (i = 0; i < num_comp; i++) {
		/* NCSI firmware requires a minimum controller fw version */
		if ((pflashcomp[i].optype == IMG_TYPE_NCSI_FW) &&
				memcmp(adapter->fw_ver, "3.102.148.0", 11) < 0)
			continue;
		/* PHY firmware only for PHYs that actually need it */
		if (pflashcomp[i].optype == IMG_TYPE_PHY_FW) {
			if (!phy_flashing_required(adapter))
				continue;
		}
		/* Redboot is flashed only when its CRC differs */
		if ((pflashcomp[i].optype == IMG_TYPE_REDBOOT) &&
			(!be_flash_redboot(adapter, fw->data,
			pflashcomp[i].offset, pflashcomp[i].size, filehdr_size +
			(num_of_images * sizeof(struct image_hdr)))))
			continue;
		/* Locate this component inside the UFI payload */
		p = fw->data;
		p += filehdr_size + pflashcomp[i].offset
			+ (num_of_images * sizeof(struct image_hdr));
		/* Bounds check: component must lie within the file */
		if (p + pflashcomp[i].size > fw->data + fw->size)
			return -1;
		total_bytes = pflashcomp[i].size;
		while (total_bytes) {
			/* Stream in 32KB chunks (DMA buffer size) */
			if (total_bytes > 32*1024)
				num_bytes = 32*1024;
			else
				num_bytes = total_bytes;
			total_bytes -= num_bytes;
			/* Intermediate chunks use the SAVE op; the final
			 * chunk uses the FLASH op to commit the image */
			if (!total_bytes) {
				if (pflashcomp[i].optype == IMG_TYPE_PHY_FW)
					flash_op = FLASHROM_OPER_PHY_FLASH;
				else
					flash_op = FLASHROM_OPER_FLASH;
			} else {
				if (pflashcomp[i].optype == IMG_TYPE_PHY_FW)
					flash_op = FLASHROM_OPER_PHY_SAVE;
				else
					flash_op = FLASHROM_OPER_SAVE;
			}
			memcpy(req->params.data_buf, p, num_bytes);
			p += num_bytes;
			status = be_cmd_write_flashrom(adapter, flash_cmd,
				pflashcomp[i].optype, flash_op, num_bytes);
			if (status) {
				/* Firmware that rejects PHY flashing is not
				 * fatal; skip to the next component */
				if ((status == ILLEGAL_IOCTL_REQ) &&
					(pflashcomp[i].optype ==
						IMG_TYPE_PHY_FW))
					break;
				dev_err(&adapter->pdev->dev,
					"cmd to write to flash rom failed.\n");
				return -1;
			}
		}
	}
	return 0;
}
2719
Ajit Khaparde3f0d4562010-02-09 01:30:35 +00002720static int get_ufigen_type(struct flash_file_hdr_g2 *fhdr)
2721{
2722 if (fhdr == NULL)
2723 return 0;
2724 if (fhdr->build[0] == '3')
2725 return BE_GEN3;
2726 else if (fhdr->build[0] == '2')
2727 return BE_GEN2;
2728 else
2729 return 0;
2730}
2731
/* Download a firmware image to a Lancer (SLI-4) adapter.
 *
 * The image length must be a multiple of 4 bytes.  It is copied in
 * LANCER_FW_DOWNLOAD_CHUNK (32KB) pieces into a single reusable DMA
 * buffer and streamed to the chip's "/prg" object via WRITE_OBJECT
 * commands; a final zero-length write at the end offset commits the
 * flashed image.
 *
 * Returns 0 on success; -EINVAL for a misaligned image, -ENOMEM if the
 * DMA buffer cannot be allocated, or the failing command's status.
 */
static int lancer_fw_download(struct be_adapter *adapter,
				const struct firmware *fw)
{
#define LANCER_FW_DOWNLOAD_CHUNK      (32 * 1024)
#define LANCER_FW_DOWNLOAD_LOCATION "/prg"
	struct be_dma_mem flash_cmd;
	const u8 *data_ptr = NULL;
	u8 *dest_image_ptr = NULL;
	size_t image_size = 0;
	u32 chunk_size = 0;
	u32 data_written = 0;
	u32 offset = 0;
	int status = 0;
	u8 add_status = 0;

	if (!IS_ALIGNED(fw->size, sizeof(u32))) {
		dev_err(&adapter->pdev->dev,
			"FW Image not properly aligned. "
			"Length must be 4 byte aligned.\n");
		status = -EINVAL;
		goto lancer_fw_exit;
	}

	/* Room for the WRITE_OBJECT request header plus one image chunk */
	flash_cmd.size = sizeof(struct lancer_cmd_req_write_object)
				+ LANCER_FW_DOWNLOAD_CHUNK;
	flash_cmd.va = dma_alloc_coherent(&adapter->pdev->dev, flash_cmd.size,
						&flash_cmd.dma, GFP_KERNEL);
	if (!flash_cmd.va) {
		status = -ENOMEM;
		dev_err(&adapter->pdev->dev,
			"Memory allocation failure while flashing\n");
		goto lancer_fw_exit;
	}

	/* Image bytes are placed immediately after the request header */
	dest_image_ptr = flash_cmd.va +
				sizeof(struct lancer_cmd_req_write_object);
	image_size = fw->size;
	data_ptr = fw->data;

	while (image_size) {
		chunk_size = min_t(u32, image_size, LANCER_FW_DOWNLOAD_CHUNK);

		/* Copy the image chunk content. */
		memcpy(dest_image_ptr, data_ptr, chunk_size);

		status = lancer_cmd_write_object(adapter, &flash_cmd,
				chunk_size, offset, LANCER_FW_DOWNLOAD_LOCATION,
				&data_written, &add_status);

		if (status)
			break;

		/* Advance by the amount the FW actually consumed, which
		 * may be less than chunk_size.
		 */
		offset += data_written;
		data_ptr += data_written;
		image_size -= data_written;
	}

	if (!status) {
		/* Commit the FW written */
		status = lancer_cmd_write_object(adapter, &flash_cmd,
				0, offset, LANCER_FW_DOWNLOAD_LOCATION,
				&data_written, &add_status);
	}

	dma_free_coherent(&adapter->pdev->dev, flash_cmd.size, flash_cmd.va,
				flash_cmd.dma);
	if (status) {
		dev_err(&adapter->pdev->dev,
			"Firmware load error. "
			"Status code: 0x%x Additional Status: 0x%x\n",
			status, add_status);
		goto lancer_fw_exit;
	}

	dev_info(&adapter->pdev->dev, "Firmware flashed successfully\n");
lancer_fw_exit:
	return status;
}
2810
/* Flash a BE-family (BE2/BE3) UFI firmware image.
 *
 * The generation digit in the UFI header must match the adapter
 * generation.  For Gen3 UFIs each image header is scanned and
 * be_flash_data() is invoked for imageid 1; Gen2 UFIs are flashed
 * directly.  One DMA buffer (flashrom cmd header + 32KB payload) is
 * allocated up front and reused for every flash command.
 * Returns 0 on success, -ENOMEM/-1 on failure.
 */
static int be_fw_download(struct be_adapter *adapter, const struct firmware* fw)
{
	struct flash_file_hdr_g2 *fhdr;
	struct flash_file_hdr_g3 *fhdr3;
	struct image_hdr *img_hdr_ptr = NULL;
	struct be_dma_mem flash_cmd;
	const u8 *p;
	int status = 0, i = 0, num_imgs = 0;

	p = fw->data;
	fhdr = (struct flash_file_hdr_g2 *) p;

	flash_cmd.size = sizeof(struct be_cmd_write_flashrom) + 32*1024;
	flash_cmd.va = dma_alloc_coherent(&adapter->pdev->dev, flash_cmd.size,
					  &flash_cmd.dma, GFP_KERNEL);
	if (!flash_cmd.va) {
		status = -ENOMEM;
		dev_err(&adapter->pdev->dev,
			"Memory allocation failure while flashing\n");
		goto be_fw_exit;
	}

	if ((adapter->generation == BE_GEN3) &&
			(get_ufigen_type(fhdr) == BE_GEN3)) {
		/* Gen3 UFI: walk the image headers that follow the file
		 * header and flash the image with id 1.
		 */
		fhdr3 = (struct flash_file_hdr_g3 *) fw->data;
		num_imgs = le32_to_cpu(fhdr3->num_imgs);
		for (i = 0; i < num_imgs; i++) {
			img_hdr_ptr = (struct image_hdr *) (fw->data +
					(sizeof(struct flash_file_hdr_g3) +
					 i * sizeof(struct image_hdr)));
			if (le32_to_cpu(img_hdr_ptr->imageid) == 1)
				status = be_flash_data(adapter, fw, &flash_cmd,
							num_imgs);
		}
	} else if ((adapter->generation == BE_GEN2) &&
			(get_ufigen_type(fhdr) == BE_GEN2)) {
		status = be_flash_data(adapter, fw, &flash_cmd, 0);
	} else {
		/* UFI generation does not match the adapter generation */
		dev_err(&adapter->pdev->dev,
			"UFI and Interface are not compatible for flashing\n");
		status = -1;
	}

	dma_free_coherent(&adapter->pdev->dev, flash_cmd.size, flash_cmd.va,
			  flash_cmd.dma);
	if (status) {
		dev_err(&adapter->pdev->dev, "Firmware load error\n");
		goto be_fw_exit;
	}

	dev_info(&adapter->pdev->dev, "Firmware flashed successfully\n");

be_fw_exit:
	return status;
}
2866
2867int be_load_fw(struct be_adapter *adapter, u8 *fw_file)
2868{
2869 const struct firmware *fw;
2870 int status;
2871
2872 if (!netif_running(adapter->netdev)) {
2873 dev_err(&adapter->pdev->dev,
2874 "Firmware load not allowed (interface is down)\n");
2875 return -1;
2876 }
2877
2878 status = request_firmware(&fw, fw_file, &adapter->pdev->dev);
2879 if (status)
2880 goto fw_exit;
2881
2882 dev_info(&adapter->pdev->dev, "Flashing firmware file %s\n", fw_file);
2883
2884 if (lancer_chip(adapter))
2885 status = lancer_fw_download(adapter, fw);
2886 else
2887 status = be_fw_download(adapter, fw);
2888
Ajit Khaparde84517482009-09-04 03:12:16 +00002889fw_exit:
2890 release_firmware(fw);
2891 return status;
2892}
2893
/* net_device callbacks exported to the networking core, including the
 * SR-IOV ndo_set_vf_* hooks for per-VF MAC/VLAN/tx-rate configuration.
 */
static struct net_device_ops be_netdev_ops = {
	.ndo_open = be_open,
	.ndo_stop = be_close,
	.ndo_start_xmit = be_xmit,
	.ndo_set_rx_mode = be_set_multicast_list,
	.ndo_set_mac_address = be_mac_addr_set,
	.ndo_change_mtu = be_change_mtu,
	.ndo_get_stats64 = be_get_stats64,
	.ndo_validate_addr = eth_validate_addr,
	.ndo_vlan_rx_add_vid = be_vlan_add_vid,
	.ndo_vlan_rx_kill_vid = be_vlan_rem_vid,
	.ndo_set_vf_mac = be_set_vf_mac,
	.ndo_set_vf_vlan = be_set_vf_vlan,
	.ndo_set_vf_tx_rate = be_set_vf_tx_rate,
	.ndo_get_vf_config = be_get_vf_config
};
2910
/* One-time netdev initialization: offload feature flags, flow-control
 * defaults, netdev/ethtool ops and the per-queue NAPI contexts.
 * Runs before register_netdev() in be_probe().
 */
static void be_netdev_init(struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_rx_obj *rxo;
	int i;

	/* User-toggleable offloads; RXHASH only when multiple RX queues */
	netdev->hw_features |= NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6 |
		NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_RXCSUM |
		NETIF_F_HW_VLAN_TX;
	if (be_multi_rxq(adapter))
		netdev->hw_features |= NETIF_F_RXHASH;

	/* Enable everything by default; VLAN rx/filter are always-on */
	netdev->features |= netdev->hw_features |
		NETIF_F_HW_VLAN_RX | NETIF_F_HW_VLAN_FILTER;

	netdev->vlan_features |= NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6 |
		NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;

	netdev->flags |= IFF_MULTICAST;

	/* Default settings for Rx and Tx flow control */
	adapter->rx_fc = true;
	adapter->tx_fc = true;

	netif_set_gso_max_size(netdev, 65535);

	BE_SET_NETDEV_OPS(netdev, &be_netdev_ops);

	SET_ETHTOOL_OPS(netdev, &be_ethtool_ops);

	/* One NAPI context per RX event queue, plus one for TX/MCC */
	for_all_rx_queues(adapter, rxo, i)
		netif_napi_add(netdev, &rxo->rx_eq.napi, be_poll_rx,
			BE_NAPI_WEIGHT);

	netif_napi_add(netdev, &adapter->tx_eq.napi, be_poll_tx_mcc,
		BE_NAPI_WEIGHT);
}
2948
2949static void be_unmap_pci_bars(struct be_adapter *adapter)
2950{
Sathya Perla8788fdc2009-07-27 22:52:03 +00002951 if (adapter->csr)
2952 iounmap(adapter->csr);
2953 if (adapter->db)
2954 iounmap(adapter->db);
Sarveshwar Bandiba343c72010-03-31 02:56:12 +00002955 if (adapter->pcicfg && be_physfn(adapter))
Sathya Perla8788fdc2009-07-27 22:52:03 +00002956 iounmap(adapter->pcicfg);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002957}
2958
/* ioremap the PCI BARs used by the driver.
 * Lancer maps only BAR 0 (doorbells).  On the BE family, a PF maps the
 * CSR BAR (2), plus doorbell and pcicfg BARs whose numbers depend on
 * the generation; a VF maps only the doorbell BAR and derives pcicfg
 * from it at SRIOV_VF_PCICFG_OFFSET.
 * Returns 0 on success, -ENOMEM on failure (partially mapped BARs are
 * released via be_unmap_pci_bars()).
 */
static int be_map_pci_bars(struct be_adapter *adapter)
{
	u8 __iomem *addr;
	int pcicfg_reg, db_reg;

	if (lancer_chip(adapter)) {
		addr = ioremap_nocache(pci_resource_start(adapter->pdev, 0),
			pci_resource_len(adapter->pdev, 0));
		if (addr == NULL)
			return -ENOMEM;
		adapter->db = addr;
		return 0;
	}

	if (be_physfn(adapter)) {
		addr = ioremap_nocache(pci_resource_start(adapter->pdev, 2),
				pci_resource_len(adapter->pdev, 2));
		if (addr == NULL)
			return -ENOMEM;
		adapter->csr = addr;
	}

	/* BAR numbers differ between Gen2 and Gen3, and between PF/VF */
	if (adapter->generation == BE_GEN2) {
		pcicfg_reg = 1;
		db_reg = 4;
	} else {
		pcicfg_reg = 0;
		if (be_physfn(adapter))
			db_reg = 4;
		else
			db_reg = 0;
	}
	addr = ioremap_nocache(pci_resource_start(adapter->pdev, db_reg),
				pci_resource_len(adapter->pdev, db_reg));
	if (addr == NULL)
		goto pci_map_err;
	adapter->db = addr;

	if (be_physfn(adapter)) {
		addr = ioremap_nocache(
				pci_resource_start(adapter->pdev, pcicfg_reg),
				pci_resource_len(adapter->pdev, pcicfg_reg));
		if (addr == NULL)
			goto pci_map_err;
		adapter->pcicfg = addr;
	} else
		adapter->pcicfg = adapter->db + SRIOV_VF_PCICFG_OFFSET;

	return 0;
pci_map_err:
	be_unmap_pci_bars(adapter);
	return -ENOMEM;
}
3012
3013
3014static void be_ctrl_cleanup(struct be_adapter *adapter)
3015{
Sathya Perla8788fdc2009-07-27 22:52:03 +00003016 struct be_dma_mem *mem = &adapter->mbox_mem_alloced;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003017
3018 be_unmap_pci_bars(adapter);
3019
3020 if (mem->va)
Ivan Vecera2b7bceb2011-02-02 08:05:12 +00003021 dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
3022 mem->dma);
Sathya Perlae7b909a2009-11-22 22:01:10 +00003023
Sathya Perla5b8821b2011-08-02 19:57:44 +00003024 mem = &adapter->rx_filter;
Sathya Perlae7b909a2009-11-22 22:01:10 +00003025 if (mem->va)
Ivan Vecera2b7bceb2011-02-02 08:05:12 +00003026 dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
3027 mem->dma);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003028}
3029
/* Set up the adapter control path: map the PCI BARs, allocate the
 * 16-byte-aligned mailbox and the rx-filter DMA buffers, and
 * initialize the mailbox/MCC locks and the flash completion.
 * Undone by be_ctrl_cleanup().  Returns 0 or a negative errno.
 */
static int be_ctrl_init(struct be_adapter *adapter)
{
	struct be_dma_mem *mbox_mem_alloc = &adapter->mbox_mem_alloced;
	struct be_dma_mem *mbox_mem_align = &adapter->mbox_mem;
	struct be_dma_mem *rx_filter = &adapter->rx_filter;
	int status;

	status = be_map_pci_bars(adapter);
	if (status)
		goto done;

	/* Over-allocate by 16 bytes so the used region can be aligned */
	mbox_mem_alloc->size = sizeof(struct be_mcc_mailbox) + 16;
	mbox_mem_alloc->va = dma_alloc_coherent(&adapter->pdev->dev,
						mbox_mem_alloc->size,
						&mbox_mem_alloc->dma,
						GFP_KERNEL);
	if (!mbox_mem_alloc->va) {
		status = -ENOMEM;
		goto unmap_pci_bars;
	}
	/* mbox_mem is the 16-byte-aligned view into mbox_mem_alloced */
	mbox_mem_align->size = sizeof(struct be_mcc_mailbox);
	mbox_mem_align->va = PTR_ALIGN(mbox_mem_alloc->va, 16);
	mbox_mem_align->dma = PTR_ALIGN(mbox_mem_alloc->dma, 16);
	memset(mbox_mem_align->va, 0, sizeof(struct be_mcc_mailbox));

	rx_filter->size = sizeof(struct be_cmd_req_rx_filter);
	rx_filter->va = dma_alloc_coherent(&adapter->pdev->dev, rx_filter->size,
					&rx_filter->dma, GFP_KERNEL);
	if (rx_filter->va == NULL) {
		status = -ENOMEM;
		goto free_mbox;
	}
	memset(rx_filter->va, 0, rx_filter->size);

	mutex_init(&adapter->mbox_lock);
	spin_lock_init(&adapter->mcc_lock);
	spin_lock_init(&adapter->mcc_cq_lock);

	init_completion(&adapter->flash_compl);
	pci_save_state(adapter->pdev);
	return 0;

free_mbox:
	dma_free_coherent(&adapter->pdev->dev, mbox_mem_alloc->size,
			  mbox_mem_alloc->va, mbox_mem_alloc->dma);

unmap_pci_bars:
	be_unmap_pci_bars(adapter);

done:
	return status;
}
3082
3083static void be_stats_cleanup(struct be_adapter *adapter)
3084{
Sathya Perla3abcded2010-10-03 22:12:27 -07003085 struct be_dma_mem *cmd = &adapter->stats_cmd;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003086
3087 if (cmd->va)
Ivan Vecera2b7bceb2011-02-02 08:05:12 +00003088 dma_free_coherent(&adapter->pdev->dev, cmd->size,
3089 cmd->va, cmd->dma);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003090}
3091
3092static int be_stats_init(struct be_adapter *adapter)
3093{
Sathya Perla3abcded2010-10-03 22:12:27 -07003094 struct be_dma_mem *cmd = &adapter->stats_cmd;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003095
Selvin Xavier005d5692011-05-16 07:36:35 +00003096 if (adapter->generation == BE_GEN2) {
Ajit Khaparde89a88ab2011-05-16 07:36:18 +00003097 cmd->size = sizeof(struct be_cmd_req_get_stats_v0);
Selvin Xavier005d5692011-05-16 07:36:35 +00003098 } else {
3099 if (lancer_chip(adapter))
3100 cmd->size = sizeof(struct lancer_cmd_req_pport_stats);
3101 else
3102 cmd->size = sizeof(struct be_cmd_req_get_stats_v1);
3103 }
Ivan Vecera2b7bceb2011-02-02 08:05:12 +00003104 cmd->va = dma_alloc_coherent(&adapter->pdev->dev, cmd->size, &cmd->dma,
3105 GFP_KERNEL);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003106 if (cmd->va == NULL)
3107 return -1;
David S. Millerd291b9a2010-01-28 21:36:21 -08003108 memset(cmd->va, 0, cmd->size);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003109 return 0;
3110}
3111
/* PCI remove: tear down one adapter instance in reverse probe order —
 * stop the worker, unregister the netdev, free queues/stats/control
 * structures, disable SR-IOV and MSI-X, then release PCI resources.
 */
static void __devexit be_remove(struct pci_dev *pdev)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);

	if (!adapter)
		return;

	cancel_delayed_work_sync(&adapter->work);

	unregister_netdev(adapter->netdev);

	be_clear(adapter);

	be_stats_cleanup(adapter);

	be_ctrl_cleanup(adapter);

	kfree(adapter->vf_cfg);
	be_sriov_disable(adapter);

	be_msix_disable(adapter);

	pci_set_drvdata(pdev, NULL);
	pci_release_regions(pdev);
	pci_disable_device(pdev);

	free_netdev(adapter->netdev);
}
3140
/* Query adapter configuration from FW at probe time: FW version, port
 * number, function mode/caps, permanent MAC address, VLAN budget,
 * controller attributes, and the number of TX queues to use.
 * Returns 0 on success or the failing command's status.
 */
static int be_get_config(struct be_adapter *adapter)
{
	int status;
	u8 mac[ETH_ALEN];

	status = be_cmd_get_fw_ver(adapter, adapter->fw_ver);
	if (status)
		return status;

	status = be_cmd_query_fw_cfg(adapter, &adapter->port_num,
			&adapter->function_mode, &adapter->function_caps);
	if (status)
		return status;

	memset(mac, 0, ETH_ALEN);

	/* A default permanent address is given to each VF for Lancer*/
	if (be_physfn(adapter) || lancer_chip(adapter)) {
		status = be_cmd_mac_addr_query(adapter, mac,
			MAC_ADDRESS_TYPE_NETWORK, true /*permanent */, 0);

		if (status)
			return status;

		if (!is_valid_ether_addr(mac))
			return -EADDRNOTAVAIL;

		memcpy(adapter->netdev->dev_addr, mac, ETH_ALEN);
		memcpy(adapter->netdev->perm_addr, mac, ETH_ALEN);
	}

	/* NOTE(review): 0x400 appears to be a multi-channel/FLEX10 mode
	 * bit in function_mode (fewer VLANs, single TX queue when set) —
	 * confirm against the FLEX10 definition in be.h/be_cmds.h.
	 */
	if (adapter->function_mode & 0x400)
		adapter->max_vlans = BE_NUM_VLANS_SUPPORTED/4;
	else
		adapter->max_vlans = BE_NUM_VLANS_SUPPORTED;

	status = be_cmd_get_cntl_attributes(adapter);
	if (status)
		return status;

	/* Multiple TX queues only on a BE-family PF that is not in
	 * SR-IOV or 0x400 mode; everything else gets a single queue.
	 */
	if ((num_vfs && adapter->sriov_enabled) ||
		(adapter->function_mode & 0x400) ||
		lancer_chip(adapter) || !be_physfn(adapter)) {
		adapter->num_tx_qs = 1;
		netif_set_real_num_tx_queues(adapter->netdev,
			adapter->num_tx_qs);
	} else {
		adapter->num_tx_qs = MAX_TX_QS;
	}

	return 0;
}
3193
Sathya Perlafe6d2a32010-11-21 23:25:50 +00003194static int be_dev_family_check(struct be_adapter *adapter)
3195{
3196 struct pci_dev *pdev = adapter->pdev;
3197 u32 sli_intf = 0, if_type;
3198
3199 switch (pdev->device) {
3200 case BE_DEVICE_ID1:
3201 case OC_DEVICE_ID1:
3202 adapter->generation = BE_GEN2;
3203 break;
3204 case BE_DEVICE_ID2:
3205 case OC_DEVICE_ID2:
3206 adapter->generation = BE_GEN3;
3207 break;
3208 case OC_DEVICE_ID3:
Mammatha Edhala12f4d0a2011-05-18 03:26:22 +00003209 case OC_DEVICE_ID4:
Sathya Perlafe6d2a32010-11-21 23:25:50 +00003210 pci_read_config_dword(pdev, SLI_INTF_REG_OFFSET, &sli_intf);
3211 if_type = (sli_intf & SLI_INTF_IF_TYPE_MASK) >>
3212 SLI_INTF_IF_TYPE_SHIFT;
3213
3214 if (((sli_intf & SLI_INTF_VALID_MASK) != SLI_INTF_VALID) ||
3215 if_type != 0x02) {
3216 dev_err(&pdev->dev, "SLI_INTF reg val is not valid\n");
3217 return -EINVAL;
3218 }
Sathya Perlafe6d2a32010-11-21 23:25:50 +00003219 adapter->sli_family = ((sli_intf & SLI_INTF_FAMILY_MASK) >>
3220 SLI_INTF_FAMILY_SHIFT);
3221 adapter->generation = BE_GEN3;
3222 break;
3223 default:
3224 adapter->generation = 0;
3225 }
3226 return 0;
3227}
3228
Padmanabh Ratnakar37eed1c2011-03-07 03:08:36 +00003229static int lancer_wait_ready(struct be_adapter *adapter)
3230{
3231#define SLIPORT_READY_TIMEOUT 500
3232 u32 sliport_status;
3233 int status = 0, i;
3234
3235 for (i = 0; i < SLIPORT_READY_TIMEOUT; i++) {
3236 sliport_status = ioread32(adapter->db + SLIPORT_STATUS_OFFSET);
3237 if (sliport_status & SLIPORT_STATUS_RDY_MASK)
3238 break;
3239
3240 msleep(20);
3241 }
3242
3243 if (i == SLIPORT_READY_TIMEOUT)
3244 status = -1;
3245
3246 return status;
3247}
3248
/* Wait for the Lancer SLIPORT to become ready.  If the port reports an
 * error together with "reset needed", trigger an initiate-port reset
 * via SLIPORT_CONTROL and wait again; any remaining error/reset-needed
 * state is treated as failure.  Returns 0 when ready, -1 otherwise.
 */
static int lancer_test_and_set_rdy_state(struct be_adapter *adapter)
{
	int status;
	u32 sliport_status, err, reset_needed;
	status = lancer_wait_ready(adapter);
	if (!status) {
		sliport_status = ioread32(adapter->db + SLIPORT_STATUS_OFFSET);
		err = sliport_status & SLIPORT_STATUS_ERR_MASK;
		reset_needed = sliport_status & SLIPORT_STATUS_RN_MASK;
		if (err && reset_needed) {
			/* Ask the port to reset itself */
			iowrite32(SLI_PORT_CONTROL_IP_MASK,
				  adapter->db + SLIPORT_CONTROL_OFFSET);

			/* check adapter has corrected the error */
			status = lancer_wait_ready(adapter);
			sliport_status = ioread32(adapter->db +
						  SLIPORT_STATUS_OFFSET);
			sliport_status &= (SLIPORT_STATUS_ERR_MASK |
						SLIPORT_STATUS_RN_MASK);
			if (status || sliport_status)
				status = -1;
		} else if (err || reset_needed) {
			/* Error without reset-needed (or vice versa) is
			 * not recoverable here.
			 */
			status = -1;
		}
	}
	return status;
}
3276
/* PCI probe: bring up one adapter instance.
 * Allocates the netdev, maps BARs and control structures, syncs with
 * adapter firmware (POST / fw_init / reset_function), queries the
 * configuration, sets up queues and registers the netdev; per-VF MAC
 * and link setup runs after registration when SR-IOV is enabled.
 * On any failure, teardown happens in reverse order through the
 * chained error labels at the bottom.
 */
static int __devinit be_probe(struct pci_dev *pdev,
			const struct pci_device_id *pdev_id)
{
	int status = 0;
	struct be_adapter *adapter;
	struct net_device *netdev;

	status = pci_enable_device(pdev);
	if (status)
		goto do_none;

	status = pci_request_regions(pdev, DRV_NAME);
	if (status)
		goto disable_dev;
	pci_set_master(pdev);

	netdev = alloc_etherdev_mq(sizeof(struct be_adapter), MAX_TX_QS);
	if (netdev == NULL) {
		status = -ENOMEM;
		goto rel_reg;
	}
	adapter = netdev_priv(netdev);
	adapter->pdev = pdev;
	pci_set_drvdata(pdev, adapter);

	status = be_dev_family_check(adapter);
	if (status)
		goto free_netdev;

	adapter->netdev = netdev;
	SET_NETDEV_DEV(netdev, &pdev->dev);

	/* Prefer 64-bit DMA; fall back to 32-bit if unsupported */
	status = dma_set_mask(&pdev->dev, DMA_BIT_MASK(64));
	if (!status) {
		netdev->features |= NETIF_F_HIGHDMA;
	} else {
		status = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
		if (status) {
			dev_err(&pdev->dev, "Could not set PCI DMA Mask\n");
			goto free_netdev;
		}
	}

	be_sriov_enable(adapter);
	if (adapter->sriov_enabled) {
		/* Per-VF config array, sized by the num_vfs module param */
		adapter->vf_cfg = kcalloc(num_vfs,
			sizeof(struct be_vf_cfg), GFP_KERNEL);

		if (!adapter->vf_cfg)
			goto free_netdev;
	}

	status = be_ctrl_init(adapter);
	if (status)
		goto free_vf_cfg;

	if (lancer_chip(adapter)) {
		status = lancer_test_and_set_rdy_state(adapter);
		if (status) {
			dev_err(&pdev->dev, "Adapter in non recoverable error\n");
			goto ctrl_clean;
		}
	}

	/* sync up with fw's ready state */
	if (be_physfn(adapter)) {
		status = be_cmd_POST(adapter);
		if (status)
			goto ctrl_clean;
	}

	/* tell fw we're ready to fire cmds */
	status = be_cmd_fw_init(adapter);
	if (status)
		goto ctrl_clean;

	status = be_cmd_reset_function(adapter);
	if (status)
		goto ctrl_clean;

	status = be_stats_init(adapter);
	if (status)
		goto ctrl_clean;

	status = be_get_config(adapter);
	if (status)
		goto stats_clean;

	/* The INTR bit may be set in the card when probed by a kdump kernel
	 * after a crash.
	 */
	if (!lancer_chip(adapter))
		be_intr_set(adapter, false);

	be_msix_enable(adapter);

	INIT_DELAYED_WORK(&adapter->work, be_worker);

	status = be_setup(adapter);
	if (status)
		goto msix_disable;

	be_netdev_init(netdev);
	status = register_netdev(netdev);
	if (status != 0)
		goto unsetup;

	if (be_physfn(adapter) && adapter->sriov_enabled) {
		u8 mac_speed;
		u16 vf, lnk_speed;

		/* On Lancer, VFs already have a permanent MAC (see
		 * be_get_config); only BE-family VFs need one assigned.
		 */
		if (!lancer_chip(adapter)) {
			status = be_vf_eth_addr_config(adapter);
			if (status)
				goto unreg_netdev;
		}

		/* Record each VF's link speed as its default tx rate */
		for (vf = 0; vf < num_vfs; vf++) {
			status = be_cmd_link_status_query(adapter, &mac_speed,
						&lnk_speed, vf + 1);
			if (!status)
				adapter->vf_cfg[vf].vf_tx_rate = lnk_speed * 10;
			else
				goto unreg_netdev;
		}
	}

	dev_info(&pdev->dev, "%s port %d\n", nic_name(pdev), adapter->port_num);

	schedule_delayed_work(&adapter->work, msecs_to_jiffies(100));
	return 0;

unreg_netdev:
	unregister_netdev(netdev);
unsetup:
	be_clear(adapter);
msix_disable:
	be_msix_disable(adapter);
stats_clean:
	be_stats_cleanup(adapter);
ctrl_clean:
	be_ctrl_cleanup(adapter);
free_vf_cfg:
	kfree(adapter->vf_cfg);
free_netdev:
	be_sriov_disable(adapter);
	free_netdev(netdev);
	pci_set_drvdata(pdev, NULL);
rel_reg:
	pci_release_regions(pdev);
disable_dev:
	pci_disable_device(pdev);
do_none:
	dev_err(&pdev->dev, "%s initialization failed\n", nic_name(pdev));
	return status;
}
3433
/* PM suspend: stop the worker, arm wake-on-LAN if configured, close
 * the interface, save current flow-control settings, tear down queues
 * and MSI-X, then power the PCI device down.
 */
static int be_suspend(struct pci_dev *pdev, pm_message_t state)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);
	struct net_device *netdev = adapter->netdev;

	cancel_delayed_work_sync(&adapter->work);
	if (adapter->wol)
		be_setup_wol(adapter, true);

	netif_device_detach(netdev);
	if (netif_running(netdev)) {
		rtnl_lock();
		be_close(netdev);
		rtnl_unlock();
	}
	/* Remember flow-control state so be_setup() can restore it */
	be_cmd_get_flow_control(adapter, &adapter->tx_fc, &adapter->rx_fc);
	be_clear(adapter);

	be_msix_disable(adapter);
	pci_save_state(pdev);
	pci_disable_device(pdev);
	pci_set_power_state(pdev, pci_choose_state(pdev, state));
	return 0;
}
3458
/* PM resume: re-enable the PCI device, restore its state, re-init the
 * FW command path and queues, reopen the interface if it was running,
 * disarm wake-on-LAN and restart the worker.
 */
static int be_resume(struct pci_dev *pdev)
{
	int status = 0;
	struct be_adapter *adapter = pci_get_drvdata(pdev);
	struct net_device *netdev = adapter->netdev;

	netif_device_detach(netdev);

	status = pci_enable_device(pdev);
	if (status)
		return status;

	pci_set_power_state(pdev, 0);
	pci_restore_state(pdev);

	be_msix_enable(adapter);
	/* tell fw we're ready to fire cmds */
	status = be_cmd_fw_init(adapter);
	if (status)
		return status;

	be_setup(adapter);
	if (netif_running(netdev)) {
		rtnl_lock();
		be_open(netdev);
		rtnl_unlock();
	}
	netif_device_attach(netdev);

	if (adapter->wol)
		be_setup_wol(adapter, false);

	schedule_delayed_work(&adapter->work, msecs_to_jiffies(100));
	return 0;
}
3494
/*
 * An FLR will stop BE from DMAing any data.
 *
 * Shutdown handler (reboot/poweroff): quiesce the adapter so no DMA or
 * interrupts occur after the kernel hands control back to firmware.
 * The ordering below matters: stop the worker first, detach the netdev,
 * optionally arm wake-on-LAN, then reset the function and disable the
 * PCI device.
 */
static void be_shutdown(struct pci_dev *pdev)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);

	/* probe may have failed before drvdata was set */
	if (!adapter)
		return;

	/* make sure the periodic worker is not mid-flight and won't re-run */
	cancel_delayed_work_sync(&adapter->work);

	netif_device_detach(adapter->netdev);

	/* arm wake-on-LAN if the user enabled it */
	if (adapter->wol)
		be_setup_wol(adapter, true);

	/* function-level reset stops any in-flight DMA (see note above) */
	be_cmd_reset_function(adapter);

	pci_disable_device(pdev);
}
3516
/*
 * EEH callback: a PCI bus error was detected on this device.
 *
 * Quiesce the adapter (flag the error, detach and close the netdev,
 * tear down driver state) and tell the EEH core whether to attempt a
 * slot reset or give up on the device.
 *
 * Returns PCI_ERS_RESULT_DISCONNECT on permanent failure, otherwise
 * PCI_ERS_RESULT_NEED_RESET to request a slot reset.
 */
static pci_ers_result_t be_eeh_err_detected(struct pci_dev *pdev,
				pci_channel_state_t state)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);
	struct net_device *netdev =  adapter->netdev;

	dev_err(&adapter->pdev->dev, "EEH error detected\n");

	/* let the rest of the driver know hw access is unsafe;
	 * cleared again in be_eeh_reset()
	 */
	adapter->eeh_err = true;

	netif_device_detach(netdev);

	if (netif_running(netdev)) {
		rtnl_lock();
		be_close(netdev);
		rtnl_unlock();
	}
	be_clear(adapter);

	/* permanent failure: no point attempting a reset */
	if (state == pci_channel_io_perm_failure)
		return PCI_ERS_RESULT_DISCONNECT;

	pci_disable_device(pdev);

	return PCI_ERS_RESULT_NEED_RESET;
}
3543
/*
 * EEH callback: the slot has been reset; re-enable the device and check
 * that the card came back to life.
 *
 * Returns PCI_ERS_RESULT_RECOVERED if the device and firmware are
 * responsive again, PCI_ERS_RESULT_DISCONNECT otherwise (the EEH core
 * then abandons the device).
 */
static pci_ers_result_t be_eeh_reset(struct pci_dev *pdev)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);
	int status;

	dev_info(&adapter->pdev->dev, "EEH reset\n");
	/* hw access is safe again from here on */
	adapter->eeh_err = false;

	status = pci_enable_device(pdev);
	if (status)
		return PCI_ERS_RESULT_DISCONNECT;

	pci_set_master(pdev);
	pci_set_power_state(pdev, 0);
	pci_restore_state(pdev);

	/* Check if card is ok and fw is ready */
	status = be_cmd_POST(adapter);
	if (status)
		return PCI_ERS_RESULT_DISCONNECT;

	return PCI_ERS_RESULT_RECOVERED;
}
3567
/*
 * EEH callback: recovery succeeded; rebuild driver state and resume
 * normal operation.
 *
 * Re-initializes firmware state, re-runs be_setup(), reopens the
 * interface if it was running and re-attaches the netdev.  On any
 * failure the device is left detached and an error is logged (the EEH
 * core has no failure path to report back to at this stage).
 */
static void be_eeh_resume(struct pci_dev *pdev)
{
	int status = 0;
	struct be_adapter *adapter = pci_get_drvdata(pdev);
	struct net_device *netdev = adapter->netdev;

	dev_info(&adapter->pdev->dev, "EEH resume\n");

	/* capture the freshly restored config space for future restores */
	pci_save_state(pdev);

	/* tell fw we're ready to fire cmds */
	status = be_cmd_fw_init(adapter);
	if (status)
		goto err;

	status = be_setup(adapter);
	if (status)
		goto err;

	if (netif_running(netdev)) {
		status = be_open(netdev);
		if (status)
			goto err;
	}
	netif_device_attach(netdev);
	return;
err:
	dev_err(&adapter->pdev->dev, "EEH resume failed\n");
}
3597
/* PCI error (EEH) recovery callbacks wired into be_driver below */
static struct pci_error_handlers be_eeh_handlers = {
	.error_detected = be_eeh_err_detected,
	.slot_reset = be_eeh_reset,
	.resume = be_eeh_resume,
};
3603
/* PCI driver descriptor: probe/remove, legacy PM, shutdown and EEH hooks */
static struct pci_driver be_driver = {
	.name = DRV_NAME,
	.id_table = be_dev_ids,
	.probe = be_probe,
	.remove = be_remove,
	.suspend = be_suspend,
	.resume = be_resume,
	.shutdown = be_shutdown,
	.err_handler = &be_eeh_handlers
};
3614
3615static int __init be_init_module(void)
3616{
Joe Perches8e95a202009-12-03 07:58:21 +00003617 if (rx_frag_size != 8192 && rx_frag_size != 4096 &&
3618 rx_frag_size != 2048) {
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003619 printk(KERN_WARNING DRV_NAME
3620 " : Module param rx_frag_size must be 2048/4096/8192."
3621 " Using 2048\n");
3622 rx_frag_size = 2048;
3623 }
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003624
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003625 return pci_register_driver(&be_driver);
3626}
3627module_init(be_init_module);
3628
/* Module exit point: unregister the PCI driver (triggers be_remove). */
static void __exit be_exit_module(void)
{
	pci_unregister_driver(&be_driver);
}
module_exit(be_exit_module);