/*
 * Copyright (C) 2005 - 2011 Emulex
 * All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version 2
 * as published by the Free Software Foundation. The full GNU General
 * Public License is included in this distribution in the file called COPYING.
 *
 * Contact Information:
 * linux-drivers@emulex.com
 *
 * Emulex
 * 3333 Susan Street
 * Costa Mesa, CA 92626
 */

#include <linux/prefetch.h>
#include "be.h"
#include "be_cmds.h"
#include <asm/div64.h>

MODULE_VERSION(DRV_VER);
MODULE_DESCRIPTION(DRV_DESC " " DRV_VER);
MODULE_AUTHOR("ServerEngines Corporation");
MODULE_LICENSE("GPL");

static ushort rx_frag_size = 2048;
static unsigned int num_vfs;
module_param(rx_frag_size, ushort, S_IRUGO);
module_param(num_vfs, uint, S_IRUGO);
MODULE_PARM_DESC(rx_frag_size, "Size of a fragment that holds rcvd data.");
MODULE_PARM_DESC(num_vfs, "Number of PCI VFs to initialize");

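/*
 * A minimal usage sketch (assuming this driver builds as the be2net
 * module): both parameters above are read-only at runtime (S_IRUGO),
 * so they must be given at load time, e.g.:
 *
 *   modprobe be2net rx_frag_size=4096 num_vfs=4
 */
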
static DEFINE_PCI_DEVICE_TABLE(be_dev_ids) = {
	{ PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID1) },
	{ PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID2) },
	{ PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID1) },
	{ PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID2) },
	{ PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID3)},
	{ PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID4)},
	{ 0 }
};
MODULE_DEVICE_TABLE(pci, be_dev_ids);
/* UE Status Low CSR */
static const char * const ue_status_low_desc[] = {
	"CEV",
	"CTX",
	"DBUF",
	"ERX",
	"Host",
	"MPU",
	"NDMA",
	"PTC ",
	"RDMA ",
	"RXF ",
	"RXIPS ",
	"RXULP0 ",
	"RXULP1 ",
	"RXULP2 ",
	"TIM ",
	"TPOST ",
	"TPRE ",
	"TXIPS ",
	"TXULP0 ",
	"TXULP1 ",
	"UC ",
	"WDMA ",
	"TXULP2 ",
	"HOST1 ",
	"P0_OB_LINK ",
	"P1_OB_LINK ",
	"HOST_GPIO ",
	"MBOX ",
	"AXGMAC0",
	"AXGMAC1",
	"JTAG",
	"MPU_INTPEND"
};
/* UE Status High CSR */
static const char * const ue_status_hi_desc[] = {
	"LPCMEMHOST",
	"MGMT_MAC",
	"PCS0ONLINE",
	"MPU_IRAM",
	"PCS1ONLINE",
	"PCTL0",
	"PCTL1",
	"PMEM",
	"RR",
	"TXPB",
	"RXPP",
	"XAUI",
	"TXP",
	"ARM",
	"IPC",
	"HOST2",
	"HOST3",
	"HOST4",
	"HOST5",
	"HOST6",
	"HOST7",
	"HOST8",
	"HOST9",
	"NETC",
	"Unknown",
	"Unknown",
	"Unknown",
	"Unknown",
	"Unknown",
	"Unknown",
	"Unknown",
	"Unknown"
};

static void be_queue_free(struct be_adapter *adapter, struct be_queue_info *q)
{
	struct be_dma_mem *mem = &q->dma_mem;
	if (mem->va)
		dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
				  mem->dma);
}

static int be_queue_alloc(struct be_adapter *adapter, struct be_queue_info *q,
		u16 len, u16 entry_size)
{
	struct be_dma_mem *mem = &q->dma_mem;

	memset(q, 0, sizeof(*q));
	q->len = len;
	q->entry_size = entry_size;
	mem->size = len * entry_size;
	mem->va = dma_alloc_coherent(&adapter->pdev->dev, mem->size, &mem->dma,
				     GFP_KERNEL);
	if (!mem->va)
		return -1;
	memset(mem->va, 0, mem->size);
	return 0;
}

static void be_intr_set(struct be_adapter *adapter, bool enable)
{
	u8 __iomem *addr = adapter->pcicfg + PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET;
	u32 reg = ioread32(addr);
	u32 enabled = reg & MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;

	if (adapter->eeh_err)
		return;

	if (!enabled && enable)
		reg |= MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
	else if (enabled && !enable)
		reg &= ~MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
	else
		return;

	iowrite32(reg, addr);
}

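/* Ring the RQ doorbell: the queue id goes in the ring-id field of the
 * doorbell word and the count of newly posted rx descriptors in the
 * num-posted field; wmb() orders the descriptor writes before the MMIO.
 */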
static void be_rxq_notify(struct be_adapter *adapter, u16 qid, u16 posted)
{
	u32 val = 0;
	val |= qid & DB_RQ_RING_ID_MASK;
	val |= posted << DB_RQ_NUM_POSTED_SHIFT;

	wmb();
	iowrite32(val, adapter->db + DB_RQ_OFFSET);
}

static void be_txq_notify(struct be_adapter *adapter, u16 qid, u16 posted)
{
	u32 val = 0;
	val |= qid & DB_TXULP_RING_ID_MASK;
	val |= (posted & DB_TXULP_NUM_POSTED_MASK) << DB_TXULP_NUM_POSTED_SHIFT;

	wmb();
	iowrite32(val, adapter->db + DB_TXULP1_OFFSET);
}

static void be_eq_notify(struct be_adapter *adapter, u16 qid,
		bool arm, bool clear_int, u16 num_popped)
{
	u32 val = 0;
	val |= qid & DB_EQ_RING_ID_MASK;
	val |= ((qid & DB_EQ_RING_ID_EXT_MASK) <<
			DB_EQ_RING_ID_EXT_MASK_SHIFT);

	if (adapter->eeh_err)
		return;

	if (arm)
		val |= 1 << DB_EQ_REARM_SHIFT;
	if (clear_int)
		val |= 1 << DB_EQ_CLR_SHIFT;
	val |= 1 << DB_EQ_EVNT_SHIFT;
	val |= num_popped << DB_EQ_NUM_POPPED_SHIFT;
	iowrite32(val, adapter->db + DB_EQ_OFFSET);
}

void be_cq_notify(struct be_adapter *adapter, u16 qid, bool arm, u16 num_popped)
{
	u32 val = 0;
	val |= qid & DB_CQ_RING_ID_MASK;
	val |= ((qid & DB_CQ_RING_ID_EXT_MASK) <<
			DB_CQ_RING_ID_EXT_MASK_SHIFT);

	if (adapter->eeh_err)
		return;

	if (arm)
		val |= 1 << DB_CQ_REARM_SHIFT;
	val |= num_popped << DB_CQ_NUM_POPPED_SHIFT;
	iowrite32(val, adapter->db + DB_CQ_OFFSET);
}

static int be_mac_addr_set(struct net_device *netdev, void *p)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct sockaddr *addr = p;
	int status = 0;

	if (!is_valid_ether_addr(addr->sa_data))
		return -EADDRNOTAVAIL;

	/* MAC addr configuration will be done in hardware for VFs
	 * by their corresponding PFs. Just copy to netdev addr here
	 */
	if (!be_physfn(adapter))
		goto netdev_addr;

	status = be_cmd_pmac_del(adapter, adapter->if_handle,
				adapter->pmac_id, 0);
	if (status)
		return status;

	status = be_cmd_pmac_add(adapter, (u8 *)addr->sa_data,
			adapter->if_handle, &adapter->pmac_id, 0);
netdev_addr:
	if (!status)
		memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);

	return status;
}

static void populate_be2_stats(struct be_adapter *adapter)
{
	struct be_hw_stats_v0 *hw_stats = hw_stats_from_cmd(adapter);
	struct be_pmem_stats *pmem_sts = &hw_stats->pmem;
	struct be_rxf_stats_v0 *rxf_stats = &hw_stats->rxf;
	struct be_port_rxf_stats_v0 *port_stats =
		&rxf_stats->port[adapter->port_num];
	struct be_drv_stats *drvs = &adapter->drv_stats;

	be_dws_le_to_cpu(hw_stats, sizeof(*hw_stats));
	drvs->rx_pause_frames = port_stats->rx_pause_frames;
	drvs->rx_crc_errors = port_stats->rx_crc_errors;
	drvs->rx_control_frames = port_stats->rx_control_frames;
	drvs->rx_in_range_errors = port_stats->rx_in_range_errors;
	drvs->rx_frame_too_long = port_stats->rx_frame_too_long;
	drvs->rx_dropped_runt = port_stats->rx_dropped_runt;
	drvs->rx_ip_checksum_errs = port_stats->rx_ip_checksum_errs;
	drvs->rx_tcp_checksum_errs = port_stats->rx_tcp_checksum_errs;
	drvs->rx_udp_checksum_errs = port_stats->rx_udp_checksum_errs;
	drvs->rxpp_fifo_overflow_drop = port_stats->rx_fifo_overflow;
	drvs->rx_dropped_tcp_length = port_stats->rx_dropped_tcp_length;
	drvs->rx_dropped_too_small = port_stats->rx_dropped_too_small;
	drvs->rx_dropped_too_short = port_stats->rx_dropped_too_short;
	drvs->rx_out_range_errors = port_stats->rx_out_range_errors;
	drvs->rx_input_fifo_overflow_drop = port_stats->rx_input_fifo_overflow;
	drvs->rx_dropped_header_too_small =
		port_stats->rx_dropped_header_too_small;
	drvs->rx_address_match_errors = port_stats->rx_address_match_errors;
	drvs->rx_alignment_symbol_errors =
		port_stats->rx_alignment_symbol_errors;

	drvs->tx_pauseframes = port_stats->tx_pauseframes;
	drvs->tx_controlframes = port_stats->tx_controlframes;

	if (adapter->port_num)
		drvs->jabber_events = rxf_stats->port1_jabber_events;
	else
		drvs->jabber_events = rxf_stats->port0_jabber_events;
	drvs->rx_drops_no_pbuf = rxf_stats->rx_drops_no_pbuf;
	drvs->rx_drops_no_txpb = rxf_stats->rx_drops_no_txpb;
	drvs->rx_drops_no_erx_descr = rxf_stats->rx_drops_no_erx_descr;
	drvs->rx_drops_invalid_ring = rxf_stats->rx_drops_invalid_ring;
	drvs->forwarded_packets = rxf_stats->forwarded_packets;
	drvs->rx_drops_mtu = rxf_stats->rx_drops_mtu;
	drvs->rx_drops_no_tpre_descr = rxf_stats->rx_drops_no_tpre_descr;
	drvs->rx_drops_too_many_frags = rxf_stats->rx_drops_too_many_frags;
	adapter->drv_stats.eth_red_drops = pmem_sts->eth_red_drops;
}

static void populate_be3_stats(struct be_adapter *adapter)
{
	struct be_hw_stats_v1 *hw_stats = hw_stats_from_cmd(adapter);
	struct be_pmem_stats *pmem_sts = &hw_stats->pmem;
	struct be_rxf_stats_v1 *rxf_stats = &hw_stats->rxf;
	struct be_port_rxf_stats_v1 *port_stats =
		&rxf_stats->port[adapter->port_num];
	struct be_drv_stats *drvs = &adapter->drv_stats;

	be_dws_le_to_cpu(hw_stats, sizeof(*hw_stats));
	drvs->rx_pause_frames = port_stats->rx_pause_frames;
	drvs->rx_crc_errors = port_stats->rx_crc_errors;
	drvs->rx_control_frames = port_stats->rx_control_frames;
	drvs->rx_in_range_errors = port_stats->rx_in_range_errors;
	drvs->rx_frame_too_long = port_stats->rx_frame_too_long;
	drvs->rx_dropped_runt = port_stats->rx_dropped_runt;
	drvs->rx_ip_checksum_errs = port_stats->rx_ip_checksum_errs;
	drvs->rx_tcp_checksum_errs = port_stats->rx_tcp_checksum_errs;
	drvs->rx_udp_checksum_errs = port_stats->rx_udp_checksum_errs;
	drvs->rx_dropped_tcp_length = port_stats->rx_dropped_tcp_length;
	drvs->rx_dropped_too_small = port_stats->rx_dropped_too_small;
	drvs->rx_dropped_too_short = port_stats->rx_dropped_too_short;
	drvs->rx_out_range_errors = port_stats->rx_out_range_errors;
	drvs->rx_dropped_header_too_small =
		port_stats->rx_dropped_header_too_small;
	drvs->rx_input_fifo_overflow_drop =
		port_stats->rx_input_fifo_overflow_drop;
	drvs->rx_address_match_errors = port_stats->rx_address_match_errors;
	drvs->rx_alignment_symbol_errors =
		port_stats->rx_alignment_symbol_errors;
	drvs->rxpp_fifo_overflow_drop = port_stats->rxpp_fifo_overflow_drop;
	drvs->tx_pauseframes = port_stats->tx_pauseframes;
	drvs->tx_controlframes = port_stats->tx_controlframes;
	drvs->jabber_events = port_stats->jabber_events;
	drvs->rx_drops_no_pbuf = rxf_stats->rx_drops_no_pbuf;
	drvs->rx_drops_no_txpb = rxf_stats->rx_drops_no_txpb;
	drvs->rx_drops_no_erx_descr = rxf_stats->rx_drops_no_erx_descr;
	drvs->rx_drops_invalid_ring = rxf_stats->rx_drops_invalid_ring;
	drvs->forwarded_packets = rxf_stats->forwarded_packets;
	drvs->rx_drops_mtu = rxf_stats->rx_drops_mtu;
	drvs->rx_drops_no_tpre_descr = rxf_stats->rx_drops_no_tpre_descr;
	drvs->rx_drops_too_many_frags = rxf_stats->rx_drops_too_many_frags;
	adapter->drv_stats.eth_red_drops = pmem_sts->eth_red_drops;
}

static void populate_lancer_stats(struct be_adapter *adapter)
{
	struct be_drv_stats *drvs = &adapter->drv_stats;
	struct lancer_pport_stats *pport_stats =
					pport_stats_from_cmd(adapter);

	be_dws_le_to_cpu(pport_stats, sizeof(*pport_stats));
	drvs->rx_pause_frames = pport_stats->rx_pause_frames_lo;
	drvs->rx_crc_errors = pport_stats->rx_crc_errors_lo;
	drvs->rx_control_frames = pport_stats->rx_control_frames_lo;
	drvs->rx_in_range_errors = pport_stats->rx_in_range_errors;
	drvs->rx_frame_too_long = pport_stats->rx_frames_too_long_lo;
	drvs->rx_dropped_runt = pport_stats->rx_dropped_runt;
	drvs->rx_ip_checksum_errs = pport_stats->rx_ip_checksum_errors;
	drvs->rx_tcp_checksum_errs = pport_stats->rx_tcp_checksum_errors;
	drvs->rx_udp_checksum_errs = pport_stats->rx_udp_checksum_errors;
	drvs->rx_dropped_tcp_length =
				pport_stats->rx_dropped_invalid_tcp_length;
	drvs->rx_dropped_too_small = pport_stats->rx_dropped_too_small;
	drvs->rx_dropped_too_short = pport_stats->rx_dropped_too_short;
	drvs->rx_out_range_errors = pport_stats->rx_out_of_range_errors;
	drvs->rx_dropped_header_too_small =
				pport_stats->rx_dropped_header_too_small;
	drvs->rx_input_fifo_overflow_drop = pport_stats->rx_fifo_overflow;
	drvs->rx_address_match_errors = pport_stats->rx_address_match_errors;
	drvs->rx_alignment_symbol_errors = pport_stats->rx_symbol_errors_lo;
	drvs->rxpp_fifo_overflow_drop = pport_stats->rx_fifo_overflow;
	drvs->tx_pauseframes = pport_stats->tx_pause_frames_lo;
	drvs->tx_controlframes = pport_stats->tx_control_frames_lo;
	drvs->jabber_events = pport_stats->rx_jabbers;
	drvs->rx_drops_invalid_ring = pport_stats->rx_drops_invalid_queue;
	drvs->forwarded_packets = pport_stats->num_forwards_lo;
	drvs->rx_drops_mtu = pport_stats->rx_drops_mtu_lo;
	drvs->rx_drops_too_many_frags =
				pport_stats->rx_drops_too_many_frags_lo;
}

void be_parse_stats(struct be_adapter *adapter)
{
	struct be_erx_stats_v1 *erx = be_erx_stats_from_cmd(adapter);
	struct be_rx_obj *rxo;
	int i;

	if (adapter->generation == BE_GEN3) {
		if (lancer_chip(adapter))
			populate_lancer_stats(adapter);
		else
			populate_be3_stats(adapter);
	} else {
		populate_be2_stats(adapter);
	}

	/* as erx_v1 is longer than v0, ok to use v1 defn for v0 access */
	for_all_rx_queues(adapter, rxo, i)
		rx_stats(rxo)->rx_drops_no_frags =
			erx->rx_drops_no_fragments[rxo->q.id];
}

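/* Fold the per-queue software counters and the parsed hw stats into the
 * generic net_device_stats reported to the stack (ifconfig/ip -s link).
 */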
void netdev_stats_update(struct be_adapter *adapter)
{
	struct be_drv_stats *drvs = &adapter->drv_stats;
	struct net_device_stats *dev_stats = &adapter->netdev->stats;
	struct be_rx_obj *rxo;
	struct be_tx_obj *txo;
	unsigned long pkts = 0, bytes = 0, mcast = 0, drops = 0;
	int i;

	for_all_rx_queues(adapter, rxo, i) {
		pkts += rx_stats(rxo)->rx_pkts;
		bytes += rx_stats(rxo)->rx_bytes;
		mcast += rx_stats(rxo)->rx_mcast_pkts;
		drops += rx_stats(rxo)->rx_drops_no_skbs;
	}
	dev_stats->rx_packets = pkts;
	dev_stats->rx_bytes = bytes;
	dev_stats->multicast = mcast;
	dev_stats->rx_dropped = drops;

	pkts = bytes = 0;
	for_all_tx_queues(adapter, txo, i) {
		pkts += tx_stats(txo)->tx_pkts;
		bytes += tx_stats(txo)->tx_bytes;
	}
	dev_stats->tx_packets = pkts;
	dev_stats->tx_bytes = bytes;

	/* bad pkts received */
	dev_stats->rx_errors = drvs->rx_crc_errors +
		drvs->rx_alignment_symbol_errors +
		drvs->rx_in_range_errors +
		drvs->rx_out_range_errors +
		drvs->rx_frame_too_long +
		drvs->rx_dropped_too_small +
		drvs->rx_dropped_too_short +
		drvs->rx_dropped_header_too_small +
		drvs->rx_dropped_tcp_length +
		drvs->rx_dropped_runt +
		drvs->rx_tcp_checksum_errs +
		drvs->rx_ip_checksum_errs +
		drvs->rx_udp_checksum_errs;

	/* detailed rx errors */
	dev_stats->rx_length_errors = drvs->rx_in_range_errors +
		drvs->rx_out_range_errors +
		drvs->rx_frame_too_long;

	dev_stats->rx_crc_errors = drvs->rx_crc_errors;

	/* frame alignment errors */
	dev_stats->rx_frame_errors = drvs->rx_alignment_symbol_errors;

	/* receiver fifo overrun */
	/* drops_no_pbuf is not per i/f, it's per BE card */
	dev_stats->rx_fifo_errors = drvs->rxpp_fifo_overflow_drop +
				drvs->rx_input_fifo_overflow_drop +
				drvs->rx_drops_no_pbuf;
}

void be_link_status_update(struct be_adapter *adapter, bool link_up)
{
	struct net_device *netdev = adapter->netdev;

	/* If link came up or went down */
	if (adapter->link_up != link_up) {
		adapter->link_speed = -1;
		if (link_up) {
			netif_carrier_on(netdev);
			printk(KERN_INFO "%s: Link up\n", netdev->name);
		} else {
			netif_carrier_off(netdev);
			printk(KERN_INFO "%s: Link down\n", netdev->name);
		}
		adapter->link_up = link_up;
	}
}

static void be_tx_stats_update(struct be_tx_obj *txo,
			u32 wrb_cnt, u32 copied, u32 gso_segs, bool stopped)
{
	struct be_tx_stats *stats = tx_stats(txo);

	stats->tx_reqs++;
	stats->tx_wrbs += wrb_cnt;
	stats->tx_bytes += copied;
	stats->tx_pkts += (gso_segs ? gso_segs : 1);
	if (stopped)
		stats->tx_stops++;
}

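/* BE2/BE3 appear to require an even number of WRBs per tx request, so a
 * zero-length "dummy" WRB pads odd-sized requests; Lancer does not need
 * the pad. Layout per request:
 *   [hdr wrb][frag wrb] ... [frag wrb][optional dummy wrb]
 */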
/* Determine number of WRB entries needed to xmit data in an skb */
static u32 wrb_cnt_for_skb(struct be_adapter *adapter, struct sk_buff *skb,
								bool *dummy)
{
	int cnt = (skb->len > skb->data_len);

	cnt += skb_shinfo(skb)->nr_frags;

	/* to account for hdr wrb */
	cnt++;
	if (lancer_chip(adapter) || !(cnt & 1)) {
		*dummy = false;
	} else {
		/* add a dummy to make it an even num */
		cnt++;
		*dummy = true;
	}
	BUG_ON(cnt > BE_MAX_TX_FRAG_COUNT);
	return cnt;
}

static inline void wrb_fill(struct be_eth_wrb *wrb, u64 addr, int len)
{
	wrb->frag_pa_hi = upper_32_bits(addr);
	wrb->frag_pa_lo = addr & 0xFFFFFFFF;
	wrb->frag_len = len & ETH_WRB_FRAG_LEN_MASK;
}

static void wrb_fill_hdr(struct be_adapter *adapter, struct be_eth_hdr_wrb *hdr,
		struct sk_buff *skb, u32 wrb_cnt, u32 len)
{
	u8 vlan_prio = 0;
	u16 vlan_tag = 0;

	memset(hdr, 0, sizeof(*hdr));

	AMAP_SET_BITS(struct amap_eth_hdr_wrb, crc, hdr, 1);

	if (skb_is_gso(skb)) {
		AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso, hdr, 1);
		AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso_mss,
			hdr, skb_shinfo(skb)->gso_size);
		if (skb_is_gso_v6(skb) && !lancer_chip(adapter))
			AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso6, hdr, 1);
		if (lancer_chip(adapter) && adapter->sli_family ==
							LANCER_A0_SLI_FAMILY) {
			AMAP_SET_BITS(struct amap_eth_hdr_wrb, ipcs, hdr, 1);
			if (is_tcp_pkt(skb))
				AMAP_SET_BITS(struct amap_eth_hdr_wrb,
								tcpcs, hdr, 1);
			else if (is_udp_pkt(skb))
				AMAP_SET_BITS(struct amap_eth_hdr_wrb,
								udpcs, hdr, 1);
		}
	} else if (skb->ip_summed == CHECKSUM_PARTIAL) {
		if (is_tcp_pkt(skb))
			AMAP_SET_BITS(struct amap_eth_hdr_wrb, tcpcs, hdr, 1);
		else if (is_udp_pkt(skb))
			AMAP_SET_BITS(struct amap_eth_hdr_wrb, udpcs, hdr, 1);
	}

	if (vlan_tx_tag_present(skb)) {
		AMAP_SET_BITS(struct amap_eth_hdr_wrb, vlan, hdr, 1);
		vlan_tag = vlan_tx_tag_get(skb);
		vlan_prio = (vlan_tag & VLAN_PRIO_MASK) >> VLAN_PRIO_SHIFT;
		/* If vlan priority provided by OS is NOT in available bmap */
		if (!(adapter->vlan_prio_bmap & (1 << vlan_prio)))
			vlan_tag = (vlan_tag & ~VLAN_PRIO_MASK) |
					adapter->recommended_prio;
		AMAP_SET_BITS(struct amap_eth_hdr_wrb, vlan_tag, hdr, vlan_tag);
	}

	AMAP_SET_BITS(struct amap_eth_hdr_wrb, event, hdr, 1);
	AMAP_SET_BITS(struct amap_eth_hdr_wrb, complete, hdr, 1);
	AMAP_SET_BITS(struct amap_eth_hdr_wrb, num_wrb, hdr, wrb_cnt);
	AMAP_SET_BITS(struct amap_eth_hdr_wrb, len, hdr, len);
}

static void unmap_tx_frag(struct device *dev, struct be_eth_wrb *wrb,
		bool unmap_single)
{
	dma_addr_t dma;

	be_dws_le_to_cpu(wrb, sizeof(*wrb));

	dma = (u64)wrb->frag_pa_hi << 32 | (u64)wrb->frag_pa_lo;
	if (wrb->frag_len) {
		if (unmap_single)
			dma_unmap_single(dev, dma, wrb->frag_len,
					 DMA_TO_DEVICE);
		else
			dma_unmap_page(dev, dma, wrb->frag_len, DMA_TO_DEVICE);
	}
}

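/* DMA-map the skb head and each page fragment and fill one WRB per
 * mapped region; on a mapping failure, rewind the queue head and unmap
 * everything filled so far (the dma_err path).
 */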
static int make_tx_wrbs(struct be_adapter *adapter, struct be_queue_info *txq,
		struct sk_buff *skb, u32 wrb_cnt, bool dummy_wrb)
{
	dma_addr_t busaddr;
	int i, copied = 0;
	struct device *dev = &adapter->pdev->dev;
	struct sk_buff *first_skb = skb;
	struct be_eth_wrb *wrb;
	struct be_eth_hdr_wrb *hdr;
	bool map_single = false;
	u16 map_head;

	hdr = queue_head_node(txq);
	queue_head_inc(txq);
	map_head = txq->head;

	if (skb->len > skb->data_len) {
		int len = skb_headlen(skb);
		busaddr = dma_map_single(dev, skb->data, len, DMA_TO_DEVICE);
		if (dma_mapping_error(dev, busaddr))
			goto dma_err;
		map_single = true;
		wrb = queue_head_node(txq);
		wrb_fill(wrb, busaddr, len);
		be_dws_cpu_to_le(wrb, sizeof(*wrb));
		queue_head_inc(txq);
		copied += len;
	}

	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		struct skb_frag_struct *frag =
			&skb_shinfo(skb)->frags[i];
		busaddr = dma_map_page(dev, frag->page, frag->page_offset,
				       frag->size, DMA_TO_DEVICE);
		if (dma_mapping_error(dev, busaddr))
			goto dma_err;
		wrb = queue_head_node(txq);
		wrb_fill(wrb, busaddr, frag->size);
		be_dws_cpu_to_le(wrb, sizeof(*wrb));
		queue_head_inc(txq);
		copied += frag->size;
	}

	if (dummy_wrb) {
		wrb = queue_head_node(txq);
		wrb_fill(wrb, 0, 0);
		be_dws_cpu_to_le(wrb, sizeof(*wrb));
		queue_head_inc(txq);
	}

	wrb_fill_hdr(adapter, hdr, first_skb, wrb_cnt, copied);
	be_dws_cpu_to_le(hdr, sizeof(*hdr));

	return copied;
dma_err:
	txq->head = map_head;
	while (copied) {
		wrb = queue_head_node(txq);
		unmap_tx_frag(dev, wrb, map_single);
		map_single = false;
		copied -= wrb->frag_len;
		queue_head_inc(txq);
	}
	return 0;
}

static netdev_tx_t be_xmit(struct sk_buff *skb,
			struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_tx_obj *txo = &adapter->tx_obj[skb_get_queue_mapping(skb)];
	struct be_queue_info *txq = &txo->q;
	u32 wrb_cnt = 0, copied = 0;
	u32 start = txq->head;
	bool dummy_wrb, stopped = false;

	wrb_cnt = wrb_cnt_for_skb(adapter, skb, &dummy_wrb);

	copied = make_tx_wrbs(adapter, txq, skb, wrb_cnt, dummy_wrb);
	if (copied) {
		/* record the sent skb in the sent_skb table */
		BUG_ON(txo->sent_skb_list[start]);
		txo->sent_skb_list[start] = skb;

		/* Ensure txq has space for the next skb; Else stop the queue
		 * *BEFORE* ringing the tx doorbell, so that we serialize the
		 * tx compls of the current transmit which'll wake up the queue
		 */
		atomic_add(wrb_cnt, &txq->used);
		if ((BE_MAX_TX_FRAG_COUNT + atomic_read(&txq->used)) >=
								txq->len) {
			netif_stop_subqueue(netdev, skb_get_queue_mapping(skb));
			stopped = true;
		}

		be_txq_notify(adapter, txq->id, wrb_cnt);

		be_tx_stats_update(txo, wrb_cnt, copied,
				skb_shinfo(skb)->gso_segs, stopped);
	} else {
		txq->head = start;
		dev_kfree_skb_any(skb);
	}
	return NETDEV_TX_OK;
}

static int be_change_mtu(struct net_device *netdev, int new_mtu)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	if (new_mtu < BE_MIN_MTU ||
			new_mtu > (BE_MAX_JUMBO_FRAME_SIZE -
					(ETH_HLEN + ETH_FCS_LEN))) {
		dev_info(&adapter->pdev->dev,
			"MTU must be between %d and %d bytes\n",
			BE_MIN_MTU,
			(BE_MAX_JUMBO_FRAME_SIZE - (ETH_HLEN + ETH_FCS_LEN)));
		return -EINVAL;
	}
	dev_info(&adapter->pdev->dev, "MTU changed from %d to %d bytes\n",
			netdev->mtu, new_mtu);
	netdev->mtu = new_mtu;
	return 0;
}

/*
 * A max of 64 (BE_NUM_VLANS_SUPPORTED) vlans can be configured in BE.
 * If the user configures more, place BE in vlan promiscuous mode.
 */
static int be_vid_config(struct be_adapter *adapter, bool vf, u32 vf_num)
{
	u16 vtag[BE_NUM_VLANS_SUPPORTED];
	u16 ntags = 0, i;
	int status = 0;
	u32 if_handle;

	if (vf) {
		if_handle = adapter->vf_cfg[vf_num].vf_if_handle;
		vtag[0] = cpu_to_le16(adapter->vf_cfg[vf_num].vf_vlan_tag);
		status = be_cmd_vlan_config(adapter, if_handle, vtag, 1, 1, 0);
	}

	if (adapter->vlans_added <= adapter->max_vlans) {
		/* Construct VLAN Table to give to HW */
		for (i = 0; i < VLAN_N_VID; i++) {
			if (adapter->vlan_tag[i]) {
				vtag[ntags] = cpu_to_le16(i);
				ntags++;
			}
		}
		status = be_cmd_vlan_config(adapter, adapter->if_handle,
					vtag, ntags, 1, 0);
	} else {
		status = be_cmd_vlan_config(adapter, adapter->if_handle,
					NULL, 0, 1, 1);
	}

	return status;
}

static void be_vlan_add_vid(struct net_device *netdev, u16 vid)
{
	struct be_adapter *adapter = netdev_priv(netdev);

	adapter->vlans_added++;
	if (!be_physfn(adapter))
		return;

	adapter->vlan_tag[vid] = 1;
	if (adapter->vlans_added <= (adapter->max_vlans + 1))
		be_vid_config(adapter, false, 0);
}

static void be_vlan_rem_vid(struct net_device *netdev, u16 vid)
{
	struct be_adapter *adapter = netdev_priv(netdev);

	adapter->vlans_added--;

	if (!be_physfn(adapter))
		return;

	adapter->vlan_tag[vid] = 0;
	if (adapter->vlans_added <= adapter->max_vlans)
		be_vid_config(adapter, false, 0);
}

static void be_set_multicast_list(struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);

	if (netdev->flags & IFF_PROMISC) {
		be_cmd_promiscuous_config(adapter, true);
		adapter->promiscuous = true;
		goto done;
	}

	/* BE was previously in promiscuous mode; disable it */
	if (adapter->promiscuous) {
		adapter->promiscuous = false;
		be_cmd_promiscuous_config(adapter, false);
	}

	/* Enable multicast promisc if num configured exceeds what we support */
	if (netdev->flags & IFF_ALLMULTI ||
	    netdev_mc_count(netdev) > BE_MAX_MC) {
		be_cmd_multicast_set(adapter, adapter->if_handle, NULL,
				&adapter->mc_cmd_mem);
		goto done;
	}

	be_cmd_multicast_set(adapter, adapter->if_handle, netdev,
		&adapter->mc_cmd_mem);
done:
	return;
}

static int be_set_vf_mac(struct net_device *netdev, int vf, u8 *mac)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	int status;

	if (!adapter->sriov_enabled)
		return -EPERM;

	if (!is_valid_ether_addr(mac) || (vf >= num_vfs))
		return -EINVAL;

	if (adapter->vf_cfg[vf].vf_pmac_id != BE_INVALID_PMAC_ID)
		status = be_cmd_pmac_del(adapter,
					adapter->vf_cfg[vf].vf_if_handle,
					adapter->vf_cfg[vf].vf_pmac_id, vf + 1);

	status = be_cmd_pmac_add(adapter, mac,
				adapter->vf_cfg[vf].vf_if_handle,
				&adapter->vf_cfg[vf].vf_pmac_id, vf + 1);

	if (status)
		dev_err(&adapter->pdev->dev, "MAC %pM set on VF %d Failed\n",
				mac, vf);
	else
		memcpy(adapter->vf_cfg[vf].vf_mac_addr, mac, ETH_ALEN);

	return status;
}

static int be_get_vf_config(struct net_device *netdev, int vf,
			struct ifla_vf_info *vi)
{
	struct be_adapter *adapter = netdev_priv(netdev);

	if (!adapter->sriov_enabled)
		return -EPERM;

	if (vf >= num_vfs)
		return -EINVAL;

	vi->vf = vf;
	vi->tx_rate = adapter->vf_cfg[vf].vf_tx_rate;
	vi->vlan = adapter->vf_cfg[vf].vf_vlan_tag;
	vi->qos = 0;
	memcpy(&vi->mac, adapter->vf_cfg[vf].vf_mac_addr, ETH_ALEN);

	return 0;
}

static int be_set_vf_vlan(struct net_device *netdev,
			int vf, u16 vlan, u8 qos)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	int status = 0;

	if (!adapter->sriov_enabled)
		return -EPERM;

	if ((vf >= num_vfs) || (vlan > 4095))
		return -EINVAL;

	if (vlan) {
		adapter->vf_cfg[vf].vf_vlan_tag = vlan;
		adapter->vlans_added++;
	} else {
		adapter->vf_cfg[vf].vf_vlan_tag = 0;
		adapter->vlans_added--;
	}

	status = be_vid_config(adapter, true, vf);

	if (status)
		dev_info(&adapter->pdev->dev,
				"VLAN %d config on VF %d failed\n", vlan, vf);
	return status;
}

static int be_set_vf_tx_rate(struct net_device *netdev,
			int vf, int rate)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	int status = 0;

	if (!adapter->sriov_enabled)
		return -EPERM;

	if ((vf >= num_vfs) || (rate < 0))
		return -EINVAL;

	if (rate > 10000)
		rate = 10000;

	adapter->vf_cfg[vf].vf_tx_rate = rate;
	status = be_cmd_set_qos(adapter, rate / 10, vf + 1);

	if (status)
		dev_info(&adapter->pdev->dev,
				"tx rate %d on VF %d failed\n", rate, vf);
	return status;
}

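/* Adaptive interrupt coalescing: once a second, compute the rx pkts/sec
 * rate and map it to an EQ delay (eqd = (pps / 110000) << 3), clamped to
 * the EQ's min/max and forced to 0 below a small threshold so that light
 * loads keep low latency.
 */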
static void be_rx_eqd_update(struct be_adapter *adapter, struct be_rx_obj *rxo)
{
	struct be_eq_obj *rx_eq = &rxo->rx_eq;
	struct be_rx_stats *stats = rx_stats(rxo);
	ulong now = jiffies;
	ulong delta = now - stats->rx_jiffies;
	u32 eqd;

	if (!rx_eq->enable_aic)
		return;

	/* Wrapped around */
	if (time_before(now, stats->rx_jiffies)) {
		stats->rx_jiffies = now;
		return;
	}

	/* Update once a second */
	if (delta < HZ)
		return;

	stats->rx_pps = (stats->rx_pkts - stats->rx_pkts_prev) / (delta / HZ);
	stats->rx_pkts_prev = stats->rx_pkts;
	stats->rx_jiffies = now;
	eqd = stats->rx_pps / 110000;
	eqd = eqd << 3;
	if (eqd > rx_eq->max_eqd)
		eqd = rx_eq->max_eqd;
	if (eqd < rx_eq->min_eqd)
		eqd = rx_eq->min_eqd;
	if (eqd < 10)
		eqd = 0;
	if (eqd != rx_eq->cur_eqd) {
		be_cmd_modify_eqd(adapter, rx_eq->q.id, eqd);
		rx_eq->cur_eqd = eqd;
	}
}

static void be_rx_stats_update(struct be_rx_obj *rxo,
		struct be_rx_compl_info *rxcp)
{
	struct be_rx_stats *stats = rx_stats(rxo);

	stats->rx_compl++;
	stats->rx_bytes += rxcp->pkt_size;
	stats->rx_pkts++;
	if (rxcp->pkt_type == BE_MULTICAST_PACKET)
		stats->rx_mcast_pkts++;
	if (rxcp->err)
		stats->rx_compl_err++;
}

static inline bool csum_passed(struct be_rx_compl_info *rxcp)
{
	/* L4 checksum is not reliable for non TCP/UDP packets.
	 * Also ignore ipcksm for ipv6 pkts */
	return (rxcp->tcpf || rxcp->udpf) && rxcp->l4_csum &&
		(rxcp->ip_csum || rxcp->ipv6);
}

static struct be_rx_page_info *
get_rx_page_info(struct be_adapter *adapter,
		struct be_rx_obj *rxo,
		u16 frag_idx)
{
	struct be_rx_page_info *rx_page_info;
	struct be_queue_info *rxq = &rxo->q;

	rx_page_info = &rxo->page_info_tbl[frag_idx];
	BUG_ON(!rx_page_info->page);

	if (rx_page_info->last_page_user) {
		dma_unmap_page(&adapter->pdev->dev,
			       dma_unmap_addr(rx_page_info, bus),
			       adapter->big_page_size, DMA_FROM_DEVICE);
		rx_page_info->last_page_user = false;
	}

	atomic_dec(&rxq->used);
	return rx_page_info;
}

/* Throw away the data in the Rx completion */
static void be_rx_compl_discard(struct be_adapter *adapter,
		struct be_rx_obj *rxo,
		struct be_rx_compl_info *rxcp)
{
	struct be_queue_info *rxq = &rxo->q;
	struct be_rx_page_info *page_info;
	u16 i, num_rcvd = rxcp->num_rcvd;

	for (i = 0; i < num_rcvd; i++) {
		page_info = get_rx_page_info(adapter, rxo, rxcp->rxq_idx);
		put_page(page_info->page);
		memset(page_info, 0, sizeof(*page_info));
		index_inc(&rxcp->rxq_idx, rxq->len);
	}
}

/*
 * skb_fill_rx_data forms a complete skb for an ether frame
 * indicated by rxcp.
 */
static void skb_fill_rx_data(struct be_adapter *adapter, struct be_rx_obj *rxo,
			struct sk_buff *skb, struct be_rx_compl_info *rxcp)
{
	struct be_queue_info *rxq = &rxo->q;
	struct be_rx_page_info *page_info;
	u16 i, j;
	u16 hdr_len, curr_frag_len, remaining;
	u8 *start;

	page_info = get_rx_page_info(adapter, rxo, rxcp->rxq_idx);
	start = page_address(page_info->page) + page_info->page_offset;
	prefetch(start);

	/* Copy data in the first descriptor of this completion */
	curr_frag_len = min(rxcp->pkt_size, rx_frag_size);

	/* Copy the header portion into skb_data */
	hdr_len = min(BE_HDR_LEN, curr_frag_len);
	memcpy(skb->data, start, hdr_len);
	skb->len = curr_frag_len;
	if (curr_frag_len <= BE_HDR_LEN) { /* tiny packet */
		/* Complete packet has now been moved to data */
		put_page(page_info->page);
		skb->data_len = 0;
		skb->tail += curr_frag_len;
	} else {
		skb_shinfo(skb)->nr_frags = 1;
		skb_shinfo(skb)->frags[0].page = page_info->page;
		skb_shinfo(skb)->frags[0].page_offset =
					page_info->page_offset + hdr_len;
		skb_shinfo(skb)->frags[0].size = curr_frag_len - hdr_len;
		skb->data_len = curr_frag_len - hdr_len;
		skb->tail += hdr_len;
	}
	page_info->page = NULL;

	if (rxcp->pkt_size <= rx_frag_size) {
		BUG_ON(rxcp->num_rcvd != 1);
		return;
	}

	/* More frags present for this completion */
	index_inc(&rxcp->rxq_idx, rxq->len);
	remaining = rxcp->pkt_size - curr_frag_len;
	for (i = 1, j = 0; i < rxcp->num_rcvd; i++) {
		page_info = get_rx_page_info(adapter, rxo, rxcp->rxq_idx);
		curr_frag_len = min(remaining, rx_frag_size);

		/* Coalesce all frags from the same physical page in one slot */
		if (page_info->page_offset == 0) {
			/* Fresh page */
			j++;
			skb_shinfo(skb)->frags[j].page = page_info->page;
			skb_shinfo(skb)->frags[j].page_offset =
							page_info->page_offset;
			skb_shinfo(skb)->frags[j].size = 0;
			skb_shinfo(skb)->nr_frags++;
		} else {
			put_page(page_info->page);
		}

		skb_shinfo(skb)->frags[j].size += curr_frag_len;
		skb->len += curr_frag_len;
		skb->data_len += curr_frag_len;

		remaining -= curr_frag_len;
		index_inc(&rxcp->rxq_idx, rxq->len);
		page_info->page = NULL;
	}
	BUG_ON(j > MAX_SKB_FRAGS);
}

/* Process the RX completion indicated by rxcp when GRO is disabled */
static void be_rx_compl_process(struct be_adapter *adapter,
			struct be_rx_obj *rxo,
			struct be_rx_compl_info *rxcp)
{
	struct net_device *netdev = adapter->netdev;
	struct sk_buff *skb;

	skb = netdev_alloc_skb_ip_align(netdev, BE_HDR_LEN);
	if (unlikely(!skb)) {
		rx_stats(rxo)->rx_drops_no_skbs++;
		be_rx_compl_discard(adapter, rxo, rxcp);
		return;
	}

	skb_fill_rx_data(adapter, rxo, skb, rxcp);

	if (likely((netdev->features & NETIF_F_RXCSUM) && csum_passed(rxcp)))
		skb->ip_summed = CHECKSUM_UNNECESSARY;
	else
		skb_checksum_none_assert(skb);

	skb->truesize = skb->len + sizeof(struct sk_buff);
	skb->protocol = eth_type_trans(skb, netdev);
	if (adapter->netdev->features & NETIF_F_RXHASH)
		skb->rxhash = rxcp->rss_hash;

	if (unlikely(rxcp->vlanf))
		__vlan_hwaccel_put_tag(skb, rxcp->vlan_tag);

	netif_receive_skb(skb);
}

/* Process the RX completion indicated by rxcp when GRO is enabled */
static void be_rx_compl_process_gro(struct be_adapter *adapter,
		struct be_rx_obj *rxo,
		struct be_rx_compl_info *rxcp)
{
	struct be_rx_page_info *page_info;
	struct sk_buff *skb = NULL;
	struct be_queue_info *rxq = &rxo->q;
	struct be_eq_obj *eq_obj = &rxo->rx_eq;
	u16 remaining, curr_frag_len;
	u16 i, j;

	skb = napi_get_frags(&eq_obj->napi);
	if (!skb) {
		be_rx_compl_discard(adapter, rxo, rxcp);
		return;
	}

	remaining = rxcp->pkt_size;
	for (i = 0, j = -1; i < rxcp->num_rcvd; i++) {
		page_info = get_rx_page_info(adapter, rxo, rxcp->rxq_idx);

		curr_frag_len = min(remaining, rx_frag_size);

		/* Coalesce all frags from the same physical page in one slot */
		if (i == 0 || page_info->page_offset == 0) {
			/* First frag or Fresh page */
			j++;
			skb_shinfo(skb)->frags[j].page = page_info->page;
			skb_shinfo(skb)->frags[j].page_offset =
							page_info->page_offset;
			skb_shinfo(skb)->frags[j].size = 0;
		} else {
			put_page(page_info->page);
		}
		skb_shinfo(skb)->frags[j].size += curr_frag_len;

		remaining -= curr_frag_len;
		index_inc(&rxcp->rxq_idx, rxq->len);
		memset(page_info, 0, sizeof(*page_info));
	}
	BUG_ON(j > MAX_SKB_FRAGS);

	skb_shinfo(skb)->nr_frags = j + 1;
	skb->len = rxcp->pkt_size;
	skb->data_len = rxcp->pkt_size;
	skb->truesize += rxcp->pkt_size;
	skb->ip_summed = CHECKSUM_UNNECESSARY;
	if (adapter->netdev->features & NETIF_F_RXHASH)
		skb->rxhash = rxcp->rss_hash;

	if (unlikely(rxcp->vlanf))
		__vlan_hwaccel_put_tag(skb, rxcp->vlan_tag);

	napi_gro_frags(&eq_obj->napi);
}

static void be_parse_rx_compl_v1(struct be_adapter *adapter,
				 struct be_eth_rx_compl *compl,
				 struct be_rx_compl_info *rxcp)
{
	rxcp->pkt_size =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v1, pktsize, compl);
	rxcp->vlanf = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, vtp, compl);
	rxcp->err = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, err, compl);
	rxcp->tcpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, tcpf, compl);
	rxcp->udpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, udpf, compl);
	rxcp->ip_csum =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v1, ipcksm, compl);
	rxcp->l4_csum =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v1, l4_cksm, compl);
	rxcp->ipv6 =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v1, ip_version, compl);
	rxcp->rxq_idx =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v1, fragndx, compl);
	rxcp->num_rcvd =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v1, numfrags, compl);
	rxcp->pkt_type =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v1, cast_enc, compl);
	rxcp->rss_hash =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v1, rsshash, compl);
	if (rxcp->vlanf) {
		rxcp->vtm = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, vtm,
					  compl);
		rxcp->vlan_tag = AMAP_GET_BITS(struct amap_eth_rx_compl_v1,
					       vlan_tag, compl);
	}
}

static void be_parse_rx_compl_v0(struct be_adapter *adapter,
				 struct be_eth_rx_compl *compl,
				 struct be_rx_compl_info *rxcp)
{
	rxcp->pkt_size =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v0, pktsize, compl);
	rxcp->vlanf = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, vtp, compl);
	rxcp->err = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, err, compl);
	rxcp->tcpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, tcpf, compl);
	rxcp->udpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, udpf, compl);
	rxcp->ip_csum =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v0, ipcksm, compl);
	rxcp->l4_csum =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v0, l4_cksm, compl);
	rxcp->ipv6 =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v0, ip_version, compl);
	rxcp->rxq_idx =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v0, fragndx, compl);
	rxcp->num_rcvd =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v0, numfrags, compl);
	rxcp->pkt_type =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v0, cast_enc, compl);
	rxcp->rss_hash =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v0, rsshash, compl);
	if (rxcp->vlanf) {
		rxcp->vtm = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, vtm,
					  compl);
		rxcp->vlan_tag = AMAP_GET_BITS(struct amap_eth_rx_compl_v0,
					       vlan_tag, compl);
	}
}
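/* A sketch of what AMAP_GET_BITS() (from be_hw.h) is doing in the parsers
 * above: the amap_* structs describe the descriptor with one u8 member per
 * bit, so a field's byte offset and sizeof() double as its bit offset and
 * width. Once be_dws_le_to_cpu() has swapped the descriptor to CPU order,
 * the extraction is roughly
 *
 *	(compl->dw[off / 32] >> (off % 32)) & ((1 << width) - 1)
 *
 * with extra handling for fields that straddle a 32-bit word.
 */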

static struct be_rx_compl_info *be_rx_compl_get(struct be_rx_obj *rxo)
{
	struct be_eth_rx_compl *compl = queue_tail_node(&rxo->cq);
	struct be_rx_compl_info *rxcp = &rxo->rxcp;
	struct be_adapter *adapter = rxo->adapter;

	/* For checking the valid bit it is Ok to use either definition as the
	 * valid bit is at the same position in both v0 and v1 Rx compl */
	if (compl->dw[offsetof(struct amap_eth_rx_compl_v1, valid) / 32] == 0)
		return NULL;

	rmb();
	be_dws_le_to_cpu(compl, sizeof(*compl));

	if (adapter->be3_native)
		be_parse_rx_compl_v1(adapter, compl, rxcp);
	else
		be_parse_rx_compl_v0(adapter, compl, rxcp);

	if (rxcp->vlanf) {
		/* The vlanf bit can be wrongly set on some cards;
		 * ignore it unless vtm is also set */
		if ((adapter->function_mode & 0x400) && !rxcp->vtm)
			rxcp->vlanf = 0;

		if (!lancer_chip(adapter))
			rxcp->vlan_tag = swab16(rxcp->vlan_tag);

		if (((adapter->pvid & VLAN_VID_MASK) ==
		     (rxcp->vlan_tag & VLAN_VID_MASK)) &&
		    !adapter->vlan_tag[rxcp->vlan_tag])
			rxcp->vlanf = 0;
	}

	/* As the compl has been parsed, reset it; we won't touch it again */
	compl->dw[offsetof(struct amap_eth_rx_compl_v1, valid) / 32] = 0;

	queue_tail_inc(&rxo->cq);
	return rxcp;
}
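/* The valid bit doubles as the producer/consumer handshake with the
 * adapter: hardware DMAs a completion and sets 'valid' last, so a clear
 * bit means the ring slot is empty. The rmb() above keeps the descriptor
 * reads from being reordered before that check, and zeroing the bit after
 * parsing lets the slot be recognised as fresh on the next ring wrap.
 */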

static inline struct page *be_alloc_pages(u32 size, gfp_t gfp)
{
	u32 order = get_order(size);

	if (order > 0)
		gfp |= __GFP_COMP;
	return alloc_pages(gfp, order);
}

/*
 * Allocate a page, split it into fragments of size rx_frag_size and post
 * them as receive buffers to BE.
 */
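/* Worked example with the module default rx_frag_size of 2048 on a system
 * with PAGE_SIZE = 4096:
 *	big_page_size = (1 << get_order(2048)) * PAGE_SIZE = 4096,
 * so each "big page" is carved into 4096 / 2048 = 2 fragments, and the
 * second fragment reuses the same page via get_page() instead of a fresh
 * allocation.
 */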
static void be_post_rx_frags(struct be_rx_obj *rxo, gfp_t gfp)
{
	struct be_adapter *adapter = rxo->adapter;
	struct be_rx_page_info *page_info_tbl = rxo->page_info_tbl;
	struct be_rx_page_info *page_info = NULL, *prev_page_info = NULL;
	struct be_queue_info *rxq = &rxo->q;
	struct page *pagep = NULL;
	struct be_eth_rx_d *rxd;
	u64 page_dmaaddr = 0, frag_dmaaddr;
	u32 posted, page_offset = 0;

	page_info = &rxo->page_info_tbl[rxq->head];
	for (posted = 0; posted < MAX_RX_POST && !page_info->page; posted++) {
		if (!pagep) {
			pagep = be_alloc_pages(adapter->big_page_size, gfp);
			if (unlikely(!pagep)) {
				rx_stats(rxo)->rx_post_fail++;
				break;
			}
			page_dmaaddr = dma_map_page(&adapter->pdev->dev, pagep,
						    0, adapter->big_page_size,
						    DMA_FROM_DEVICE);
			page_info->page_offset = 0;
		} else {
			get_page(pagep);
			page_info->page_offset = page_offset + rx_frag_size;
		}
		page_offset = page_info->page_offset;
		page_info->page = pagep;
		dma_unmap_addr_set(page_info, bus, page_dmaaddr);
		frag_dmaaddr = page_dmaaddr + page_info->page_offset;

		rxd = queue_head_node(rxq);
		rxd->fragpa_lo = cpu_to_le32(frag_dmaaddr & 0xFFFFFFFF);
		rxd->fragpa_hi = cpu_to_le32(upper_32_bits(frag_dmaaddr));

		/* Any space left in the current big page for another frag? */
		if ((page_offset + rx_frag_size + rx_frag_size) >
					adapter->big_page_size) {
			pagep = NULL;
			page_info->last_page_user = true;
		}

		prev_page_info = page_info;
		queue_head_inc(rxq);
		page_info = &page_info_tbl[rxq->head];
	}
	if (pagep)
		prev_page_info->last_page_user = true;

	if (posted) {
		atomic_add(posted, &rxq->used);
		be_rxq_notify(adapter, rxq->id, posted);
	} else if (atomic_read(&rxq->used) == 0) {
		/* Let be_worker replenish when memory is available */
		rxo->rx_post_starved = true;
	}
}

static struct be_eth_tx_compl *be_tx_compl_get(struct be_queue_info *tx_cq)
{
	struct be_eth_tx_compl *txcp = queue_tail_node(tx_cq);

	if (txcp->dw[offsetof(struct amap_eth_tx_compl, valid) / 32] == 0)
		return NULL;

	rmb();
	be_dws_le_to_cpu(txcp, sizeof(*txcp));

	txcp->dw[offsetof(struct amap_eth_tx_compl, valid) / 32] = 0;

	queue_tail_inc(tx_cq);
	return txcp;
}

static u16 be_tx_compl_process(struct be_adapter *adapter,
			       struct be_tx_obj *txo, u16 last_index)
{
	struct be_queue_info *txq = &txo->q;
	struct be_eth_wrb *wrb;
	struct sk_buff **sent_skbs = txo->sent_skb_list;
	struct sk_buff *sent_skb;
	u16 cur_index, num_wrbs = 1; /* account for hdr wrb */
	bool unmap_skb_hdr = true;

	sent_skb = sent_skbs[txq->tail];
	BUG_ON(!sent_skb);
	sent_skbs[txq->tail] = NULL;

	/* skip header wrb */
	queue_tail_inc(txq);

	do {
		cur_index = txq->tail;
		wrb = queue_tail_node(txq);
		unmap_tx_frag(&adapter->pdev->dev, wrb,
			      (unmap_skb_hdr && skb_headlen(sent_skb)));
		unmap_skb_hdr = false;

		num_wrbs++;
		queue_tail_inc(txq);
	} while (cur_index != last_index);

	kfree_skb(sent_skb);
	return num_wrbs;
}

static inline struct be_eq_entry *event_get(struct be_eq_obj *eq_obj)
{
	struct be_eq_entry *eqe = queue_tail_node(&eq_obj->q);

	if (!eqe->evt)
		return NULL;

	rmb();
	eqe->evt = le32_to_cpu(eqe->evt);
	queue_tail_inc(&eq_obj->q);
	return eqe;
}

static int event_handle(struct be_adapter *adapter,
			struct be_eq_obj *eq_obj,
			bool rearm)
{
	struct be_eq_entry *eqe;
	u16 num = 0;

	while ((eqe = event_get(eq_obj)) != NULL) {
		eqe->evt = 0;
		num++;
	}

	/* Deal with any spurious interrupts that come
	 * without events
	 */
	if (!num)
		rearm = true;

	be_eq_notify(adapter, eq_obj->q.id, rearm, true, num);
	if (num)
		napi_schedule(&eq_obj->napi);

	return num;
}
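/* Arm/notify protocol: be_eq_notify() acks 'num' consumed EQ entries and,
 * when 'rearm' is true, re-arms the EQ for further interrupts. Callers
 * re-arm RX EQs here but pass rearm == false for the TX/MCC EQ, whose
 * poll routine re-arms it after draining; a spurious interrupt (num == 0)
 * always re-arms so the EQ does not stall.
 */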

/* Just read and notify events without processing them.
 * Used at the time of destroying event queues */
static void be_eq_clean(struct be_adapter *adapter,
			struct be_eq_obj *eq_obj)
{
	struct be_eq_entry *eqe;
	u16 num = 0;

	while ((eqe = event_get(eq_obj)) != NULL) {
		eqe->evt = 0;
		num++;
	}

	if (num)
		be_eq_notify(adapter, eq_obj->q.id, false, true, num);
}

static void be_rx_q_clean(struct be_adapter *adapter, struct be_rx_obj *rxo)
{
	struct be_rx_page_info *page_info;
	struct be_queue_info *rxq = &rxo->q;
	struct be_queue_info *rx_cq = &rxo->cq;
	struct be_rx_compl_info *rxcp;
	u16 tail;

	/* First clean up any pending rx completions */
	while ((rxcp = be_rx_compl_get(rxo)) != NULL) {
		be_rx_compl_discard(adapter, rxo, rxcp);
		be_cq_notify(adapter, rx_cq->id, false, 1);
	}

	/* Then free the posted rx buffers that were not used */
	tail = (rxq->head + rxq->len - atomic_read(&rxq->used)) % rxq->len;
	for (; atomic_read(&rxq->used) > 0; index_inc(&tail, rxq->len)) {
		page_info = get_rx_page_info(adapter, rxo, tail);
		put_page(page_info->page);
		memset(page_info, 0, sizeof(*page_info));
	}
	BUG_ON(atomic_read(&rxq->used));
	rxq->tail = rxq->head = 0;
}

static void be_tx_compl_clean(struct be_adapter *adapter,
			      struct be_tx_obj *txo)
{
	struct be_queue_info *tx_cq = &txo->cq;
	struct be_queue_info *txq = &txo->q;
	struct be_eth_tx_compl *txcp;
	u16 end_idx, cmpl = 0, timeo = 0, num_wrbs = 0;
	struct sk_buff **sent_skbs = txo->sent_skb_list;
	struct sk_buff *sent_skb;
	bool dummy_wrb;

	/* Wait for a max of 200ms for all the tx-completions to arrive. */
	do {
		while ((txcp = be_tx_compl_get(tx_cq))) {
			end_idx = AMAP_GET_BITS(struct amap_eth_tx_compl,
						wrb_index, txcp);
			num_wrbs += be_tx_compl_process(adapter, txo, end_idx);
			cmpl++;
		}
		if (cmpl) {
			be_cq_notify(adapter, tx_cq->id, false, cmpl);
			atomic_sub(num_wrbs, &txq->used);
			cmpl = 0;
			num_wrbs = 0;
		}

		if (atomic_read(&txq->used) == 0 || ++timeo > 200)
			break;

		mdelay(1);
	} while (true);

	if (atomic_read(&txq->used))
		dev_err(&adapter->pdev->dev, "%d pending tx-completions\n",
			atomic_read(&txq->used));

	/* free posted tx for which compls will never arrive */
	while (atomic_read(&txq->used)) {
		sent_skb = sent_skbs[txq->tail];
		end_idx = txq->tail;
		index_adv(&end_idx,
			  wrb_cnt_for_skb(adapter, sent_skb, &dummy_wrb) - 1,
			  txq->len);
		num_wrbs = be_tx_compl_process(adapter, txo, end_idx);
		atomic_sub(num_wrbs, &txq->used);
	}
}

static void be_mcc_queues_destroy(struct be_adapter *adapter)
{
	struct be_queue_info *q;

	q = &adapter->mcc_obj.q;
	if (q->created)
		be_cmd_q_destroy(adapter, q, QTYPE_MCCQ);
	be_queue_free(adapter, q);

	q = &adapter->mcc_obj.cq;
	if (q->created)
		be_cmd_q_destroy(adapter, q, QTYPE_CQ);
	be_queue_free(adapter, q);
}

/* Must be called only after TX qs are created as MCC shares TX EQ */
static int be_mcc_queues_create(struct be_adapter *adapter)
{
	struct be_queue_info *q, *cq;

	/* Alloc MCC compl queue */
	cq = &adapter->mcc_obj.cq;
	if (be_queue_alloc(adapter, cq, MCC_CQ_LEN,
			   sizeof(struct be_mcc_compl)))
		goto err;

	/* Ask BE to create MCC compl queue; share TX's eq */
	if (be_cmd_cq_create(adapter, cq, &adapter->tx_eq.q, false, true, 0))
		goto mcc_cq_free;

	/* Alloc MCC queue */
	q = &adapter->mcc_obj.q;
	if (be_queue_alloc(adapter, q, MCC_Q_LEN, sizeof(struct be_mcc_wrb)))
		goto mcc_cq_destroy;

	/* Ask BE to create MCC queue */
	if (be_cmd_mccq_create(adapter, q, cq))
		goto mcc_q_free;

	return 0;

mcc_q_free:
	be_queue_free(adapter, q);
mcc_cq_destroy:
	be_cmd_q_destroy(adapter, cq, QTYPE_CQ);
mcc_cq_free:
	be_queue_free(adapter, cq);
err:
	return -1;
}

static void be_tx_queues_destroy(struct be_adapter *adapter)
{
	struct be_queue_info *q;
	struct be_tx_obj *txo;
	u8 i;

	for_all_tx_queues(adapter, txo, i) {
		q = &txo->q;
		if (q->created)
			be_cmd_q_destroy(adapter, q, QTYPE_TXQ);
		be_queue_free(adapter, q);

		q = &txo->cq;
		if (q->created)
			be_cmd_q_destroy(adapter, q, QTYPE_CQ);
		be_queue_free(adapter, q);
	}

	/* Clear any residual events */
	be_eq_clean(adapter, &adapter->tx_eq);

	q = &adapter->tx_eq.q;
	if (q->created)
		be_cmd_q_destroy(adapter, q, QTYPE_EQ);
	be_queue_free(adapter, q);
}

/* One TX event queue is shared by all TX compl qs */
static int be_tx_queues_create(struct be_adapter *adapter)
{
	struct be_queue_info *eq, *q, *cq;
	struct be_tx_obj *txo;
	u8 i;

	adapter->tx_eq.max_eqd = 0;
	adapter->tx_eq.min_eqd = 0;
	adapter->tx_eq.cur_eqd = 96;
	adapter->tx_eq.enable_aic = false;

	eq = &adapter->tx_eq.q;
	if (be_queue_alloc(adapter, eq, EVNT_Q_LEN,
			   sizeof(struct be_eq_entry)))
		return -1;

	if (be_cmd_eq_create(adapter, eq, adapter->tx_eq.cur_eqd))
		goto err;
	adapter->tx_eq.eq_idx = adapter->eq_next_idx++;

	for_all_tx_queues(adapter, txo, i) {
		cq = &txo->cq;
		if (be_queue_alloc(adapter, cq, TX_CQ_LEN,
				   sizeof(struct be_eth_tx_compl)))
			goto err;

		if (be_cmd_cq_create(adapter, cq, eq, false, false, 3))
			goto err;

		q = &txo->q;
		if (be_queue_alloc(adapter, q, TX_Q_LEN,
				   sizeof(struct be_eth_wrb)))
			goto err;

		if (be_cmd_txq_create(adapter, q, cq))
			goto err;
	}
	return 0;

err:
	be_tx_queues_destroy(adapter);
	return -1;
}

static void be_rx_queues_destroy(struct be_adapter *adapter)
{
	struct be_queue_info *q;
	struct be_rx_obj *rxo;
	int i;

	for_all_rx_queues(adapter, rxo, i) {
		be_queue_free(adapter, &rxo->q);

		q = &rxo->cq;
		if (q->created)
			be_cmd_q_destroy(adapter, q, QTYPE_CQ);
		be_queue_free(adapter, q);

		q = &rxo->rx_eq.q;
		if (q->created)
			be_cmd_q_destroy(adapter, q, QTYPE_EQ);
		be_queue_free(adapter, q);
	}
}

static u32 be_num_rxqs_want(struct be_adapter *adapter)
{
	if ((adapter->function_caps & BE_FUNCTION_CAPS_RSS) &&
	    !adapter->sriov_enabled && !(adapter->function_mode & 0x400)) {
		return 1 + MAX_RSS_QS; /* one default non-RSS queue */
	} else {
		dev_warn(&adapter->pdev->dev,
			 "No support for multiple RX queues\n");
		return 1;
	}
}
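/* The magic 0x400 tested above (and again when sanitizing rxcp->vlanf in
 * be_rx_compl_get()) appears to be the multi-channel/FLEX10 bit of
 * function_mode; later versions of be_cmds.h give it a named constant.
 * When it is set, multiple RX queues are not used.
 */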

static int be_rx_queues_create(struct be_adapter *adapter)
{
	struct be_queue_info *eq, *q, *cq;
	struct be_rx_obj *rxo;
	int rc, i;

	adapter->num_rx_qs = min(be_num_rxqs_want(adapter),
				 msix_enabled(adapter) ?
					adapter->num_msix_vec - 1 : 1);
	if (adapter->num_rx_qs != MAX_RX_QS)
		dev_warn(&adapter->pdev->dev,
			 "Can create only %d RX queues", adapter->num_rx_qs);

	adapter->big_page_size = (1 << get_order(rx_frag_size)) * PAGE_SIZE;
	for_all_rx_queues(adapter, rxo, i) {
		rxo->adapter = adapter;
		rxo->rx_eq.max_eqd = BE_MAX_EQD;
		rxo->rx_eq.enable_aic = true;

		/* EQ */
		eq = &rxo->rx_eq.q;
		rc = be_queue_alloc(adapter, eq, EVNT_Q_LEN,
				    sizeof(struct be_eq_entry));
		if (rc)
			goto err;

		rc = be_cmd_eq_create(adapter, eq, rxo->rx_eq.cur_eqd);
		if (rc)
			goto err;

		rxo->rx_eq.eq_idx = adapter->eq_next_idx++;

		/* CQ */
		cq = &rxo->cq;
		rc = be_queue_alloc(adapter, cq, RX_CQ_LEN,
				    sizeof(struct be_eth_rx_compl));
		if (rc)
			goto err;

		rc = be_cmd_cq_create(adapter, cq, eq, false, false, 3);
		if (rc)
			goto err;

		/* Rx Q - will be created in be_open() */
		q = &rxo->q;
		rc = be_queue_alloc(adapter, q, RX_Q_LEN,
				    sizeof(struct be_eth_rx_d));
		if (rc)
			goto err;
	}

	return 0;
err:
	be_rx_queues_destroy(adapter);
	return -1;
}

static bool event_peek(struct be_eq_obj *eq_obj)
{
	struct be_eq_entry *eqe = queue_tail_node(&eq_obj->q);

	if (!eqe->evt)
		return false;
	else
		return true;
}

static irqreturn_t be_intx(int irq, void *dev)
{
	struct be_adapter *adapter = dev;
	struct be_rx_obj *rxo;
	int isr, i, tx = 0, rx = 0;

	if (lancer_chip(adapter)) {
		if (event_peek(&adapter->tx_eq))
			tx = event_handle(adapter, &adapter->tx_eq, false);
		for_all_rx_queues(adapter, rxo, i) {
			if (event_peek(&rxo->rx_eq))
				rx |= event_handle(adapter, &rxo->rx_eq, true);
		}

		if (!(tx || rx))
			return IRQ_NONE;

	} else {
		isr = ioread32(adapter->csr + CEV_ISR0_OFFSET +
			       (adapter->tx_eq.q.id / 8) * CEV_ISR_SIZE);
		if (!isr)
			return IRQ_NONE;

		if ((1 << adapter->tx_eq.eq_idx & isr))
			event_handle(adapter, &adapter->tx_eq, false);

		for_all_rx_queues(adapter, rxo, i) {
			if ((1 << rxo->rx_eq.eq_idx & isr))
				event_handle(adapter, &rxo->rx_eq, true);
		}
	}

	return IRQ_HANDLED;
}

static irqreturn_t be_msix_rx(int irq, void *dev)
{
	struct be_rx_obj *rxo = dev;
	struct be_adapter *adapter = rxo->adapter;

	event_handle(adapter, &rxo->rx_eq, true);

	return IRQ_HANDLED;
}

static irqreturn_t be_msix_tx_mcc(int irq, void *dev)
{
	struct be_adapter *adapter = dev;

	event_handle(adapter, &adapter->tx_eq, false);

	return IRQ_HANDLED;
}

static inline bool do_gro(struct be_rx_compl_info *rxcp)
{
	return (rxcp->tcpf && !rxcp->err) ? true : false;
}
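/* GRO is attempted only for error-free TCP completions: non-TCP frames
 * gain nothing from coalescing, and errored frames must take the regular
 * be_rx_compl_process() path so their checksum state is reported
 * correctly.
 */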

static int be_poll_rx(struct napi_struct *napi, int budget)
{
	struct be_eq_obj *rx_eq = container_of(napi, struct be_eq_obj, napi);
	struct be_rx_obj *rxo = container_of(rx_eq, struct be_rx_obj, rx_eq);
	struct be_adapter *adapter = rxo->adapter;
	struct be_queue_info *rx_cq = &rxo->cq;
	struct be_rx_compl_info *rxcp;
	u32 work_done;

	rx_stats(rxo)->rx_polls++;
	for (work_done = 0; work_done < budget; work_done++) {
		rxcp = be_rx_compl_get(rxo);
		if (!rxcp)
			break;

		/* Ignore flush completions */
		if (rxcp->num_rcvd && rxcp->pkt_size) {
			if (do_gro(rxcp))
				be_rx_compl_process_gro(adapter, rxo, rxcp);
			else
				be_rx_compl_process(adapter, rxo, rxcp);
		} else if (rxcp->pkt_size == 0) {
			be_rx_compl_discard(adapter, rxo, rxcp);
		}

		be_rx_stats_update(rxo, rxcp);
	}

	/* Refill the queue */
	if (atomic_read(&rxo->q.used) < RX_FRAGS_REFILL_WM)
		be_post_rx_frags(rxo, GFP_ATOMIC);

	/* All consumed */
	if (work_done < budget) {
		napi_complete(napi);
		be_cq_notify(adapter, rx_cq->id, true, work_done);
	} else {
		/* More to be consumed; continue with interrupts disabled */
		be_cq_notify(adapter, rx_cq->id, false, work_done);
	}
	return work_done;
}
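/* This follows the usual NAPI contract: consuming less than 'budget'
 * means the CQ is drained, so polling stops (napi_complete()) and the CQ
 * is re-armed to interrupt on the next completion; if the budget was
 * exhausted, the CQ is acked without re-arming and the core polls again.
 */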

/* As TX and MCC share the same EQ, check for both TX and MCC completions.
 * For TX/MCC we don't honour budget; consume everything
 */
static int be_poll_tx_mcc(struct napi_struct *napi, int budget)
{
	struct be_eq_obj *tx_eq = container_of(napi, struct be_eq_obj, napi);
	struct be_adapter *adapter =
		container_of(tx_eq, struct be_adapter, tx_eq);
	struct be_tx_obj *txo;
	struct be_eth_tx_compl *txcp;
	int tx_compl, mcc_compl, status = 0;
	u8 i;
	u16 num_wrbs;

	for_all_tx_queues(adapter, txo, i) {
		tx_compl = 0;
		num_wrbs = 0;
		while ((txcp = be_tx_compl_get(&txo->cq))) {
			num_wrbs += be_tx_compl_process(adapter, txo,
					AMAP_GET_BITS(struct amap_eth_tx_compl,
						      wrb_index, txcp));
			tx_compl++;
		}
		if (tx_compl) {
			be_cq_notify(adapter, txo->cq.id, true, tx_compl);

			atomic_sub(num_wrbs, &txo->q.used);

			/* As Tx wrbs have been freed up, wake up netdev queue
			 * if it was stopped due to lack of tx wrbs. */
			if (__netif_subqueue_stopped(adapter->netdev, i) &&
			    atomic_read(&txo->q.used) < txo->q.len / 2) {
				netif_wake_subqueue(adapter->netdev, i);
			}

			adapter->drv_stats.tx_events++;
			tx_stats(txo)->tx_compl += tx_compl;
		}
	}

	mcc_compl = be_process_mcc(adapter, &status);

	if (mcc_compl) {
		struct be_mcc_obj *mcc_obj = &adapter->mcc_obj;
		be_cq_notify(adapter, mcc_obj->cq.id, true, mcc_compl);
	}

	napi_complete(napi);

	be_eq_notify(adapter, tx_eq->q.id, true, false, 0);
	return 1;
}

void be_detect_dump_ue(struct be_adapter *adapter)
{
	u32 ue_status_lo, ue_status_hi, ue_status_lo_mask, ue_status_hi_mask;
	u32 i;

	pci_read_config_dword(adapter->pdev,
			      PCICFG_UE_STATUS_LOW, &ue_status_lo);
	pci_read_config_dword(adapter->pdev,
			      PCICFG_UE_STATUS_HIGH, &ue_status_hi);
	pci_read_config_dword(adapter->pdev,
			      PCICFG_UE_STATUS_LOW_MASK, &ue_status_lo_mask);
	pci_read_config_dword(adapter->pdev,
			      PCICFG_UE_STATUS_HI_MASK, &ue_status_hi_mask);

	ue_status_lo = (ue_status_lo & (~ue_status_lo_mask));
	ue_status_hi = (ue_status_hi & (~ue_status_hi_mask));

	if (ue_status_lo || ue_status_hi) {
		adapter->ue_detected = true;
		adapter->eeh_err = true;
		dev_err(&adapter->pdev->dev, "UE Detected!!\n");
	}

	if (ue_status_lo) {
		for (i = 0; ue_status_lo; ue_status_lo >>= 1, i++) {
			if (ue_status_lo & 1)
				dev_err(&adapter->pdev->dev,
					"UE: %s bit set\n",
					ue_status_low_desc[i]);
		}
	}
	if (ue_status_hi) {
		for (i = 0; ue_status_hi; ue_status_hi >>= 1, i++) {
			if (ue_status_hi & 1)
				dev_err(&adapter->pdev->dev,
					"UE: %s bit set\n",
					ue_status_hi_desc[i]);
		}
	}
}
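/* Decoding sketch: after masking, each set bit i in the low/high status
 * words indexes ue_status_low_desc[i] / ue_status_hi_desc[i]. A masked
 * ue_status_lo of 0x5, for example, would log the entries for bits 0
 * and 2.
 */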

static void be_worker(struct work_struct *work)
{
	struct be_adapter *adapter =
		container_of(work, struct be_adapter, work.work);
	struct be_rx_obj *rxo;
	int i;

	if (!adapter->ue_detected && !lancer_chip(adapter))
		be_detect_dump_ue(adapter);

	/* When interrupts are not yet enabled, just reap any pending
	 * mcc completions */
	if (!netif_running(adapter->netdev)) {
		int mcc_compl, status = 0;

		mcc_compl = be_process_mcc(adapter, &status);

		if (mcc_compl) {
			struct be_mcc_obj *mcc_obj = &adapter->mcc_obj;
			be_cq_notify(adapter, mcc_obj->cq.id, false, mcc_compl);
		}

		goto reschedule;
	}

	if (!adapter->stats_cmd_sent) {
		if (lancer_chip(adapter))
			lancer_cmd_get_pport_stats(adapter,
						   &adapter->stats_cmd);
		else
			be_cmd_get_stats(adapter, &adapter->stats_cmd);
	}

	for_all_rx_queues(adapter, rxo, i) {
		be_rx_eqd_update(adapter, rxo);

		if (rxo->rx_post_starved) {
			rxo->rx_post_starved = false;
			be_post_rx_frags(rxo, GFP_KERNEL);
		}
	}

reschedule:
	adapter->work_counter++;
	schedule_delayed_work(&adapter->work, msecs_to_jiffies(1000));
}

static void be_msix_disable(struct be_adapter *adapter)
{
	if (msix_enabled(adapter)) {
		pci_disable_msix(adapter->pdev);
		adapter->num_msix_vec = 0;
	}
}

static void be_msix_enable(struct be_adapter *adapter)
{
#define BE_MIN_MSIX_VECTORS	(1 + 1)	/* Rx + Tx */
	int i, status, num_vec;

	num_vec = be_num_rxqs_want(adapter) + 1;

	for (i = 0; i < num_vec; i++)
		adapter->msix_entries[i].entry = i;

	status = pci_enable_msix(adapter->pdev, adapter->msix_entries, num_vec);
	if (status == 0) {
		goto done;
	} else if (status >= BE_MIN_MSIX_VECTORS) {
		num_vec = status;
		if (pci_enable_msix(adapter->pdev, adapter->msix_entries,
				    num_vec) == 0)
			goto done;
	}
	return;
done:
	adapter->num_msix_vec = num_vec;
	return;
}
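/* This relies on the return convention of the old pci_enable_msix(): 0 on
 * success, a positive count meaning "only this many vectors are
 * available" (in which case the request is retried with that count), or a
 * negative errno, after which be_irq_register() falls back to INTx.
 */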

static void be_sriov_enable(struct be_adapter *adapter)
{
	be_check_sriov_fn_type(adapter);
#ifdef CONFIG_PCI_IOV
	if (be_physfn(adapter) && num_vfs) {
		int status, pos;
		u16 nvfs;

		pos = pci_find_ext_capability(adapter->pdev,
					      PCI_EXT_CAP_ID_SRIOV);
		pci_read_config_word(adapter->pdev,
				     pos + PCI_SRIOV_TOTAL_VF, &nvfs);

		if (num_vfs > nvfs) {
			dev_info(&adapter->pdev->dev,
				 "Device supports %d VFs and not %d\n",
				 nvfs, num_vfs);
			num_vfs = nvfs;
		}

		status = pci_enable_sriov(adapter->pdev, num_vfs);
		adapter->sriov_enabled = status ? false : true;
	}
#endif
}

static void be_sriov_disable(struct be_adapter *adapter)
{
#ifdef CONFIG_PCI_IOV
	if (adapter->sriov_enabled) {
		pci_disable_sriov(adapter->pdev);
		adapter->sriov_enabled = false;
	}
#endif
}

static inline int be_msix_vec_get(struct be_adapter *adapter,
				  struct be_eq_obj *eq_obj)
{
	return adapter->msix_entries[eq_obj->eq_idx].vector;
}

static int be_request_irq(struct be_adapter *adapter,
			  struct be_eq_obj *eq_obj,
			  void *handler, char *desc, void *context)
{
	struct net_device *netdev = adapter->netdev;
	int vec;

	sprintf(eq_obj->desc, "%s-%s", netdev->name, desc);
	vec = be_msix_vec_get(adapter, eq_obj);
	return request_irq(vec, handler, 0, eq_obj->desc, context);
}

static void be_free_irq(struct be_adapter *adapter, struct be_eq_obj *eq_obj,
			void *context)
{
	int vec = be_msix_vec_get(adapter, eq_obj);

	free_irq(vec, context);
}

static int be_msix_register(struct be_adapter *adapter)
{
	struct be_rx_obj *rxo;
	int status, i;
	char qname[10];

	status = be_request_irq(adapter, &adapter->tx_eq, be_msix_tx_mcc, "tx",
				adapter);
	if (status)
		goto err;

	for_all_rx_queues(adapter, rxo, i) {
		sprintf(qname, "rxq%d", i);
		status = be_request_irq(adapter, &rxo->rx_eq, be_msix_rx,
					qname, rxo);
		if (status)
			goto err_msix;
	}

	return 0;

err_msix:
	be_free_irq(adapter, &adapter->tx_eq, adapter);

	for (i--, rxo = &adapter->rx_obj[i]; i >= 0; i--, rxo--)
		be_free_irq(adapter, &rxo->rx_eq, rxo);

err:
	dev_warn(&adapter->pdev->dev,
		 "MSIX Request IRQ failed - err %d\n", status);
	be_msix_disable(adapter);
	return status;
}

static int be_irq_register(struct be_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	int status;

	if (msix_enabled(adapter)) {
		status = be_msix_register(adapter);
		if (status == 0)
			goto done;
		/* INTx is not supported for VF */
		if (!be_physfn(adapter))
			return status;
	}

	/* INTx */
	netdev->irq = adapter->pdev->irq;
	status = request_irq(netdev->irq, be_intx, IRQF_SHARED, netdev->name,
			     adapter);
	if (status) {
		dev_err(&adapter->pdev->dev,
			"INTx request IRQ failed - err %d\n", status);
		return status;
	}
done:
	adapter->isr_registered = true;
	return 0;
}

static void be_irq_unregister(struct be_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	struct be_rx_obj *rxo;
	int i;

	if (!adapter->isr_registered)
		return;

	/* INTx */
	if (!msix_enabled(adapter)) {
		free_irq(netdev->irq, adapter);
		goto done;
	}

	/* MSIx */
	be_free_irq(adapter, &adapter->tx_eq, adapter);

	for_all_rx_queues(adapter, rxo, i)
		be_free_irq(adapter, &rxo->rx_eq, rxo);

done:
	adapter->isr_registered = false;
}

static void be_rx_queues_clear(struct be_adapter *adapter)
{
	struct be_queue_info *q;
	struct be_rx_obj *rxo;
	int i;

	for_all_rx_queues(adapter, rxo, i) {
		q = &rxo->q;
		if (q->created) {
			be_cmd_rxq_destroy(adapter, q);
			/* After the rxq is invalidated, wait a grace period
			 * of 1ms for all DMA to end and the flush completion
			 * to arrive
			 */
			mdelay(1);
			be_rx_q_clean(adapter, rxo);
		}

		/* Clear any residual events */
		q = &rxo->rx_eq.q;
		if (q->created)
			be_eq_clean(adapter, &rxo->rx_eq);
	}
}

static int be_close(struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_rx_obj *rxo;
	struct be_tx_obj *txo;
	struct be_eq_obj *tx_eq = &adapter->tx_eq;
	int vec, i;

	be_async_mcc_disable(adapter);

	netif_carrier_off(netdev);
	adapter->link_up = false;

	if (!lancer_chip(adapter))
		be_intr_set(adapter, false);

	for_all_rx_queues(adapter, rxo, i)
		napi_disable(&rxo->rx_eq.napi);

	napi_disable(&tx_eq->napi);

	if (lancer_chip(adapter)) {
		be_cq_notify(adapter, adapter->mcc_obj.cq.id, false, 0);
		for_all_rx_queues(adapter, rxo, i)
			be_cq_notify(adapter, rxo->cq.id, false, 0);
		for_all_tx_queues(adapter, txo, i)
			be_cq_notify(adapter, txo->cq.id, false, 0);
	}

	if (msix_enabled(adapter)) {
		vec = be_msix_vec_get(adapter, tx_eq);
		synchronize_irq(vec);

		for_all_rx_queues(adapter, rxo, i) {
			vec = be_msix_vec_get(adapter, &rxo->rx_eq);
			synchronize_irq(vec);
		}
	} else {
		synchronize_irq(netdev->irq);
	}
	be_irq_unregister(adapter);

	/* Wait for all pending tx completions to arrive so that
	 * all tx skbs are freed.
	 */
	for_all_tx_queues(adapter, txo, i)
		be_tx_compl_clean(adapter, txo);

	be_rx_queues_clear(adapter);
	return 0;
}

static int be_rx_queues_setup(struct be_adapter *adapter)
{
	struct be_rx_obj *rxo;
	int rc, i;
	u8 rsstable[MAX_RSS_QS];

	for_all_rx_queues(adapter, rxo, i) {
		rc = be_cmd_rxq_create(adapter, &rxo->q, rxo->cq.id,
				       rx_frag_size, BE_MAX_JUMBO_FRAME_SIZE,
				       adapter->if_handle,
				       (i > 0) ? 1 : 0 /* rss enable */,
				       &rxo->rss_id);
		if (rc)
			return rc;
	}

	if (be_multi_rxq(adapter)) {
		for_all_rss_queues(adapter, rxo, i)
			rsstable[i] = rxo->rss_id;

		rc = be_cmd_rss_config(adapter, rsstable,
				       adapter->num_rx_qs - 1);
		if (rc)
			return rc;
	}

	/* First time posting */
	for_all_rx_queues(adapter, rxo, i) {
		be_post_rx_frags(rxo, GFP_KERNEL);
		napi_enable(&rxo->rx_eq.napi);
	}
	return 0;
}
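/* Queue 0 is created with rss enable == 0 and serves as the default queue
 * for non-RSS traffic; queues 1..num_rx_qs-1 are RSS-capable and their
 * rss_ids form the indirection table handed to firmware through
 * be_cmd_rss_config().
 */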

static int be_open(struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_eq_obj *tx_eq = &adapter->tx_eq;
	struct be_rx_obj *rxo;
	bool link_up;
	int status, i;
	u8 mac_speed;
	u16 link_speed;

	status = be_rx_queues_setup(adapter);
	if (status)
		goto err;

	napi_enable(&tx_eq->napi);

	be_irq_register(adapter);

	if (!lancer_chip(adapter))
		be_intr_set(adapter, true);

	/* The evt queues are created in unarmed state; arm them */
	for_all_rx_queues(adapter, rxo, i) {
		be_eq_notify(adapter, rxo->rx_eq.q.id, true, false, 0);
		be_cq_notify(adapter, rxo->cq.id, true, 0);
	}
	be_eq_notify(adapter, tx_eq->q.id, true, false, 0);

	/* Now that interrupts are on we can process async mcc */
	be_async_mcc_enable(adapter);

	status = be_cmd_link_status_query(adapter, &link_up, &mac_speed,
					  &link_speed, 0);
	if (status)
		goto err;
	be_link_status_update(adapter, link_up);

	if (be_physfn(adapter)) {
		status = be_vid_config(adapter, false, 0);
		if (status)
			goto err;

		status = be_cmd_set_flow_control(adapter,
						 adapter->tx_fc, adapter->rx_fc);
		if (status)
			goto err;
	}

	return 0;
err:
	be_close(adapter->netdev);
	return -EIO;
}

static int be_setup_wol(struct be_adapter *adapter, bool enable)
{
	struct be_dma_mem cmd;
	int status = 0;
	u8 mac[ETH_ALEN];

	memset(mac, 0, ETH_ALEN);

	cmd.size = sizeof(struct be_cmd_req_acpi_wol_magic_config);
	cmd.va = dma_alloc_coherent(&adapter->pdev->dev, cmd.size, &cmd.dma,
				    GFP_KERNEL);
	if (cmd.va == NULL)
		return -1;
	memset(cmd.va, 0, cmd.size);

	if (enable) {
		status = pci_write_config_dword(adapter->pdev,
			PCICFG_PM_CONTROL_OFFSET, PCICFG_PM_CONTROL_MASK);
		if (status) {
			dev_err(&adapter->pdev->dev,
				"Could not enable Wake-on-lan\n");
			dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va,
					  cmd.dma);
			return status;
		}
		status = be_cmd_enable_magic_wol(adapter,
						 adapter->netdev->dev_addr, &cmd);
		pci_enable_wake(adapter->pdev, PCI_D3hot, 1);
		pci_enable_wake(adapter->pdev, PCI_D3cold, 1);
	} else {
		status = be_cmd_enable_magic_wol(adapter, mac, &cmd);
		pci_enable_wake(adapter->pdev, PCI_D3hot, 0);
		pci_enable_wake(adapter->pdev, PCI_D3cold, 0);
	}

	dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va, cmd.dma);
	return status;
}

/*
 * Generate a seed MAC address from the PF MAC address using jhash.
 * MAC addresses for VFs are assigned incrementally starting from the seed.
 * These addresses are programmed in the ASIC by the PF and the VF driver
 * queries for its MAC address during its probe.
 */
static inline int be_vf_eth_addr_config(struct be_adapter *adapter)
{
	u32 vf = 0;
	int status = 0;
	u8 mac[ETH_ALEN];

	be_vf_eth_addr_generate(adapter, mac);

	for (vf = 0; vf < num_vfs; vf++) {
		status = be_cmd_pmac_add(adapter, mac,
					 adapter->vf_cfg[vf].vf_if_handle,
					 &adapter->vf_cfg[vf].vf_pmac_id,
					 vf + 1);
		if (status)
			dev_err(&adapter->pdev->dev,
				"Mac address add failed for VF %d\n", vf);
		else
			memcpy(adapter->vf_cfg[vf].vf_mac_addr, mac, ETH_ALEN);

		mac[5] += 1;
	}
	return status;
}
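/* For illustration: if the jhash-derived seed happens to be
 * 02:aa:bb:cc:dd:10, VF 0 is given ...:10, VF 1 ...:11, and so on; only
 * mac[5] is incremented, so the last octet wraps at 0xff without carrying
 * into mac[4].
 */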

static inline void be_vf_eth_addr_rem(struct be_adapter *adapter)
{
	u32 vf;

	for (vf = 0; vf < num_vfs; vf++) {
		if (adapter->vf_cfg[vf].vf_pmac_id != BE_INVALID_PMAC_ID)
			be_cmd_pmac_del(adapter,
					adapter->vf_cfg[vf].vf_if_handle,
					adapter->vf_cfg[vf].vf_pmac_id, vf + 1);
	}
}
2411
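/*
 * Bring the adapter to an operational state: create the PF interface
 * (and one interface per VF when SR-IOV is enabled), then the TX, RX
 * and MCC queue sets. On failure, everything created so far is torn
 * down in reverse order.
 */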
static int be_setup(struct be_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	u32 cap_flags, en_flags, vf = 0;
	int status;
	u8 mac[ETH_ALEN];

	be_cmd_req_native_mode(adapter);

	cap_flags = en_flags = BE_IF_FLAGS_UNTAGGED |
				BE_IF_FLAGS_BROADCAST |
				BE_IF_FLAGS_MULTICAST;

	if (be_physfn(adapter)) {
		cap_flags |= BE_IF_FLAGS_MCAST_PROMISCUOUS |
				BE_IF_FLAGS_PROMISCUOUS |
				BE_IF_FLAGS_PASS_L3L4_ERRORS;
		en_flags |= BE_IF_FLAGS_PASS_L3L4_ERRORS;

		if (adapter->function_caps & BE_FUNCTION_CAPS_RSS) {
			cap_flags |= BE_IF_FLAGS_RSS;
			en_flags |= BE_IF_FLAGS_RSS;
		}
	}

	status = be_cmd_if_create(adapter, cap_flags, en_flags,
			netdev->dev_addr, false/* pmac_invalid */,
			&adapter->if_handle, &adapter->pmac_id, 0);
	if (status != 0)
		goto do_none;

	if (be_physfn(adapter)) {
		if (adapter->sriov_enabled) {
			while (vf < num_vfs) {
				cap_flags = en_flags = BE_IF_FLAGS_UNTAGGED |
							BE_IF_FLAGS_BROADCAST;
				status = be_cmd_if_create(adapter, cap_flags,
					en_flags, mac, true,
					&adapter->vf_cfg[vf].vf_if_handle,
					NULL, vf+1);
				if (status) {
					dev_err(&adapter->pdev->dev,
					"Interface Create failed for VF %d\n",
					vf);
					goto if_destroy;
				}
				adapter->vf_cfg[vf].vf_pmac_id =
						BE_INVALID_PMAC_ID;
				vf++;
			}
		}
	} else {
		status = be_cmd_mac_addr_query(adapter, mac,
			MAC_ADDRESS_TYPE_NETWORK, false, adapter->if_handle);
		if (!status) {
			memcpy(adapter->netdev->dev_addr, mac, ETH_ALEN);
			memcpy(adapter->netdev->perm_addr, mac, ETH_ALEN);
		}
	}

	status = be_tx_queues_create(adapter);
	if (status != 0)
		goto if_destroy;

	status = be_rx_queues_create(adapter);
	if (status != 0)
		goto tx_qs_destroy;

	/* Allow all priorities by default. A GRP5 evt may modify this */
	adapter->vlan_prio_bmap = 0xff;

	status = be_mcc_queues_create(adapter);
	if (status != 0)
		goto rx_qs_destroy;

	adapter->link_speed = -1;

	return 0;

rx_qs_destroy:
	be_rx_queues_destroy(adapter);
tx_qs_destroy:
	be_tx_queues_destroy(adapter);
if_destroy:
	if (be_physfn(adapter) && adapter->sriov_enabled)
		for (vf = 0; vf < num_vfs; vf++)
			if (adapter->vf_cfg[vf].vf_if_handle)
				be_cmd_if_destroy(adapter,
					adapter->vf_cfg[vf].vf_if_handle,
					vf + 1);
	be_cmd_if_destroy(adapter, adapter->if_handle, 0);
do_none:
	return status;
}

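/*
 * Undo be_setup(): release VF MAC addresses and interfaces, destroy the
 * queue sets, and tell the firmware the driver is done issuing commands.
 */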
static int be_clear(struct be_adapter *adapter)
{
	int vf;

	if (be_physfn(adapter) && adapter->sriov_enabled)
		be_vf_eth_addr_rem(adapter);

	be_mcc_queues_destroy(adapter);
	be_rx_queues_destroy(adapter);
	be_tx_queues_destroy(adapter);
	adapter->eq_next_idx = 0;

	if (be_physfn(adapter) && adapter->sriov_enabled)
		for (vf = 0; vf < num_vfs; vf++)
			if (adapter->vf_cfg[vf].vf_if_handle)
				be_cmd_if_destroy(adapter,
					adapter->vf_cfg[vf].vf_if_handle,
					vf + 1);

	be_cmd_if_destroy(adapter, adapter->if_handle, 0);

	adapter->be3_native = 0;

	/* tell fw we're done with firing cmds */
	be_cmd_fw_clean(adapter);
	return 0;
}

#define FW_FILE_HDR_SIGN 	"ServerEngines Corp. "
static bool be_flash_redboot(struct be_adapter *adapter,
			const u8 *p, u32 img_start, int image_size,
			int hdr_size)
{
	u32 crc_offset;
	u8 flashed_crc[4];
	int status;

	crc_offset = hdr_size + img_start + image_size - 4;

	p += crc_offset;

	status = be_cmd_get_flash_crc(adapter, flashed_crc,
			(image_size - 4));
	if (status) {
		dev_err(&adapter->pdev->dev,
		"could not get crc from flash, not flashing redboot\n");
		return false;
	}

	/* update redboot only if crc does not match */
	if (!memcmp(flashed_crc, p, 4))
		return false;
	else
		return true;
}

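/*
 * Flash each firmware component listed in the per-generation tables
 * below. Components are streamed to the card in 32KB chunks;
 * intermediate chunks use FLASHROM_OPER_SAVE and the final chunk uses
 * FLASHROM_OPER_FLASH to commit the component.
 */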
static int be_flash_data(struct be_adapter *adapter,
			const struct firmware *fw,
			struct be_dma_mem *flash_cmd, int num_of_images)
{
	int status = 0, i, filehdr_size = 0;
	u32 total_bytes = 0, flash_op;
	int num_bytes;
	const u8 *p = fw->data;
	struct be_cmd_write_flashrom *req = flash_cmd->va;
	const struct flash_comp *pflashcomp;
	int num_comp;

	static const struct flash_comp gen3_flash_types[9] = {
		{ FLASH_iSCSI_PRIMARY_IMAGE_START_g3, IMG_TYPE_ISCSI_ACTIVE,
			FLASH_IMAGE_MAX_SIZE_g3},
		{ FLASH_REDBOOT_START_g3, IMG_TYPE_REDBOOT,
			FLASH_REDBOOT_IMAGE_MAX_SIZE_g3},
		{ FLASH_iSCSI_BIOS_START_g3, IMG_TYPE_BIOS,
			FLASH_BIOS_IMAGE_MAX_SIZE_g3},
		{ FLASH_PXE_BIOS_START_g3, IMG_TYPE_PXE_BIOS,
			FLASH_BIOS_IMAGE_MAX_SIZE_g3},
		{ FLASH_FCoE_BIOS_START_g3, IMG_TYPE_FCOE_BIOS,
			FLASH_BIOS_IMAGE_MAX_SIZE_g3},
		{ FLASH_iSCSI_BACKUP_IMAGE_START_g3, IMG_TYPE_ISCSI_BACKUP,
			FLASH_IMAGE_MAX_SIZE_g3},
		{ FLASH_FCoE_PRIMARY_IMAGE_START_g3, IMG_TYPE_FCOE_FW_ACTIVE,
			FLASH_IMAGE_MAX_SIZE_g3},
		{ FLASH_FCoE_BACKUP_IMAGE_START_g3, IMG_TYPE_FCOE_FW_BACKUP,
			FLASH_IMAGE_MAX_SIZE_g3},
		{ FLASH_NCSI_START_g3, IMG_TYPE_NCSI_FW,
			FLASH_NCSI_IMAGE_MAX_SIZE_g3}
	};
	static const struct flash_comp gen2_flash_types[8] = {
		{ FLASH_iSCSI_PRIMARY_IMAGE_START_g2, IMG_TYPE_ISCSI_ACTIVE,
			FLASH_IMAGE_MAX_SIZE_g2},
		{ FLASH_REDBOOT_START_g2, IMG_TYPE_REDBOOT,
			FLASH_REDBOOT_IMAGE_MAX_SIZE_g2},
		{ FLASH_iSCSI_BIOS_START_g2, IMG_TYPE_BIOS,
			FLASH_BIOS_IMAGE_MAX_SIZE_g2},
		{ FLASH_PXE_BIOS_START_g2, IMG_TYPE_PXE_BIOS,
			FLASH_BIOS_IMAGE_MAX_SIZE_g2},
		{ FLASH_FCoE_BIOS_START_g2, IMG_TYPE_FCOE_BIOS,
			FLASH_BIOS_IMAGE_MAX_SIZE_g2},
		{ FLASH_iSCSI_BACKUP_IMAGE_START_g2, IMG_TYPE_ISCSI_BACKUP,
			FLASH_IMAGE_MAX_SIZE_g2},
		{ FLASH_FCoE_PRIMARY_IMAGE_START_g2, IMG_TYPE_FCOE_FW_ACTIVE,
			FLASH_IMAGE_MAX_SIZE_g2},
		{ FLASH_FCoE_BACKUP_IMAGE_START_g2, IMG_TYPE_FCOE_FW_BACKUP,
			FLASH_IMAGE_MAX_SIZE_g2}
	};

	if (adapter->generation == BE_GEN3) {
		pflashcomp = gen3_flash_types;
		filehdr_size = sizeof(struct flash_file_hdr_g3);
		num_comp = ARRAY_SIZE(gen3_flash_types);
	} else {
		pflashcomp = gen2_flash_types;
		filehdr_size = sizeof(struct flash_file_hdr_g2);
		num_comp = ARRAY_SIZE(gen2_flash_types);
	}
	for (i = 0; i < num_comp; i++) {
		if ((pflashcomp[i].optype == IMG_TYPE_NCSI_FW) &&
				memcmp(adapter->fw_ver, "3.102.148.0", 11) < 0)
			continue;
		if ((pflashcomp[i].optype == IMG_TYPE_REDBOOT) &&
			(!be_flash_redboot(adapter, fw->data,
			pflashcomp[i].offset, pflashcomp[i].size, filehdr_size +
			(num_of_images * sizeof(struct image_hdr)))))
			continue;
		p = fw->data;
		p += filehdr_size + pflashcomp[i].offset
			+ (num_of_images * sizeof(struct image_hdr));
		if (p + pflashcomp[i].size > fw->data + fw->size)
			return -1;
		total_bytes = pflashcomp[i].size;
		while (total_bytes) {
			if (total_bytes > 32*1024)
				num_bytes = 32*1024;
			else
				num_bytes = total_bytes;
			total_bytes -= num_bytes;

			if (!total_bytes)
				flash_op = FLASHROM_OPER_FLASH;
			else
				flash_op = FLASHROM_OPER_SAVE;
			memcpy(req->params.data_buf, p, num_bytes);
			p += num_bytes;
			status = be_cmd_write_flashrom(adapter, flash_cmd,
				pflashcomp[i].optype, flash_op, num_bytes);
			if (status) {
				dev_err(&adapter->pdev->dev,
					"cmd to write to flash rom failed.\n");
				return -1;
			}
		}
	}
	return 0;
}

static int get_ufigen_type(struct flash_file_hdr_g2 *fhdr)
{
	if (fhdr == NULL)
		return 0;
	if (fhdr->build[0] == '3')
		return BE_GEN3;
	else if (fhdr->build[0] == '2')
		return BE_GEN2;
	else
		return 0;
}

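/*
 * Lancer firmware download: the image is streamed to the "/prg" object
 * in 32KB chunks via WRITE_OBJECT commands, followed by a zero-length
 * write to the final offset that commits the downloaded image.
 */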
static int lancer_fw_download(struct be_adapter *adapter,
				const struct firmware *fw)
{
#define LANCER_FW_DOWNLOAD_CHUNK	(32 * 1024)
#define LANCER_FW_DOWNLOAD_LOCATION	"/prg"
	struct be_dma_mem flash_cmd;
	const u8 *data_ptr = NULL;
	u8 *dest_image_ptr = NULL;
	size_t image_size = 0;
	u32 chunk_size = 0;
	u32 data_written = 0;
	u32 offset = 0;
	int status = 0;
	u8 add_status = 0;

	if (!IS_ALIGNED(fw->size, sizeof(u32))) {
		dev_err(&adapter->pdev->dev,
			"FW Image not properly aligned. "
			"Length must be 4 byte aligned.\n");
		status = -EINVAL;
		goto lancer_fw_exit;
	}

	flash_cmd.size = sizeof(struct lancer_cmd_req_write_object)
				+ LANCER_FW_DOWNLOAD_CHUNK;
	flash_cmd.va = dma_alloc_coherent(&adapter->pdev->dev, flash_cmd.size,
						&flash_cmd.dma, GFP_KERNEL);
	if (!flash_cmd.va) {
		status = -ENOMEM;
		dev_err(&adapter->pdev->dev,
			"Memory allocation failure while flashing\n");
		goto lancer_fw_exit;
	}

	dest_image_ptr = flash_cmd.va +
				sizeof(struct lancer_cmd_req_write_object);
	image_size = fw->size;
	data_ptr = fw->data;

	while (image_size) {
		chunk_size = min_t(u32, image_size, LANCER_FW_DOWNLOAD_CHUNK);

		/* Copy the image chunk content. */
		memcpy(dest_image_ptr, data_ptr, chunk_size);

		status = lancer_cmd_write_object(adapter, &flash_cmd,
				chunk_size, offset, LANCER_FW_DOWNLOAD_LOCATION,
				&data_written, &add_status);

		if (status)
			break;

		offset += data_written;
		data_ptr += data_written;
		image_size -= data_written;
	}

	if (!status) {
		/* Commit the FW written */
		status = lancer_cmd_write_object(adapter, &flash_cmd,
				0, offset, LANCER_FW_DOWNLOAD_LOCATION,
				&data_written, &add_status);
	}

	dma_free_coherent(&adapter->pdev->dev, flash_cmd.size, flash_cmd.va,
				flash_cmd.dma);
	if (status) {
		dev_err(&adapter->pdev->dev,
			"Firmware load error. "
			"Status code: 0x%x Additional Status: 0x%x\n",
			status, add_status);
		goto lancer_fw_exit;
	}

	dev_info(&adapter->pdev->dev, "Firmware flashed successfully\n");
lancer_fw_exit:
	return status;
}

static int be_fw_download(struct be_adapter *adapter,
				const struct firmware *fw)
{
	struct flash_file_hdr_g2 *fhdr;
	struct flash_file_hdr_g3 *fhdr3;
	struct image_hdr *img_hdr_ptr = NULL;
	struct be_dma_mem flash_cmd;
	const u8 *p;
	int status = 0, i = 0, num_imgs = 0;

	p = fw->data;
	fhdr = (struct flash_file_hdr_g2 *) p;

	flash_cmd.size = sizeof(struct be_cmd_write_flashrom) + 32*1024;
	flash_cmd.va = dma_alloc_coherent(&adapter->pdev->dev, flash_cmd.size,
					  &flash_cmd.dma, GFP_KERNEL);
	if (!flash_cmd.va) {
		status = -ENOMEM;
		dev_err(&adapter->pdev->dev,
			"Memory allocation failure while flashing\n");
		goto be_fw_exit;
	}

	if ((adapter->generation == BE_GEN3) &&
			(get_ufigen_type(fhdr) == BE_GEN3)) {
		fhdr3 = (struct flash_file_hdr_g3 *) fw->data;
		num_imgs = le32_to_cpu(fhdr3->num_imgs);
		for (i = 0; i < num_imgs; i++) {
			img_hdr_ptr = (struct image_hdr *) (fw->data +
					(sizeof(struct flash_file_hdr_g3) +
					 i * sizeof(struct image_hdr)));
			if (le32_to_cpu(img_hdr_ptr->imageid) == 1)
				status = be_flash_data(adapter, fw, &flash_cmd,
							num_imgs);
		}
	} else if ((adapter->generation == BE_GEN2) &&
			(get_ufigen_type(fhdr) == BE_GEN2)) {
		status = be_flash_data(adapter, fw, &flash_cmd, 0);
	} else {
		dev_err(&adapter->pdev->dev,
			"UFI and Interface are not compatible for flashing\n");
		status = -1;
	}

	dma_free_coherent(&adapter->pdev->dev, flash_cmd.size, flash_cmd.va,
			  flash_cmd.dma);
	if (status) {
		dev_err(&adapter->pdev->dev, "Firmware load error\n");
		goto be_fw_exit;
	}

	dev_info(&adapter->pdev->dev, "Firmware flashed successfully\n");

be_fw_exit:
	return status;
}

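/*
 * Entry point for user-initiated firmware flashing (e.g. via the
 * ethtool flash-device path). Fetches the image through the firmware
 * loader and dispatches to the Lancer or BE2/BE3 download routine
 * based on the chip type.
 */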
int be_load_fw(struct be_adapter *adapter, u8 *fw_file)
{
	const struct firmware *fw;
	int status;

	if (!netif_running(adapter->netdev)) {
		dev_err(&adapter->pdev->dev,
			"Firmware load not allowed (interface is down)\n");
		return -1;
	}

	status = request_firmware(&fw, fw_file, &adapter->pdev->dev);
	if (status)
		goto fw_exit;

	dev_info(&adapter->pdev->dev, "Flashing firmware file %s\n", fw_file);

	if (lancer_chip(adapter))
		status = lancer_fw_download(adapter, fw);
	else
		status = be_fw_download(adapter, fw);

fw_exit:
	release_firmware(fw);
	return status;
}

static struct net_device_ops be_netdev_ops = {
	.ndo_open		= be_open,
	.ndo_stop		= be_close,
	.ndo_start_xmit		= be_xmit,
	.ndo_set_rx_mode	= be_set_multicast_list,
	.ndo_set_mac_address	= be_mac_addr_set,
	.ndo_change_mtu		= be_change_mtu,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_vlan_rx_add_vid	= be_vlan_add_vid,
	.ndo_vlan_rx_kill_vid	= be_vlan_rem_vid,
	.ndo_set_vf_mac		= be_set_vf_mac,
	.ndo_set_vf_vlan	= be_set_vf_vlan,
	.ndo_set_vf_tx_rate	= be_set_vf_tx_rate,
	.ndo_get_vf_config	= be_get_vf_config
};

static void be_netdev_init(struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_rx_obj *rxo;
	int i;

	netdev->hw_features |= NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6 |
		NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_RXCSUM |
		NETIF_F_HW_VLAN_TX;
	if (be_multi_rxq(adapter))
		netdev->hw_features |= NETIF_F_RXHASH;

	netdev->features |= netdev->hw_features |
		NETIF_F_HW_VLAN_RX | NETIF_F_HW_VLAN_FILTER;

	netdev->vlan_features |= NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6 |
		NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;

	netdev->flags |= IFF_MULTICAST;

	/* Default settings for Rx and Tx flow control */
	adapter->rx_fc = true;
	adapter->tx_fc = true;

	netif_set_gso_max_size(netdev, 65535);

	BE_SET_NETDEV_OPS(netdev, &be_netdev_ops);

	SET_ETHTOOL_OPS(netdev, &be_ethtool_ops);

	for_all_rx_queues(adapter, rxo, i)
		netif_napi_add(netdev, &rxo->rx_eq.napi, be_poll_rx,
				BE_NAPI_WEIGHT);

	netif_napi_add(netdev, &adapter->tx_eq.napi, be_poll_tx_mcc,
		BE_NAPI_WEIGHT);
}

static void be_unmap_pci_bars(struct be_adapter *adapter)
{
	if (adapter->csr)
		iounmap(adapter->csr);
	if (adapter->db)
		iounmap(adapter->db);
	if (adapter->pcicfg && be_physfn(adapter))
		iounmap(adapter->pcicfg);
}

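/*
 * Map the PCI BARs this function needs. Lancer exposes everything
 * through BAR 0. For BE2/BE3 the PF maps CSR (BAR 2), doorbells and
 * a PCI config shadow (BAR numbers depend on the generation); a VF
 * has no separate pcicfg BAR and derives it from the doorbell mapping
 * at a fixed offset.
 */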
static int be_map_pci_bars(struct be_adapter *adapter)
{
	u8 __iomem *addr;
	int pcicfg_reg, db_reg;

	if (lancer_chip(adapter)) {
		addr = ioremap_nocache(pci_resource_start(adapter->pdev, 0),
			pci_resource_len(adapter->pdev, 0));
		if (addr == NULL)
			return -ENOMEM;
		adapter->db = addr;
		return 0;
	}

	if (be_physfn(adapter)) {
		addr = ioremap_nocache(pci_resource_start(adapter->pdev, 2),
				pci_resource_len(adapter->pdev, 2));
		if (addr == NULL)
			return -ENOMEM;
		adapter->csr = addr;
	}

	if (adapter->generation == BE_GEN2) {
		pcicfg_reg = 1;
		db_reg = 4;
	} else {
		pcicfg_reg = 0;
		if (be_physfn(adapter))
			db_reg = 4;
		else
			db_reg = 0;
	}
	addr = ioremap_nocache(pci_resource_start(adapter->pdev, db_reg),
				pci_resource_len(adapter->pdev, db_reg));
	if (addr == NULL)
		goto pci_map_err;
	adapter->db = addr;

	if (be_physfn(adapter)) {
		addr = ioremap_nocache(
				pci_resource_start(adapter->pdev, pcicfg_reg),
				pci_resource_len(adapter->pdev, pcicfg_reg));
		if (addr == NULL)
			goto pci_map_err;
		adapter->pcicfg = addr;
	} else
		adapter->pcicfg = adapter->db + SRIOV_VF_PCICFG_OFFSET;

	return 0;
pci_map_err:
	be_unmap_pci_bars(adapter);
	return -ENOMEM;
}

static void be_ctrl_cleanup(struct be_adapter *adapter)
{
	struct be_dma_mem *mem = &adapter->mbox_mem_alloced;

	be_unmap_pci_bars(adapter);

	if (mem->va)
		dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
				  mem->dma);

	mem = &adapter->mc_cmd_mem;
	if (mem->va)
		dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
				  mem->dma);
}

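/*
 * Map BARs and allocate the DMA memory used for the mailbox and
 * multicast commands. The mailbox must be 16-byte aligned, so a
 * padded buffer is allocated and the aligned view is carved out of it.
 */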
static int be_ctrl_init(struct be_adapter *adapter)
{
	struct be_dma_mem *mbox_mem_alloc = &adapter->mbox_mem_alloced;
	struct be_dma_mem *mbox_mem_align = &adapter->mbox_mem;
	struct be_dma_mem *mc_cmd_mem = &adapter->mc_cmd_mem;
	int status;

	status = be_map_pci_bars(adapter);
	if (status)
		goto done;

	mbox_mem_alloc->size = sizeof(struct be_mcc_mailbox) + 16;
	mbox_mem_alloc->va = dma_alloc_coherent(&adapter->pdev->dev,
						mbox_mem_alloc->size,
						&mbox_mem_alloc->dma,
						GFP_KERNEL);
	if (!mbox_mem_alloc->va) {
		status = -ENOMEM;
		goto unmap_pci_bars;
	}

	mbox_mem_align->size = sizeof(struct be_mcc_mailbox);
	mbox_mem_align->va = PTR_ALIGN(mbox_mem_alloc->va, 16);
	mbox_mem_align->dma = PTR_ALIGN(mbox_mem_alloc->dma, 16);
	memset(mbox_mem_align->va, 0, sizeof(struct be_mcc_mailbox));

	mc_cmd_mem->size = sizeof(struct be_cmd_req_mcast_mac_config);
	mc_cmd_mem->va = dma_alloc_coherent(&adapter->pdev->dev,
					    mc_cmd_mem->size, &mc_cmd_mem->dma,
					    GFP_KERNEL);
	if (mc_cmd_mem->va == NULL) {
		status = -ENOMEM;
		goto free_mbox;
	}
	memset(mc_cmd_mem->va, 0, mc_cmd_mem->size);

	mutex_init(&adapter->mbox_lock);
	spin_lock_init(&adapter->mcc_lock);
	spin_lock_init(&adapter->mcc_cq_lock);

	init_completion(&adapter->flash_compl);
	pci_save_state(adapter->pdev);
	return 0;

free_mbox:
	dma_free_coherent(&adapter->pdev->dev, mbox_mem_alloc->size,
			  mbox_mem_alloc->va, mbox_mem_alloc->dma);

unmap_pci_bars:
	be_unmap_pci_bars(adapter);

done:
	return status;
}

static void be_stats_cleanup(struct be_adapter *adapter)
{
	struct be_dma_mem *cmd = &adapter->stats_cmd;

	if (cmd->va)
		dma_free_coherent(&adapter->pdev->dev, cmd->size,
				  cmd->va, cmd->dma);
}

static int be_stats_init(struct be_adapter *adapter)
{
	struct be_dma_mem *cmd = &adapter->stats_cmd;

	if (adapter->generation == BE_GEN2) {
		cmd->size = sizeof(struct be_cmd_req_get_stats_v0);
	} else {
		if (lancer_chip(adapter))
			cmd->size = sizeof(struct lancer_cmd_req_pport_stats);
		else
			cmd->size = sizeof(struct be_cmd_req_get_stats_v1);
	}
	cmd->va = dma_alloc_coherent(&adapter->pdev->dev, cmd->size, &cmd->dma,
				     GFP_KERNEL);
	if (cmd->va == NULL)
		return -1;
	memset(cmd->va, 0, cmd->size);
	return 0;
}

static void __devexit be_remove(struct pci_dev *pdev)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);

	if (!adapter)
		return;

	cancel_delayed_work_sync(&adapter->work);

	unregister_netdev(adapter->netdev);

	be_clear(adapter);

	be_stats_cleanup(adapter);

	be_ctrl_cleanup(adapter);

	kfree(adapter->vf_cfg);
	be_sriov_disable(adapter);

	be_msix_disable(adapter);

	pci_set_drvdata(pdev, NULL);
	pci_release_regions(pdev);
	pci_disable_device(pdev);

	free_netdev(adapter->netdev);
}

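/*
 * Query the firmware for this function's configuration: FW version,
 * port number, function mode/capabilities, the permanent MAC address,
 * the VLAN budget and the number of TX queues the function may use.
 */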
static int be_get_config(struct be_adapter *adapter)
{
	int status;
	u8 mac[ETH_ALEN];

	status = be_cmd_get_fw_ver(adapter, adapter->fw_ver);
	if (status)
		return status;

	status = be_cmd_query_fw_cfg(adapter, &adapter->port_num,
			&adapter->function_mode, &adapter->function_caps);
	if (status)
		return status;

	memset(mac, 0, ETH_ALEN);

	/* A default permanent address is given to each VF for Lancer */
	if (be_physfn(adapter) || lancer_chip(adapter)) {
		status = be_cmd_mac_addr_query(adapter, mac,
			MAC_ADDRESS_TYPE_NETWORK, true /* permanent */, 0);

		if (status)
			return status;

		if (!is_valid_ether_addr(mac))
			return -EADDRNOTAVAIL;

		memcpy(adapter->netdev->dev_addr, mac, ETH_ALEN);
		memcpy(adapter->netdev->perm_addr, mac, ETH_ALEN);
	}

	if (adapter->function_mode & 0x400)
		adapter->max_vlans = BE_NUM_VLANS_SUPPORTED/4;
	else
		adapter->max_vlans = BE_NUM_VLANS_SUPPORTED;

	status = be_cmd_get_cntl_attributes(adapter);
	if (status)
		return status;

	if ((num_vfs && adapter->sriov_enabled) ||
		(adapter->function_mode & 0x400) ||
		lancer_chip(adapter) || !be_physfn(adapter)) {
		adapter->num_tx_qs = 1;
		netif_set_real_num_tx_queues(adapter->netdev,
			adapter->num_tx_qs);
	} else {
		adapter->num_tx_qs = MAX_TX_QS;
	}

	return 0;
}

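/*
 * Determine the ASIC generation from the PCI device ID. For the newer
 * device IDs the SLI_INTF register is also read to validate the
 * interface type and record the SLI family.
 */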
static int be_dev_family_check(struct be_adapter *adapter)
{
	struct pci_dev *pdev = adapter->pdev;
	u32 sli_intf = 0, if_type;

	switch (pdev->device) {
	case BE_DEVICE_ID1:
	case OC_DEVICE_ID1:
		adapter->generation = BE_GEN2;
		break;
	case BE_DEVICE_ID2:
	case OC_DEVICE_ID2:
		adapter->generation = BE_GEN3;
		break;
	case OC_DEVICE_ID3:
	case OC_DEVICE_ID4:
		pci_read_config_dword(pdev, SLI_INTF_REG_OFFSET, &sli_intf);
		if_type = (sli_intf & SLI_INTF_IF_TYPE_MASK) >>
						SLI_INTF_IF_TYPE_SHIFT;

		if (((sli_intf & SLI_INTF_VALID_MASK) != SLI_INTF_VALID) ||
			if_type != 0x02) {
			dev_err(&pdev->dev, "SLI_INTF reg val is not valid\n");
			return -EINVAL;
		}
		adapter->sli_family = ((sli_intf & SLI_INTF_FAMILY_MASK) >>
					 SLI_INTF_FAMILY_SHIFT);
		adapter->generation = BE_GEN3;
		break;
	default:
		adapter->generation = 0;
	}
	return 0;
}

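/*
 * Poll the SLIPORT status register until the firmware reports ready,
 * checking every 20ms for up to SLIPORT_READY_TIMEOUT iterations
 * (roughly 10 seconds in total).
 */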
static int lancer_wait_ready(struct be_adapter *adapter)
{
#define SLIPORT_READY_TIMEOUT 500
	u32 sliport_status;
	int status = 0, i;

	for (i = 0; i < SLIPORT_READY_TIMEOUT; i++) {
		sliport_status = ioread32(adapter->db + SLIPORT_STATUS_OFFSET);
		if (sliport_status & SLIPORT_STATUS_RDY_MASK)
			break;

		msleep(20);
	}

	if (i == SLIPORT_READY_TIMEOUT)
		status = -1;

	return status;
}

static int lancer_test_and_set_rdy_state(struct be_adapter *adapter)
{
	int status;
	u32 sliport_status, err, reset_needed;

	status = lancer_wait_ready(adapter);
	if (!status) {
		sliport_status = ioread32(adapter->db + SLIPORT_STATUS_OFFSET);
		err = sliport_status & SLIPORT_STATUS_ERR_MASK;
		reset_needed = sliport_status & SLIPORT_STATUS_RN_MASK;
		if (err && reset_needed) {
			iowrite32(SLI_PORT_CONTROL_IP_MASK,
				  adapter->db + SLIPORT_CONTROL_OFFSET);

			/* check if the adapter has corrected the error */
			status = lancer_wait_ready(adapter);
			sliport_status = ioread32(adapter->db +
						  SLIPORT_STATUS_OFFSET);
			sliport_status &= (SLIPORT_STATUS_ERR_MASK |
					   SLIPORT_STATUS_RN_MASK);
			if (status || sliport_status)
				status = -1;
		} else if (err || reset_needed) {
			status = -1;
		}
	}
	return status;
}

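/*
 * PCI probe: enable the device, map BARs, bring the firmware to a
 * ready state, query the configuration and register the netdev. Each
 * failure point unwinds exactly the steps completed before it.
 */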
static int __devinit be_probe(struct pci_dev *pdev,
			const struct pci_device_id *pdev_id)
{
	int status = 0;
	struct be_adapter *adapter;
	struct net_device *netdev;

	status = pci_enable_device(pdev);
	if (status)
		goto do_none;

	status = pci_request_regions(pdev, DRV_NAME);
	if (status)
		goto disable_dev;
	pci_set_master(pdev);

	netdev = alloc_etherdev_mq(sizeof(struct be_adapter), MAX_TX_QS);
	if (netdev == NULL) {
		status = -ENOMEM;
		goto rel_reg;
	}
	adapter = netdev_priv(netdev);
	adapter->pdev = pdev;
	pci_set_drvdata(pdev, adapter);

	status = be_dev_family_check(adapter);
	if (status)
		goto free_netdev;

	adapter->netdev = netdev;
	SET_NETDEV_DEV(netdev, &pdev->dev);

	status = dma_set_mask(&pdev->dev, DMA_BIT_MASK(64));
	if (!status) {
		netdev->features |= NETIF_F_HIGHDMA;
	} else {
		status = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
		if (status) {
			dev_err(&pdev->dev, "Could not set PCI DMA Mask\n");
			goto free_netdev;
		}
	}

	be_sriov_enable(adapter);
	if (adapter->sriov_enabled) {
		adapter->vf_cfg = kcalloc(num_vfs,
			sizeof(struct be_vf_cfg), GFP_KERNEL);

		if (!adapter->vf_cfg)
			goto free_netdev;
	}

	status = be_ctrl_init(adapter);
	if (status)
		goto free_vf_cfg;

	if (lancer_chip(adapter)) {
		status = lancer_test_and_set_rdy_state(adapter);
		if (status) {
			dev_err(&pdev->dev,
				"Adapter in non-recoverable error\n");
			goto ctrl_clean;
		}
	}

	/* sync up with fw's ready state */
	if (be_physfn(adapter)) {
		status = be_cmd_POST(adapter);
		if (status)
			goto ctrl_clean;
	}

	/* tell fw we're ready to fire cmds */
	status = be_cmd_fw_init(adapter);
	if (status)
		goto ctrl_clean;

	status = be_cmd_reset_function(adapter);
	if (status)
		goto ctrl_clean;

	status = be_stats_init(adapter);
	if (status)
		goto ctrl_clean;

	status = be_get_config(adapter);
	if (status)
		goto stats_clean;

	/* The INTR bit may be set in the card when probed by a kdump kernel
	 * after a crash.
	 */
	if (!lancer_chip(adapter))
		be_intr_set(adapter, false);

	be_msix_enable(adapter);

	INIT_DELAYED_WORK(&adapter->work, be_worker);

	status = be_setup(adapter);
	if (status)
		goto msix_disable;

	be_netdev_init(netdev);
	status = register_netdev(netdev);
	if (status != 0)
		goto unsetup;
	netif_carrier_off(netdev);

	if (be_physfn(adapter) && adapter->sriov_enabled) {
		u8 mac_speed;
		bool link_up;
		u16 vf, lnk_speed;

		if (!lancer_chip(adapter)) {
			status = be_vf_eth_addr_config(adapter);
			if (status)
				goto unreg_netdev;
		}

		for (vf = 0; vf < num_vfs; vf++) {
			status = be_cmd_link_status_query(adapter, &link_up,
					&mac_speed, &lnk_speed, vf + 1);
			if (!status)
				adapter->vf_cfg[vf].vf_tx_rate = lnk_speed * 10;
			else
				goto unreg_netdev;
		}
	}

	dev_info(&pdev->dev, "%s port %d\n", nic_name(pdev), adapter->port_num);

	schedule_delayed_work(&adapter->work, msecs_to_jiffies(100));
	return 0;

unreg_netdev:
	unregister_netdev(netdev);
unsetup:
	be_clear(adapter);
msix_disable:
	be_msix_disable(adapter);
stats_clean:
	be_stats_cleanup(adapter);
ctrl_clean:
	be_ctrl_cleanup(adapter);
free_vf_cfg:
	kfree(adapter->vf_cfg);
free_netdev:
	be_sriov_disable(adapter);
	free_netdev(netdev);
	pci_set_drvdata(pdev, NULL);
rel_reg:
	pci_release_regions(pdev);
disable_dev:
	pci_disable_device(pdev);
do_none:
	dev_err(&pdev->dev, "%s initialization failed\n", nic_name(pdev));
	return status;
}

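/*
 * Suspend/resume: WoL is armed before the device is powered down and
 * disarmed after it comes back; the queue and interface setup is torn
 * down on suspend and rebuilt on resume.
 */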
static int be_suspend(struct pci_dev *pdev, pm_message_t state)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);
	struct net_device *netdev = adapter->netdev;

	cancel_delayed_work_sync(&adapter->work);
	if (adapter->wol)
		be_setup_wol(adapter, true);

	netif_device_detach(netdev);
	if (netif_running(netdev)) {
		rtnl_lock();
		be_close(netdev);
		rtnl_unlock();
	}
	be_cmd_get_flow_control(adapter, &adapter->tx_fc, &adapter->rx_fc);
	be_clear(adapter);

	be_msix_disable(adapter);
	pci_save_state(pdev);
	pci_disable_device(pdev);
	pci_set_power_state(pdev, pci_choose_state(pdev, state));
	return 0;
}

static int be_resume(struct pci_dev *pdev)
{
	int status = 0;
	struct be_adapter *adapter = pci_get_drvdata(pdev);
	struct net_device *netdev = adapter->netdev;

	netif_device_detach(netdev);

	status = pci_enable_device(pdev);
	if (status)
		return status;

	pci_set_power_state(pdev, 0);
	pci_restore_state(pdev);

	be_msix_enable(adapter);
	/* tell fw we're ready to fire cmds */
	status = be_cmd_fw_init(adapter);
	if (status)
		return status;

	be_setup(adapter);
	if (netif_running(netdev)) {
		rtnl_lock();
		be_open(netdev);
		rtnl_unlock();
	}
	netif_device_attach(netdev);

	if (adapter->wol)
		be_setup_wol(adapter, false);

	schedule_delayed_work(&adapter->work, msecs_to_jiffies(100));
	return 0;
}

/*
 * An FLR will stop BE from DMAing any data.
 */
static void be_shutdown(struct pci_dev *pdev)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);

	if (!adapter)
		return;

	cancel_delayed_work_sync(&adapter->work);

	netif_device_detach(adapter->netdev);

	if (adapter->wol)
		be_setup_wol(adapter, true);

	be_cmd_reset_function(adapter);

	pci_disable_device(pdev);
}

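/*
 * PCI error (EEH) handling: on a detected error the device is quiesced
 * and disabled; slot_reset re-enables it and re-runs POST; resume
 * re-initializes the firmware and restores the previous state.
 */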
static pci_ers_result_t be_eeh_err_detected(struct pci_dev *pdev,
				pci_channel_state_t state)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);
	struct net_device *netdev = adapter->netdev;

	dev_err(&adapter->pdev->dev, "EEH error detected\n");

	adapter->eeh_err = true;

	netif_device_detach(netdev);

	if (netif_running(netdev)) {
		rtnl_lock();
		be_close(netdev);
		rtnl_unlock();
	}
	be_clear(adapter);

	if (state == pci_channel_io_perm_failure)
		return PCI_ERS_RESULT_DISCONNECT;

	pci_disable_device(pdev);

	return PCI_ERS_RESULT_NEED_RESET;
}

static pci_ers_result_t be_eeh_reset(struct pci_dev *pdev)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);
	int status;

	dev_info(&adapter->pdev->dev, "EEH reset\n");
	adapter->eeh_err = false;

	status = pci_enable_device(pdev);
	if (status)
		return PCI_ERS_RESULT_DISCONNECT;

	pci_set_master(pdev);
	pci_set_power_state(pdev, 0);
	pci_restore_state(pdev);

	/* Check if card is ok and fw is ready */
	status = be_cmd_POST(adapter);
	if (status)
		return PCI_ERS_RESULT_DISCONNECT;

	return PCI_ERS_RESULT_RECOVERED;
}

static void be_eeh_resume(struct pci_dev *pdev)
{
	int status = 0;
	struct be_adapter *adapter = pci_get_drvdata(pdev);
	struct net_device *netdev = adapter->netdev;

	dev_info(&adapter->pdev->dev, "EEH resume\n");

	pci_save_state(pdev);

	/* tell fw we're ready to fire cmds */
	status = be_cmd_fw_init(adapter);
	if (status)
		goto err;

	status = be_setup(adapter);
	if (status)
		goto err;

	if (netif_running(netdev)) {
		status = be_open(netdev);
		if (status)
			goto err;
	}
	netif_device_attach(netdev);
	return;
err:
	dev_err(&adapter->pdev->dev, "EEH resume failed\n");
}

static struct pci_error_handlers be_eeh_handlers = {
	.error_detected = be_eeh_err_detected,
	.slot_reset = be_eeh_reset,
	.resume = be_eeh_resume,
};

static struct pci_driver be_driver = {
	.name = DRV_NAME,
	.id_table = be_dev_ids,
	.probe = be_probe,
	.remove = be_remove,
	.suspend = be_suspend,
	.resume = be_resume,
	.shutdown = be_shutdown,
	.err_handler = &be_eeh_handlers
};

static int __init be_init_module(void)
{
	if (rx_frag_size != 8192 && rx_frag_size != 4096 &&
	    rx_frag_size != 2048) {
		printk(KERN_WARNING DRV_NAME
			" : Module param rx_frag_size must be 2048/4096/8192."
			" Using 2048\n");
		rx_frag_size = 2048;
	}

	return pci_register_driver(&be_driver);
}
module_init(be_init_module);

static void __exit be_exit_module(void)
{
	pci_unregister_driver(&be_driver);
}
module_exit(be_exit_module);