blob: 816ce56de7ac71e8e89079479d1b6cfb9c51074c [file] [log] [blame]
/*
 * Copyright (C) 2005 - 2011 Emulex
 * All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version 2
 * as published by the Free Software Foundation.  The full GNU General
 * Public License is included in this distribution in the file called COPYING.
 *
 * Contact Information:
 * linux-drivers@emulex.com
 *
 * Emulex
 * 3333 Susan Street
 * Costa Mesa, CA 92626
 */
17
#include <linux/errno.h>
#include <linux/prefetch.h>
#include "be.h"
#include "be_cmds.h"
#include <asm/div64.h>
Sathya Perla6b7c5b92009-03-11 23:32:03 -070022
MODULE_VERSION(DRV_VER);
/* NOTE(review): MODULE_DEVICE_TABLE(pci, be_dev_ids) appears twice in this
 * file — here and again after the table definition below. One of the two is
 * redundant; confirm and drop the duplicate.
 */
MODULE_DEVICE_TABLE(pci, be_dev_ids);
MODULE_DESCRIPTION(DRV_DESC " " DRV_VER);
MODULE_AUTHOR("ServerEngines Corporation");
MODULE_LICENSE("GPL");

/* Module parameters: read-only after load (S_IRUGO — visible in sysfs). */
static ushort rx_frag_size = 2048;	/* size of each RX buffer fragment */
static unsigned int num_vfs;		/* number of SR-IOV VFs to enable */
module_param(rx_frag_size, ushort, S_IRUGO);
module_param(num_vfs, uint, S_IRUGO);
MODULE_PARM_DESC(rx_frag_size, "Size of a fragment that holds rcvd data.");
MODULE_PARM_DESC(num_vfs, "Number of PCI VFs to initialize");

/* PCI device IDs claimed by this driver (BE2/BE3 and OneConnect parts) */
static DEFINE_PCI_DEVICE_TABLE(be_dev_ids) = {
	{ PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID1) },
	{ PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID2) },
	{ PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID1) },
	{ PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID2) },
	{ PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID3)},
	{ PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID4)},
	{ 0 }
};
MODULE_DEVICE_TABLE(pci, be_dev_ids);
/* UE Status Low CSR
 * Human-readable names for the error sources reported in the UE (unrecoverable
 * error) status-low register. Presumably indexed by bit position in that CSR
 * when decoding errors elsewhere in the driver — TODO confirm at the use site.
 * (Trailing spaces in some entries are preserved verbatim.)
 */
static const char * const ue_status_low_desc[] = {
	"CEV",
	"CTX",
	"DBUF",
	"ERX",
	"Host",
	"MPU",
	"NDMA",
	"PTC ",
	"RDMA ",
	"RXF ",
	"RXIPS ",
	"RXULP0 ",
	"RXULP1 ",
	"RXULP2 ",
	"TIM ",
	"TPOST ",
	"TPRE ",
	"TXIPS ",
	"TXULP0 ",
	"TXULP1 ",
	"UC ",
	"WDMA ",
	"TXULP2 ",
	"HOST1 ",
	"P0_OB_LINK ",
	"P1_OB_LINK ",
	"HOST_GPIO ",
	"MBOX ",
	"AXGMAC0",
	"AXGMAC1",
	"JTAG",
	"MPU_INTPEND"
};
/* UE Status High CSR
 * Companion table to ue_status_low_desc for the status-high register;
 * presumably indexed by bit position as well — TODO confirm at the use site.
 * Unused/reserved bits are labeled "Unknown".
 */
static const char * const ue_status_hi_desc[] = {
	"LPCMEMHOST",
	"MGMT_MAC",
	"PCS0ONLINE",
	"MPU_IRAM",
	"PCS1ONLINE",
	"PCTL0",
	"PCTL1",
	"PMEM",
	"RR",
	"TXPB",
	"RXPP",
	"XAUI",
	"TXP",
	"ARM",
	"IPC",
	"HOST2",
	"HOST3",
	"HOST4",
	"HOST5",
	"HOST6",
	"HOST7",
	"HOST8",
	"HOST9",
	"NETC",
	"Unknown",
	"Unknown",
	"Unknown",
	"Unknown",
	"Unknown",
	"Unknown",
	"Unknown",
	"Unknown"
};
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700116
117static void be_queue_free(struct be_adapter *adapter, struct be_queue_info *q)
118{
119 struct be_dma_mem *mem = &q->dma_mem;
120 if (mem->va)
Ivan Vecera2b7bceb2011-02-02 08:05:12 +0000121 dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
122 mem->dma);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700123}
124
125static int be_queue_alloc(struct be_adapter *adapter, struct be_queue_info *q,
126 u16 len, u16 entry_size)
127{
128 struct be_dma_mem *mem = &q->dma_mem;
129
130 memset(q, 0, sizeof(*q));
131 q->len = len;
132 q->entry_size = entry_size;
133 mem->size = len * entry_size;
Ivan Vecera2b7bceb2011-02-02 08:05:12 +0000134 mem->va = dma_alloc_coherent(&adapter->pdev->dev, mem->size, &mem->dma,
135 GFP_KERNEL);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700136 if (!mem->va)
137 return -1;
138 memset(mem->va, 0, mem->size);
139 return 0;
140}
141
Sathya Perla8788fdc2009-07-27 22:52:03 +0000142static void be_intr_set(struct be_adapter *adapter, bool enable)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700143{
Sathya Perladb3ea782011-08-22 19:41:52 +0000144 u32 reg, enabled;
Sathya Perla5f0b8492009-07-27 22:52:56 +0000145
Sathya Perlacf588472010-02-14 21:22:01 +0000146 if (adapter->eeh_err)
147 return;
148
Sathya Perladb3ea782011-08-22 19:41:52 +0000149 pci_read_config_dword(adapter->pdev, PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET,
150 &reg);
151 enabled = reg & MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
152
Sathya Perla5f0b8492009-07-27 22:52:56 +0000153 if (!enabled && enable)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700154 reg |= MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
Sathya Perla5f0b8492009-07-27 22:52:56 +0000155 else if (enabled && !enable)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700156 reg &= ~MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
Sathya Perla5f0b8492009-07-27 22:52:56 +0000157 else
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700158 return;
Sathya Perla5f0b8492009-07-27 22:52:56 +0000159
Sathya Perladb3ea782011-08-22 19:41:52 +0000160 pci_write_config_dword(adapter->pdev,
161 PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET, reg);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700162}
163
Sathya Perla8788fdc2009-07-27 22:52:03 +0000164static void be_rxq_notify(struct be_adapter *adapter, u16 qid, u16 posted)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700165{
166 u32 val = 0;
167 val |= qid & DB_RQ_RING_ID_MASK;
168 val |= posted << DB_RQ_NUM_POSTED_SHIFT;
Sathya Perlaf3eb62d2010-06-29 00:11:17 +0000169
170 wmb();
Sathya Perla8788fdc2009-07-27 22:52:03 +0000171 iowrite32(val, adapter->db + DB_RQ_OFFSET);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700172}
173
Sathya Perla8788fdc2009-07-27 22:52:03 +0000174static void be_txq_notify(struct be_adapter *adapter, u16 qid, u16 posted)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700175{
176 u32 val = 0;
177 val |= qid & DB_TXULP_RING_ID_MASK;
178 val |= (posted & DB_TXULP_NUM_POSTED_MASK) << DB_TXULP_NUM_POSTED_SHIFT;
Sathya Perlaf3eb62d2010-06-29 00:11:17 +0000179
180 wmb();
Sathya Perla8788fdc2009-07-27 22:52:03 +0000181 iowrite32(val, adapter->db + DB_TXULP1_OFFSET);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700182}
183
Sathya Perla8788fdc2009-07-27 22:52:03 +0000184static void be_eq_notify(struct be_adapter *adapter, u16 qid,
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700185 bool arm, bool clear_int, u16 num_popped)
186{
187 u32 val = 0;
188 val |= qid & DB_EQ_RING_ID_MASK;
Sathya Perlafe6d2a32010-11-21 23:25:50 +0000189 val |= ((qid & DB_EQ_RING_ID_EXT_MASK) <<
190 DB_EQ_RING_ID_EXT_MASK_SHIFT);
Sathya Perlacf588472010-02-14 21:22:01 +0000191
192 if (adapter->eeh_err)
193 return;
194
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700195 if (arm)
196 val |= 1 << DB_EQ_REARM_SHIFT;
197 if (clear_int)
198 val |= 1 << DB_EQ_CLR_SHIFT;
199 val |= 1 << DB_EQ_EVNT_SHIFT;
200 val |= num_popped << DB_EQ_NUM_POPPED_SHIFT;
Sathya Perla8788fdc2009-07-27 22:52:03 +0000201 iowrite32(val, adapter->db + DB_EQ_OFFSET);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700202}
203
Sathya Perla8788fdc2009-07-27 22:52:03 +0000204void be_cq_notify(struct be_adapter *adapter, u16 qid, bool arm, u16 num_popped)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700205{
206 u32 val = 0;
207 val |= qid & DB_CQ_RING_ID_MASK;
Sathya Perlafe6d2a32010-11-21 23:25:50 +0000208 val |= ((qid & DB_CQ_RING_ID_EXT_MASK) <<
209 DB_CQ_RING_ID_EXT_MASK_SHIFT);
Sathya Perlacf588472010-02-14 21:22:01 +0000210
211 if (adapter->eeh_err)
212 return;
213
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700214 if (arm)
215 val |= 1 << DB_CQ_REARM_SHIFT;
216 val |= num_popped << DB_CQ_NUM_POPPED_SHIFT;
Sathya Perla8788fdc2009-07-27 22:52:03 +0000217 iowrite32(val, adapter->db + DB_CQ_OFFSET);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700218}
219
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700220static int be_mac_addr_set(struct net_device *netdev, void *p)
221{
222 struct be_adapter *adapter = netdev_priv(netdev);
223 struct sockaddr *addr = p;
224 int status = 0;
225
Ajit Khapardeca9e4982009-11-29 17:56:26 +0000226 if (!is_valid_ether_addr(addr->sa_data))
227 return -EADDRNOTAVAIL;
228
Sarveshwar Bandiba343c72010-03-31 02:56:12 +0000229 /* MAC addr configuration will be done in hardware for VFs
230 * by their corresponding PFs. Just copy to netdev addr here
231 */
232 if (!be_physfn(adapter))
233 goto netdev_addr;
234
Ajit Khapardef8617e02011-02-11 13:36:37 +0000235 status = be_cmd_pmac_del(adapter, adapter->if_handle,
236 adapter->pmac_id, 0);
Sathya Perlaa65027e2009-08-17 00:58:04 +0000237 if (status)
238 return status;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700239
Sathya Perlaa65027e2009-08-17 00:58:04 +0000240 status = be_cmd_pmac_add(adapter, (u8 *)addr->sa_data,
Ajit Khapardef8617e02011-02-11 13:36:37 +0000241 adapter->if_handle, &adapter->pmac_id, 0);
Sarveshwar Bandiba343c72010-03-31 02:56:12 +0000242netdev_addr:
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700243 if (!status)
244 memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
245
246 return status;
247}
248
/* Copy BE2 (v0 stats layout) HW counters into the generation-agnostic
 * adapter->drv_stats. The whole stats command buffer is byte-swapped from
 * LE in place first — presumably safe because a fresh copy is DMAed from
 * FW on each stats query; TODO confirm against the stats-cmd path.
 */
static void populate_be2_stats(struct be_adapter *adapter)
{
	struct be_hw_stats_v0 *hw_stats = hw_stats_from_cmd(adapter);
	struct be_pmem_stats *pmem_sts = &hw_stats->pmem;
	struct be_rxf_stats_v0 *rxf_stats = &hw_stats->rxf;
	/* per-port counters for this adapter's port */
	struct be_port_rxf_stats_v0 *port_stats =
		&rxf_stats->port[adapter->port_num];
	struct be_drv_stats *drvs = &adapter->drv_stats;

	be_dws_le_to_cpu(hw_stats, sizeof(*hw_stats));
	drvs->rx_pause_frames = port_stats->rx_pause_frames;
	drvs->rx_crc_errors = port_stats->rx_crc_errors;
	drvs->rx_control_frames = port_stats->rx_control_frames;
	drvs->rx_in_range_errors = port_stats->rx_in_range_errors;
	drvs->rx_frame_too_long = port_stats->rx_frame_too_long;
	drvs->rx_dropped_runt = port_stats->rx_dropped_runt;
	drvs->rx_ip_checksum_errs = port_stats->rx_ip_checksum_errs;
	drvs->rx_tcp_checksum_errs = port_stats->rx_tcp_checksum_errs;
	drvs->rx_udp_checksum_errs = port_stats->rx_udp_checksum_errs;
	/* v0 layout has no distinct rxpp counter; rx_fifo_overflow is used */
	drvs->rxpp_fifo_overflow_drop = port_stats->rx_fifo_overflow;
	drvs->rx_dropped_tcp_length = port_stats->rx_dropped_tcp_length;
	drvs->rx_dropped_too_small = port_stats->rx_dropped_too_small;
	drvs->rx_dropped_too_short = port_stats->rx_dropped_too_short;
	drvs->rx_out_range_errors = port_stats->rx_out_range_errors;
	drvs->rx_input_fifo_overflow_drop = port_stats->rx_input_fifo_overflow;
	drvs->rx_dropped_header_too_small =
		port_stats->rx_dropped_header_too_small;
	drvs->rx_address_match_errors = port_stats->rx_address_match_errors;
	drvs->rx_alignment_symbol_errors =
		port_stats->rx_alignment_symbol_errors;

	drvs->tx_pauseframes = port_stats->tx_pauseframes;
	drvs->tx_controlframes = port_stats->tx_controlframes;

	/* jabber events are tracked per physical port in the rxf block */
	if (adapter->port_num)
		drvs->jabber_events = rxf_stats->port1_jabber_events;
	else
		drvs->jabber_events = rxf_stats->port0_jabber_events;
	drvs->rx_drops_no_pbuf = rxf_stats->rx_drops_no_pbuf;
	drvs->rx_drops_no_txpb = rxf_stats->rx_drops_no_txpb;
	drvs->rx_drops_no_erx_descr = rxf_stats->rx_drops_no_erx_descr;
	drvs->rx_drops_invalid_ring = rxf_stats->rx_drops_invalid_ring;
	drvs->forwarded_packets = rxf_stats->forwarded_packets;
	drvs->rx_drops_mtu = rxf_stats->rx_drops_mtu;
	drvs->rx_drops_no_tpre_descr = rxf_stats->rx_drops_no_tpre_descr;
	drvs->rx_drops_too_many_frags = rxf_stats->rx_drops_too_many_frags;
	adapter->drv_stats.eth_red_drops = pmem_sts->eth_red_drops;
}
297
/* Copy BE3 (v1 stats layout) HW counters into adapter->drv_stats.
 * Mirrors populate_be2_stats() but the v1 layout carries per-port jabber
 * events and a dedicated rxpp_fifo_overflow_drop counter.
 */
static void populate_be3_stats(struct be_adapter *adapter)
{
	struct be_hw_stats_v1 *hw_stats = hw_stats_from_cmd(adapter);
	struct be_pmem_stats *pmem_sts = &hw_stats->pmem;
	struct be_rxf_stats_v1 *rxf_stats = &hw_stats->rxf;
	/* per-port counters for this adapter's port */
	struct be_port_rxf_stats_v1 *port_stats =
		&rxf_stats->port[adapter->port_num];
	struct be_drv_stats *drvs = &adapter->drv_stats;

	be_dws_le_to_cpu(hw_stats, sizeof(*hw_stats));
	drvs->rx_pause_frames = port_stats->rx_pause_frames;
	drvs->rx_crc_errors = port_stats->rx_crc_errors;
	drvs->rx_control_frames = port_stats->rx_control_frames;
	drvs->rx_in_range_errors = port_stats->rx_in_range_errors;
	drvs->rx_frame_too_long = port_stats->rx_frame_too_long;
	drvs->rx_dropped_runt = port_stats->rx_dropped_runt;
	drvs->rx_ip_checksum_errs = port_stats->rx_ip_checksum_errs;
	drvs->rx_tcp_checksum_errs = port_stats->rx_tcp_checksum_errs;
	drvs->rx_udp_checksum_errs = port_stats->rx_udp_checksum_errs;
	drvs->rx_dropped_tcp_length = port_stats->rx_dropped_tcp_length;
	drvs->rx_dropped_too_small = port_stats->rx_dropped_too_small;
	drvs->rx_dropped_too_short = port_stats->rx_dropped_too_short;
	drvs->rx_out_range_errors = port_stats->rx_out_range_errors;
	drvs->rx_dropped_header_too_small =
		port_stats->rx_dropped_header_too_small;
	drvs->rx_input_fifo_overflow_drop =
		port_stats->rx_input_fifo_overflow_drop;
	drvs->rx_address_match_errors = port_stats->rx_address_match_errors;
	drvs->rx_alignment_symbol_errors =
		port_stats->rx_alignment_symbol_errors;
	drvs->rxpp_fifo_overflow_drop = port_stats->rxpp_fifo_overflow_drop;
	drvs->tx_pauseframes = port_stats->tx_pauseframes;
	drvs->tx_controlframes = port_stats->tx_controlframes;
	drvs->jabber_events = port_stats->jabber_events;
	drvs->rx_drops_no_pbuf = rxf_stats->rx_drops_no_pbuf;
	drvs->rx_drops_no_txpb = rxf_stats->rx_drops_no_txpb;
	drvs->rx_drops_no_erx_descr = rxf_stats->rx_drops_no_erx_descr;
	drvs->rx_drops_invalid_ring = rxf_stats->rx_drops_invalid_ring;
	drvs->forwarded_packets = rxf_stats->forwarded_packets;
	drvs->rx_drops_mtu = rxf_stats->rx_drops_mtu;
	drvs->rx_drops_no_tpre_descr = rxf_stats->rx_drops_no_tpre_descr;
	drvs->rx_drops_too_many_frags = rxf_stats->rx_drops_too_many_frags;
	adapter->drv_stats.eth_red_drops = pmem_sts->eth_red_drops;
}
342
/* Copy Lancer per-physical-port (pport) counters into adapter->drv_stats.
 * Lancer keeps 64-bit counters; only the low 32 bits (_lo fields) are
 * folded into the 32-bit drv_stats fields here.
 */
static void populate_lancer_stats(struct be_adapter *adapter)
{

	struct be_drv_stats *drvs = &adapter->drv_stats;
	struct lancer_pport_stats *pport_stats =
					pport_stats_from_cmd(adapter);

	be_dws_le_to_cpu(pport_stats, sizeof(*pport_stats));
	drvs->rx_pause_frames = pport_stats->rx_pause_frames_lo;
	drvs->rx_crc_errors = pport_stats->rx_crc_errors_lo;
	drvs->rx_control_frames = pport_stats->rx_control_frames_lo;
	drvs->rx_in_range_errors = pport_stats->rx_in_range_errors;
	drvs->rx_frame_too_long = pport_stats->rx_frames_too_long_lo;
	drvs->rx_dropped_runt = pport_stats->rx_dropped_runt;
	drvs->rx_ip_checksum_errs = pport_stats->rx_ip_checksum_errors;
	drvs->rx_tcp_checksum_errs = pport_stats->rx_tcp_checksum_errors;
	drvs->rx_udp_checksum_errs = pport_stats->rx_udp_checksum_errors;
	drvs->rx_dropped_tcp_length =
				pport_stats->rx_dropped_invalid_tcp_length;
	drvs->rx_dropped_too_small = pport_stats->rx_dropped_too_small;
	drvs->rx_dropped_too_short = pport_stats->rx_dropped_too_short;
	drvs->rx_out_range_errors = pport_stats->rx_out_of_range_errors;
	drvs->rx_dropped_header_too_small =
				pport_stats->rx_dropped_header_too_small;
	/* Lancer has a single FIFO-overflow counter; it feeds both
	 * input-fifo and rxpp drop fields below.
	 */
	drvs->rx_input_fifo_overflow_drop = pport_stats->rx_fifo_overflow;
	drvs->rx_address_match_errors = pport_stats->rx_address_match_errors;
	drvs->rx_alignment_symbol_errors = pport_stats->rx_symbol_errors_lo;
	drvs->rxpp_fifo_overflow_drop = pport_stats->rx_fifo_overflow;
	drvs->tx_pauseframes = pport_stats->tx_pause_frames_lo;
	drvs->tx_controlframes = pport_stats->tx_control_frames_lo;
	drvs->jabber_events = pport_stats->rx_jabbers;
	drvs->rx_drops_invalid_ring = pport_stats->rx_drops_invalid_queue;
	drvs->forwarded_packets = pport_stats->num_forwards_lo;
	drvs->rx_drops_mtu = pport_stats->rx_drops_mtu_lo;
	drvs->rx_drops_too_many_frags =
				pport_stats->rx_drops_too_many_frags_lo;
}
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000380
Sathya Perla09c1c682011-08-22 19:41:53 +0000381static void accumulate_16bit_val(u32 *acc, u16 val)
382{
383#define lo(x) (x & 0xFFFF)
384#define hi(x) (x & 0xFFFF0000)
385 bool wrapped = val < lo(*acc);
386 u32 newacc = hi(*acc) + val;
387
388 if (wrapped)
389 newacc += 65536;
390 ACCESS_ONCE(*acc) = newacc;
391}
392
/* Decode the latest FW stats response into driver counters, dispatching on
 * chip generation (BE2 / BE3 / Lancer), then fold the per-RX-queue
 * no-fragments drop counter into its 32-bit accumulator.
 */
void be_parse_stats(struct be_adapter *adapter)
{
	struct be_erx_stats_v1 *erx = be_erx_stats_from_cmd(adapter);
	struct be_rx_obj *rxo;
	int i;

	if (adapter->generation == BE_GEN3) {
		if (lancer_chip(adapter))
			populate_lancer_stats(adapter);
		else
			populate_be3_stats(adapter);
	} else {
		populate_be2_stats(adapter);
	}

	/* as erx_v1 is longer than v0, ok to use v1 defn for v0 access */
	for_all_rx_queues(adapter, rxo, i) {
		/* below erx HW counter can actually wrap around after
		 * 65535. Driver accumulates a 32-bit value
		 */
		accumulate_16bit_val(&rx_stats(rxo)->rx_drops_no_frags,
				(u16)erx->rx_drops_no_fragments[rxo->q.id]);
	}
}
417
/* ndo_get_stats64 handler: aggregate per-queue packet/byte counters and
 * the driver error counters into @stats. Per-queue u64 counters are read
 * under the u64_stats seqcount retry loop so 32-bit hosts see consistent
 * snapshots. Returns @stats for the caller's convenience.
 */
static struct rtnl_link_stats64 *be_get_stats64(struct net_device *netdev,
					struct rtnl_link_stats64 *stats)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_drv_stats *drvs = &adapter->drv_stats;
	struct be_rx_obj *rxo;
	struct be_tx_obj *txo;
	u64 pkts, bytes;
	unsigned int start;
	int i;

	for_all_rx_queues(adapter, rxo, i) {
		/* NB: local name deliberately reuses the rx_stats() accessor
		 * name; later rx_stats(rxo) uses still expand the macro */
		const struct be_rx_stats *rx_stats = rx_stats(rxo);
		do {
			start = u64_stats_fetch_begin_bh(&rx_stats->sync);
			pkts = rx_stats(rxo)->rx_pkts;
			bytes = rx_stats(rxo)->rx_bytes;
		} while (u64_stats_fetch_retry_bh(&rx_stats->sync, start));
		stats->rx_packets += pkts;
		stats->rx_bytes += bytes;
		stats->multicast += rx_stats(rxo)->rx_mcast_pkts;
		stats->rx_dropped += rx_stats(rxo)->rx_drops_no_skbs +
					rx_stats(rxo)->rx_drops_no_frags;
	}

	for_all_tx_queues(adapter, txo, i) {
		const struct be_tx_stats *tx_stats = tx_stats(txo);
		do {
			start = u64_stats_fetch_begin_bh(&tx_stats->sync);
			pkts = tx_stats(txo)->tx_pkts;
			bytes = tx_stats(txo)->tx_bytes;
		} while (u64_stats_fetch_retry_bh(&tx_stats->sync, start));
		stats->tx_packets += pkts;
		stats->tx_bytes += bytes;
	}

	/* bad pkts received */
	stats->rx_errors = drvs->rx_crc_errors +
		drvs->rx_alignment_symbol_errors +
		drvs->rx_in_range_errors +
		drvs->rx_out_range_errors +
		drvs->rx_frame_too_long +
		drvs->rx_dropped_too_small +
		drvs->rx_dropped_too_short +
		drvs->rx_dropped_header_too_small +
		drvs->rx_dropped_tcp_length +
		drvs->rx_dropped_runt;

	/* detailed rx errors */
	stats->rx_length_errors = drvs->rx_in_range_errors +
		drvs->rx_out_range_errors +
		drvs->rx_frame_too_long;

	stats->rx_crc_errors = drvs->rx_crc_errors;

	/* frame alignment errors */
	stats->rx_frame_errors = drvs->rx_alignment_symbol_errors;

	/* receiver fifo overrun */
	/* drops_no_pbuf is no per i/f, it's per BE card */
	stats->rx_fifo_errors = drvs->rxpp_fifo_overflow_drop +
				drvs->rx_input_fifo_overflow_drop +
				drvs->rx_drops_no_pbuf;
	return stats;
}
483
Sathya Perlaea172a02011-08-02 19:57:42 +0000484void be_link_status_update(struct be_adapter *adapter, u32 link_status)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700485{
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700486 struct net_device *netdev = adapter->netdev;
487
Sathya Perlaea172a02011-08-02 19:57:42 +0000488 /* when link status changes, link speed must be re-queried from card */
489 adapter->link_speed = -1;
490 if ((link_status & LINK_STATUS_MASK) == LINK_UP) {
491 netif_carrier_on(netdev);
492 dev_info(&adapter->pdev->dev, "%s: Link up\n", netdev->name);
493 } else {
494 netif_carrier_off(netdev);
495 dev_info(&adapter->pdev->dev, "%s: Link down\n", netdev->name);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700496 }
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700497}
498
Sathya Perla3c8def92011-06-12 20:01:58 +0000499static void be_tx_stats_update(struct be_tx_obj *txo,
Ajit Khaparde91992e42010-02-19 13:57:12 +0000500 u32 wrb_cnt, u32 copied, u32 gso_segs, bool stopped)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700501{
Sathya Perla3c8def92011-06-12 20:01:58 +0000502 struct be_tx_stats *stats = tx_stats(txo);
503
Sathya Perlaab1594e2011-07-25 19:10:15 +0000504 u64_stats_update_begin(&stats->sync);
Sathya Perlaac124ff2011-07-25 19:10:14 +0000505 stats->tx_reqs++;
506 stats->tx_wrbs += wrb_cnt;
507 stats->tx_bytes += copied;
508 stats->tx_pkts += (gso_segs ? gso_segs : 1);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700509 if (stopped)
Sathya Perlaac124ff2011-07-25 19:10:14 +0000510 stats->tx_stops++;
Sathya Perlaab1594e2011-07-25 19:10:15 +0000511 u64_stats_update_end(&stats->sync);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700512}
513
514/* Determine number of WRB entries needed to xmit data in an skb */
Sathya Perlafe6d2a32010-11-21 23:25:50 +0000515static u32 wrb_cnt_for_skb(struct be_adapter *adapter, struct sk_buff *skb,
516 bool *dummy)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700517{
David S. Millerebc8d2a2009-06-09 01:01:31 -0700518 int cnt = (skb->len > skb->data_len);
519
520 cnt += skb_shinfo(skb)->nr_frags;
521
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700522 /* to account for hdr wrb */
523 cnt++;
Sathya Perlafe6d2a32010-11-21 23:25:50 +0000524 if (lancer_chip(adapter) || !(cnt & 1)) {
525 *dummy = false;
526 } else {
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700527 /* add a dummy to make it an even num */
528 cnt++;
529 *dummy = true;
Sathya Perlafe6d2a32010-11-21 23:25:50 +0000530 }
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700531 BUG_ON(cnt > BE_MAX_TX_FRAG_COUNT);
532 return cnt;
533}
534
/* Fill one TX fragment WRB with a buffer's bus address and length.
 * The 64-bit DMA address is split across the hi/lo descriptor words and
 * the length is clipped to the descriptor field width.
 */
static inline void wrb_fill(struct be_eth_wrb *wrb, u64 addr, int len)
{
	wrb->frag_pa_hi = upper_32_bits(addr);
	wrb->frag_pa_lo = addr & 0xFFFFFFFF;
	wrb->frag_len = len & ETH_WRB_FRAG_LEN_MASK;
}
541
/* Build the TX header WRB for @skb: program CRC, LSO/checksum-offload,
 * VLAN and completion-event bits, plus the total WRB count and payload
 * length for this request.
 */
static void wrb_fill_hdr(struct be_adapter *adapter, struct be_eth_hdr_wrb *hdr,
		struct sk_buff *skb, u32 wrb_cnt, u32 len)
{
	u8 vlan_prio = 0;
	u16 vlan_tag = 0;

	memset(hdr, 0, sizeof(*hdr));

	AMAP_SET_BITS(struct amap_eth_hdr_wrb, crc, hdr, 1);

	if (skb_is_gso(skb)) {
		/* LSO request: give HW the MSS; lso6 only on BE chips */
		AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso, hdr, 1);
		AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso_mss,
			hdr, skb_shinfo(skb)->gso_size);
		if (skb_is_gso_v6(skb) && !lancer_chip(adapter))
			AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso6, hdr, 1);
		/* Lancer A0 needs explicit csum bits even for LSO */
		if (lancer_chip(adapter) && adapter->sli_family ==
							LANCER_A0_SLI_FAMILY) {
			AMAP_SET_BITS(struct amap_eth_hdr_wrb, ipcs, hdr, 1);
			if (is_tcp_pkt(skb))
				AMAP_SET_BITS(struct amap_eth_hdr_wrb,
								tcpcs, hdr, 1);
			else if (is_udp_pkt(skb))
				AMAP_SET_BITS(struct amap_eth_hdr_wrb,
								udpcs, hdr, 1);
		}
	} else if (skb->ip_summed == CHECKSUM_PARTIAL) {
		/* non-LSO checksum offload */
		if (is_tcp_pkt(skb))
			AMAP_SET_BITS(struct amap_eth_hdr_wrb, tcpcs, hdr, 1);
		else if (is_udp_pkt(skb))
			AMAP_SET_BITS(struct amap_eth_hdr_wrb, udpcs, hdr, 1);
	}

	if (vlan_tx_tag_present(skb)) {
		AMAP_SET_BITS(struct amap_eth_hdr_wrb, vlan, hdr, 1);
		vlan_tag = vlan_tx_tag_get(skb);
		vlan_prio = (vlan_tag & VLAN_PRIO_MASK) >> VLAN_PRIO_SHIFT;
		/* If vlan priority provided by OS is NOT in available bmap */
		if (!(adapter->vlan_prio_bmap & (1 << vlan_prio)))
			vlan_tag = (vlan_tag & ~VLAN_PRIO_MASK) |
					adapter->recommended_prio;
		AMAP_SET_BITS(struct amap_eth_hdr_wrb, vlan_tag, hdr, vlan_tag);
	}

	AMAP_SET_BITS(struct amap_eth_hdr_wrb, event, hdr, 1);
	AMAP_SET_BITS(struct amap_eth_hdr_wrb, complete, hdr, 1);
	AMAP_SET_BITS(struct amap_eth_hdr_wrb, num_wrb, hdr, wrb_cnt);
	AMAP_SET_BITS(struct amap_eth_hdr_wrb, len, hdr, len);
}
591
Ivan Vecera2b7bceb2011-02-02 08:05:12 +0000592static void unmap_tx_frag(struct device *dev, struct be_eth_wrb *wrb,
Sathya Perla7101e112010-03-22 20:41:12 +0000593 bool unmap_single)
594{
595 dma_addr_t dma;
596
597 be_dws_le_to_cpu(wrb, sizeof(*wrb));
598
599 dma = (u64)wrb->frag_pa_hi << 32 | (u64)wrb->frag_pa_lo;
FUJITA Tomonorib681ee72010-04-04 21:40:18 +0000600 if (wrb->frag_len) {
Sathya Perla7101e112010-03-22 20:41:12 +0000601 if (unmap_single)
Ivan Vecera2b7bceb2011-02-02 08:05:12 +0000602 dma_unmap_single(dev, dma, wrb->frag_len,
603 DMA_TO_DEVICE);
Sathya Perla7101e112010-03-22 20:41:12 +0000604 else
Ivan Vecera2b7bceb2011-02-02 08:05:12 +0000605 dma_unmap_page(dev, dma, wrb->frag_len, DMA_TO_DEVICE);
Sathya Perla7101e112010-03-22 20:41:12 +0000606 }
607}
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700608
/* DMA-map @skb and post its WRBs to @txq: one header WRB (reserved first,
 * filled last once the total copied length is known), one WRB for the
 * linear data, one per page fragment, and an optional trailing dummy WRB.
 *
 * Returns the number of payload bytes posted, or 0 if a DMA mapping
 * failed — in which case the queue head is rewound and every mapping made
 * so far is undone.
 */
static int make_tx_wrbs(struct be_adapter *adapter, struct be_queue_info *txq,
		struct sk_buff *skb, u32 wrb_cnt, bool dummy_wrb)
{
	dma_addr_t busaddr;
	int i, copied = 0;
	struct device *dev = &adapter->pdev->dev;
	struct sk_buff *first_skb = skb;
	struct be_eth_wrb *wrb;
	struct be_eth_hdr_wrb *hdr;
	bool map_single = false;
	u16 map_head;

	hdr = queue_head_node(txq);
	queue_head_inc(txq);
	/* remember the first data-WRB slot so the error path can rewind */
	map_head = txq->head;

	if (skb->len > skb->data_len) {
		/* map the linear part of the skb */
		int len = skb_headlen(skb);
		busaddr = dma_map_single(dev, skb->data, len, DMA_TO_DEVICE);
		if (dma_mapping_error(dev, busaddr))
			goto dma_err;
		/* only the first mapping can be a single-mapping */
		map_single = true;
		wrb = queue_head_node(txq);
		wrb_fill(wrb, busaddr, len);
		be_dws_cpu_to_le(wrb, sizeof(*wrb));
		queue_head_inc(txq);
		copied += len;
	}

	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		struct skb_frag_struct *frag =
			&skb_shinfo(skb)->frags[i];
		busaddr = skb_frag_dma_map(dev, frag, 0,
					   frag->size, DMA_TO_DEVICE);
		if (dma_mapping_error(dev, busaddr))
			goto dma_err;
		wrb = queue_head_node(txq);
		wrb_fill(wrb, busaddr, frag->size);
		be_dws_cpu_to_le(wrb, sizeof(*wrb));
		queue_head_inc(txq);
		copied += frag->size;
	}

	if (dummy_wrb) {
		/* pad to an even WRB count (see wrb_cnt_for_skb()) */
		wrb = queue_head_node(txq);
		wrb_fill(wrb, 0, 0);
		be_dws_cpu_to_le(wrb, sizeof(*wrb));
		queue_head_inc(txq);
	}

	wrb_fill_hdr(adapter, hdr, first_skb, wrb_cnt, copied);
	be_dws_cpu_to_le(hdr, sizeof(*hdr));

	return copied;
dma_err:
	/* unwind: walk the WRBs already posted (from map_head) and unmap
	 * them; 'copied' counts down the bytes still to be unmapped */
	txq->head = map_head;
	while (copied) {
		wrb = queue_head_node(txq);
		unmap_tx_frag(dev, wrb, map_single);
		map_single = false;	/* only the first WRB was map_single */
		copied -= wrb->frag_len;
		queue_head_inc(txq);
	}
	return 0;
}
674
/* ndo_start_xmit handler: build tx WRBs for @skb, remember the skb for
 * completion processing, and ring the tx doorbell.
 * Always returns NETDEV_TX_OK; if DMA mapping fails the skb is dropped
 * (freed) rather than requeued.
 */
static netdev_tx_t be_xmit(struct sk_buff *skb,
			struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_tx_obj *txo = &adapter->tx_obj[skb_get_queue_mapping(skb)];
	struct be_queue_info *txq = &txo->q;
	u32 wrb_cnt = 0, copied = 0;
	u32 start = txq->head;	/* saved for rollback and for indexing sent_skb_list */
	bool dummy_wrb, stopped = false;

	wrb_cnt = wrb_cnt_for_skb(adapter, skb, &dummy_wrb);

	copied = make_tx_wrbs(adapter, txq, skb, wrb_cnt, dummy_wrb);
	if (copied) {
		/* record the sent skb in the sent_skb table */
		BUG_ON(txo->sent_skb_list[start]);
		txo->sent_skb_list[start] = skb;

		/* Ensure txq has space for the next skb; Else stop the queue
		 * *BEFORE* ringing the tx doorbell, so that we serialze the
		 * tx compls of the current transmit which'll wake up the queue
		 */
		atomic_add(wrb_cnt, &txq->used);
		if ((BE_MAX_TX_FRAG_COUNT + atomic_read(&txq->used)) >=
								txq->len) {
			netif_stop_subqueue(netdev, skb_get_queue_mapping(skb));
			stopped = true;
		}

		be_txq_notify(adapter, txq->id, wrb_cnt);

		be_tx_stats_update(txo, wrb_cnt, copied,
				skb_shinfo(skb)->gso_segs, stopped);
	} else {
		/* DMA mapping failed: make_tx_wrbs already unmapped; just
		 * roll back the queue head and drop the skb */
		txq->head = start;
		dev_kfree_skb_any(skb);
	}
	return NETDEV_TX_OK;
}
714
715static int be_change_mtu(struct net_device *netdev, int new_mtu)
716{
717 struct be_adapter *adapter = netdev_priv(netdev);
718 if (new_mtu < BE_MIN_MTU ||
Ajit Khaparde34a89b82010-02-09 01:32:43 +0000719 new_mtu > (BE_MAX_JUMBO_FRAME_SIZE -
720 (ETH_HLEN + ETH_FCS_LEN))) {
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700721 dev_info(&adapter->pdev->dev,
722 "MTU must be between %d and %d bytes\n",
Ajit Khaparde34a89b82010-02-09 01:32:43 +0000723 BE_MIN_MTU,
724 (BE_MAX_JUMBO_FRAME_SIZE - (ETH_HLEN + ETH_FCS_LEN)));
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700725 return -EINVAL;
726 }
727 dev_info(&adapter->pdev->dev, "MTU changed from %d to %d bytes\n",
728 netdev->mtu, new_mtu);
729 netdev->mtu = new_mtu;
730 return 0;
731}
732
/*
 * A max of 64 (BE_NUM_VLANS_SUPPORTED) vlans can be configured in BE.
 * If the user configures more, place BE in vlan promiscuous mode.
 */
static int be_vid_config(struct be_adapter *adapter, bool vf, u32 vf_num)
{
	u16 vtag[BE_NUM_VLANS_SUPPORTED];
	u16 ntags = 0, i;
	int status = 0;
	u32 if_handle;

	/* For a VF, program just that VF's single vlan tag on the VF's own
	 * interface handle */
	if (vf) {
		if_handle = adapter->vf_cfg[vf_num].vf_if_handle;
		vtag[0] = cpu_to_le16(adapter->vf_cfg[vf_num].vf_vlan_tag);
		status = be_cmd_vlan_config(adapter, if_handle, vtag, 1, 1, 0);
	}

	/* No need to further configure vids if in promiscuous mode */
	if (adapter->promiscuous)
		return 0;

	if (adapter->vlans_added <= adapter->max_vlans) {
		/* Construct VLAN Table to give to HW */
		/* NOTE(review): vtag[] holds only BE_NUM_VLANS_SUPPORTED
		 * entries; presumably vlans_added <= max_vlans bounds ntags.
		 * Confirm max_vlans <= BE_NUM_VLANS_SUPPORTED. */
		for (i = 0; i < VLAN_N_VID; i++) {
			if (adapter->vlan_tag[i]) {
				vtag[ntags] = cpu_to_le16(i);
				ntags++;
			}
		}
		status = be_cmd_vlan_config(adapter, adapter->if_handle,
					vtag, ntags, 1, 0);
	} else {
		/* Too many vlans configured: fall back to vlan promiscuous */
		status = be_cmd_vlan_config(adapter, adapter->if_handle,
					NULL, 0, 1, 1);
	}

	return status;
}
771
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700772static void be_vlan_add_vid(struct net_device *netdev, u16 vid)
773{
774 struct be_adapter *adapter = netdev_priv(netdev);
775
Ajit Khaparde1da87b72010-07-23 01:51:22 +0000776 adapter->vlans_added++;
Sarveshwar Bandiba343c72010-03-31 02:56:12 +0000777 if (!be_physfn(adapter))
778 return;
779
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700780 adapter->vlan_tag[vid] = 1;
Ajit Khaparde82903e42010-02-09 01:34:57 +0000781 if (adapter->vlans_added <= (adapter->max_vlans + 1))
Ajit Khaparde1da87b72010-07-23 01:51:22 +0000782 be_vid_config(adapter, false, 0);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700783}
784
785static void be_vlan_rem_vid(struct net_device *netdev, u16 vid)
786{
787 struct be_adapter *adapter = netdev_priv(netdev);
788
Ajit Khaparde1da87b72010-07-23 01:51:22 +0000789 adapter->vlans_added--;
Ajit Khaparde1da87b72010-07-23 01:51:22 +0000790
Sarveshwar Bandiba343c72010-03-31 02:56:12 +0000791 if (!be_physfn(adapter))
792 return;
793
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700794 adapter->vlan_tag[vid] = 0;
Ajit Khaparde82903e42010-02-09 01:34:57 +0000795 if (adapter->vlans_added <= adapter->max_vlans)
Ajit Khaparde1da87b72010-07-23 01:51:22 +0000796 be_vid_config(adapter, false, 0);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700797}
798
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700799static void be_set_multicast_list(struct net_device *netdev)
800{
801 struct be_adapter *adapter = netdev_priv(netdev);
802
803 if (netdev->flags & IFF_PROMISC) {
Sathya Perla5b8821b2011-08-02 19:57:44 +0000804 be_cmd_rx_filter(adapter, IFF_PROMISC, ON);
Sathya Perla24307ee2009-06-18 00:09:25 +0000805 adapter->promiscuous = true;
806 goto done;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700807 }
Sathya Perla24307ee2009-06-18 00:09:25 +0000808
Lucas De Marchi25985ed2011-03-30 22:57:33 -0300809 /* BE was previously in promiscuous mode; disable it */
Sathya Perla24307ee2009-06-18 00:09:25 +0000810 if (adapter->promiscuous) {
811 adapter->promiscuous = false;
Sathya Perla5b8821b2011-08-02 19:57:44 +0000812 be_cmd_rx_filter(adapter, IFF_PROMISC, OFF);
Sathya Perlac0e64ef2011-08-02 19:57:43 +0000813
814 if (adapter->vlans_added)
815 be_vid_config(adapter, false, 0);
Sathya Perla24307ee2009-06-18 00:09:25 +0000816 }
817
Sathya Perlae7b909a2009-11-22 22:01:10 +0000818 /* Enable multicast promisc if num configured exceeds what we support */
Jiri Pirko4cd24ea2010-02-08 04:30:35 +0000819 if (netdev->flags & IFF_ALLMULTI ||
Sathya Perla5b8821b2011-08-02 19:57:44 +0000820 netdev_mc_count(netdev) > BE_MAX_MC) {
821 be_cmd_rx_filter(adapter, IFF_ALLMULTI, ON);
Sathya Perla24307ee2009-06-18 00:09:25 +0000822 goto done;
823 }
824
Sathya Perla5b8821b2011-08-02 19:57:44 +0000825 be_cmd_rx_filter(adapter, IFF_MULTICAST, ON);
Sathya Perla24307ee2009-06-18 00:09:25 +0000826done:
827 return;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700828}
829
/* ndo_set_vf_mac handler: replace the MAC programmed for VF @vf.
 * Deletes the previously added pmac entry (if any), then adds @mac on the
 * VF's interface handle. Returns 0 on success or a negative errno.
 */
static int be_set_vf_mac(struct net_device *netdev, int vf, u8 *mac)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	int status;

	if (!adapter->sriov_enabled)
		return -EPERM;

	if (!is_valid_ether_addr(mac) || (vf >= num_vfs))
		return -EINVAL;

	/* NOTE(review): a pmac_del failure is ignored -- status is
	 * immediately overwritten by the pmac_add below. Presumably
	 * best-effort on purpose; confirm. */
	if (adapter->vf_cfg[vf].vf_pmac_id != BE_INVALID_PMAC_ID)
		status = be_cmd_pmac_del(adapter,
				adapter->vf_cfg[vf].vf_if_handle,
				adapter->vf_cfg[vf].vf_pmac_id, vf + 1);

	status = be_cmd_pmac_add(adapter, mac,
			adapter->vf_cfg[vf].vf_if_handle,
			&adapter->vf_cfg[vf].vf_pmac_id, vf + 1);

	/* Cache the MAC locally only if the FW accepted it */
	if (status)
		dev_err(&adapter->pdev->dev, "MAC %pM set on VF %d Failed\n",
				mac, vf);
	else
		memcpy(adapter->vf_cfg[vf].vf_mac_addr, mac, ETH_ALEN);

	return status;
}
858
Ajit Khaparde64600ea2010-07-23 01:50:34 +0000859static int be_get_vf_config(struct net_device *netdev, int vf,
860 struct ifla_vf_info *vi)
861{
862 struct be_adapter *adapter = netdev_priv(netdev);
863
864 if (!adapter->sriov_enabled)
865 return -EPERM;
866
867 if (vf >= num_vfs)
868 return -EINVAL;
869
870 vi->vf = vf;
Ajit Khapardee1d18732010-07-23 01:52:13 +0000871 vi->tx_rate = adapter->vf_cfg[vf].vf_tx_rate;
Ajit Khaparde1da87b72010-07-23 01:51:22 +0000872 vi->vlan = adapter->vf_cfg[vf].vf_vlan_tag;
Ajit Khaparde64600ea2010-07-23 01:50:34 +0000873 vi->qos = 0;
874 memcpy(&vi->mac, adapter->vf_cfg[vf].vf_mac_addr, ETH_ALEN);
875
876 return 0;
877}
878
Ajit Khaparde1da87b72010-07-23 01:51:22 +0000879static int be_set_vf_vlan(struct net_device *netdev,
880 int vf, u16 vlan, u8 qos)
881{
882 struct be_adapter *adapter = netdev_priv(netdev);
883 int status = 0;
884
885 if (!adapter->sriov_enabled)
886 return -EPERM;
887
888 if ((vf >= num_vfs) || (vlan > 4095))
889 return -EINVAL;
890
891 if (vlan) {
892 adapter->vf_cfg[vf].vf_vlan_tag = vlan;
893 adapter->vlans_added++;
894 } else {
895 adapter->vf_cfg[vf].vf_vlan_tag = 0;
896 adapter->vlans_added--;
897 }
898
899 status = be_vid_config(adapter, true, vf);
900
901 if (status)
902 dev_info(&adapter->pdev->dev,
903 "VLAN %d config on VF %d failed\n", vlan, vf);
904 return status;
905}
906
Ajit Khapardee1d18732010-07-23 01:52:13 +0000907static int be_set_vf_tx_rate(struct net_device *netdev,
908 int vf, int rate)
909{
910 struct be_adapter *adapter = netdev_priv(netdev);
911 int status = 0;
912
913 if (!adapter->sriov_enabled)
914 return -EPERM;
915
916 if ((vf >= num_vfs) || (rate < 0))
917 return -EINVAL;
918
919 if (rate > 10000)
920 rate = 10000;
921
922 adapter->vf_cfg[vf].vf_tx_rate = rate;
Ajit Khaparde856c4012011-02-11 13:32:32 +0000923 status = be_cmd_set_qos(adapter, rate / 10, vf + 1);
Ajit Khapardee1d18732010-07-23 01:52:13 +0000924
925 if (status)
926 dev_info(&adapter->pdev->dev,
927 "tx rate %d on VF %d failed\n", rate, vf);
928 return status;
929}
930
/* Adaptive interrupt coalescing (AIC): at most once a second, recompute
 * the event-queue delay (eqd) from the observed rx packet rate and
 * program it into the HW if it changed.
 */
static void be_rx_eqd_update(struct be_adapter *adapter, struct be_rx_obj *rxo)
{
	struct be_eq_obj *rx_eq = &rxo->rx_eq;
	struct be_rx_stats *stats = rx_stats(rxo);
	ulong now = jiffies;
	ulong delta = now - stats->rx_jiffies;
	u64 pkts;
	unsigned int start, eqd;

	if (!rx_eq->enable_aic)
		return;

	/* Wrapped around */
	if (time_before(now, stats->rx_jiffies)) {
		stats->rx_jiffies = now;
		return;
	}

	/* Update once a second */
	if (delta < HZ)
		return;

	/* Snapshot the 64-bit packet counter consistently (stats-seqlock
	 * reader side; the writer is be_rx_stats_update) */
	do {
		start = u64_stats_fetch_begin_bh(&stats->sync);
		pkts = stats->rx_pkts;
	} while (u64_stats_fetch_retry_bh(&stats->sync, start));

	stats->rx_pps = (unsigned long)(pkts - stats->rx_pkts_prev) / (delta / HZ);
	stats->rx_pkts_prev = pkts;
	stats->rx_jiffies = now;
	/* Scale pkts/sec down to an eqd value, clamp to the eq's
	 * [min_eqd, max_eqd] range, and snap small values to 0 (no delay) */
	eqd = stats->rx_pps / 110000;
	eqd = eqd << 3;
	if (eqd > rx_eq->max_eqd)
		eqd = rx_eq->max_eqd;
	if (eqd < rx_eq->min_eqd)
		eqd = rx_eq->min_eqd;
	if (eqd < 10)
		eqd = 0;
	/* Touch the HW only when the value actually changed */
	if (eqd != rx_eq->cur_eqd) {
		be_cmd_modify_eqd(adapter, rx_eq->q.id, eqd);
		rx_eq->cur_eqd = eqd;
	}
}
974
Sathya Perla3abcded2010-10-03 22:12:27 -0700975static void be_rx_stats_update(struct be_rx_obj *rxo,
Sathya Perla2e588f82011-03-11 02:49:26 +0000976 struct be_rx_compl_info *rxcp)
Sathya Perla4097f662009-03-24 16:40:13 -0700977{
Sathya Perlaac124ff2011-07-25 19:10:14 +0000978 struct be_rx_stats *stats = rx_stats(rxo);
Sathya Perla4097f662009-03-24 16:40:13 -0700979
Sathya Perlaab1594e2011-07-25 19:10:15 +0000980 u64_stats_update_begin(&stats->sync);
Sathya Perla3abcded2010-10-03 22:12:27 -0700981 stats->rx_compl++;
Sathya Perla2e588f82011-03-11 02:49:26 +0000982 stats->rx_bytes += rxcp->pkt_size;
Sathya Perla3abcded2010-10-03 22:12:27 -0700983 stats->rx_pkts++;
Sathya Perla2e588f82011-03-11 02:49:26 +0000984 if (rxcp->pkt_type == BE_MULTICAST_PACKET)
Sathya Perla3abcded2010-10-03 22:12:27 -0700985 stats->rx_mcast_pkts++;
Sathya Perla2e588f82011-03-11 02:49:26 +0000986 if (rxcp->err)
Sathya Perlaac124ff2011-07-25 19:10:14 +0000987 stats->rx_compl_err++;
Sathya Perlaab1594e2011-07-25 19:10:15 +0000988 u64_stats_update_end(&stats->sync);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700989}
990
Sathya Perla2e588f82011-03-11 02:49:26 +0000991static inline bool csum_passed(struct be_rx_compl_info *rxcp)
Ajit Khaparde728a9972009-04-13 15:41:22 -0700992{
Padmanabh Ratnakar19fad862011-03-07 03:08:16 +0000993 /* L4 checksum is not reliable for non TCP/UDP packets.
994 * Also ignore ipcksm for ipv6 pkts */
Sathya Perla2e588f82011-03-11 02:49:26 +0000995 return (rxcp->tcpf || rxcp->udpf) && rxcp->l4_csum &&
996 (rxcp->ip_csum || rxcp->ipv6);
Ajit Khaparde728a9972009-04-13 15:41:22 -0700997}
998
/* Return the rx page_info entry at @frag_idx and account for its
 * consumption (decrements rxq->used). The caller takes over the page
 * reference held by the entry.
 */
static struct be_rx_page_info *
get_rx_page_info(struct be_adapter *adapter,
		struct be_rx_obj *rxo,
		u16 frag_idx)
{
	struct be_rx_page_info *rx_page_info;
	struct be_queue_info *rxq = &rxo->q;

	rx_page_info = &rxo->page_info_tbl[frag_idx];
	BUG_ON(!rx_page_info->page);

	/* The big_page_size DMA mapping is shared by sibling frags of the
	 * same page; unmap it only on the entry flagged last_page_user */
	if (rx_page_info->last_page_user) {
		dma_unmap_page(&adapter->pdev->dev,
			       dma_unmap_addr(rx_page_info, bus),
			       adapter->big_page_size, DMA_FROM_DEVICE);
		rx_page_info->last_page_user = false;
	}

	atomic_dec(&rxq->used);
	return rx_page_info;
}
1020
1021/* Throwaway the data in the Rx completion */
1022static void be_rx_compl_discard(struct be_adapter *adapter,
Sathya Perla3abcded2010-10-03 22:12:27 -07001023 struct be_rx_obj *rxo,
Sathya Perla2e588f82011-03-11 02:49:26 +00001024 struct be_rx_compl_info *rxcp)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001025{
Sathya Perla3abcded2010-10-03 22:12:27 -07001026 struct be_queue_info *rxq = &rxo->q;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001027 struct be_rx_page_info *page_info;
Sathya Perla2e588f82011-03-11 02:49:26 +00001028 u16 i, num_rcvd = rxcp->num_rcvd;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001029
Padmanabh Ratnakare80d9da2011-03-07 03:07:58 +00001030 for (i = 0; i < num_rcvd; i++) {
Sathya Perla2e588f82011-03-11 02:49:26 +00001031 page_info = get_rx_page_info(adapter, rxo, rxcp->rxq_idx);
Padmanabh Ratnakare80d9da2011-03-07 03:07:58 +00001032 put_page(page_info->page);
1033 memset(page_info, 0, sizeof(*page_info));
Sathya Perla2e588f82011-03-11 02:49:26 +00001034 index_inc(&rxcp->rxq_idx, rxq->len);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001035 }
1036}
1037
/*
 * skb_fill_rx_data forms a complete skb for an ether frame
 * indicated by rxcp: up to BE_HDR_LEN bytes of the first frag are copied
 * into the skb's linear area; all remaining data is attached as page
 * frags, coalescing frags that share a physical page into one slot.
 */
static void skb_fill_rx_data(struct be_adapter *adapter, struct be_rx_obj *rxo,
			struct sk_buff *skb, struct be_rx_compl_info *rxcp)
{
	struct be_queue_info *rxq = &rxo->q;
	struct be_rx_page_info *page_info;
	u16 i, j;
	u16 hdr_len, curr_frag_len, remaining;
	u8 *start;

	page_info = get_rx_page_info(adapter, rxo, rxcp->rxq_idx);
	start = page_address(page_info->page) + page_info->page_offset;
	prefetch(start);

	/* Copy data in the first descriptor of this completion */
	curr_frag_len = min(rxcp->pkt_size, rx_frag_size);

	/* Copy the header portion into skb_data */
	hdr_len = min(BE_HDR_LEN, curr_frag_len);
	memcpy(skb->data, start, hdr_len);
	skb->len = curr_frag_len;
	if (curr_frag_len <= BE_HDR_LEN) { /* tiny packet */
		/* Complete packet has now been moved to data */
		put_page(page_info->page);
		skb->data_len = 0;
		skb->tail += curr_frag_len;
	} else {
		/* The rest of the first frag stays in the page as frag 0 */
		skb_shinfo(skb)->nr_frags = 1;
		skb_frag_set_page(skb, 0, page_info->page);
		skb_shinfo(skb)->frags[0].page_offset =
					page_info->page_offset + hdr_len;
		skb_shinfo(skb)->frags[0].size = curr_frag_len - hdr_len;
		skb->data_len = curr_frag_len - hdr_len;
		skb->tail += hdr_len;
	}
	page_info->page = NULL;	/* ref consumed (or released) above */

	if (rxcp->pkt_size <= rx_frag_size) {
		BUG_ON(rxcp->num_rcvd != 1);
		return;
	}

	/* More frags present for this completion */
	index_inc(&rxcp->rxq_idx, rxq->len);
	remaining = rxcp->pkt_size - curr_frag_len;
	for (i = 1, j = 0; i < rxcp->num_rcvd; i++) {
		page_info = get_rx_page_info(adapter, rxo, rxcp->rxq_idx);
		curr_frag_len = min(remaining, rx_frag_size);

		/* Coalesce all frags from the same physical page in one slot */
		if (page_info->page_offset == 0) {
			/* Fresh page */
			j++;
			skb_frag_set_page(skb, j, page_info->page);
			skb_shinfo(skb)->frags[j].page_offset =
							page_info->page_offset;
			skb_shinfo(skb)->frags[j].size = 0;
			skb_shinfo(skb)->nr_frags++;
		} else {
			/* Same page as previous frag: drop the duplicate ref */
			put_page(page_info->page);
		}

		skb_shinfo(skb)->frags[j].size += curr_frag_len;
		skb->len += curr_frag_len;
		skb->data_len += curr_frag_len;

		remaining -= curr_frag_len;
		index_inc(&rxcp->rxq_idx, rxq->len);
		page_info->page = NULL;
	}
	BUG_ON(j > MAX_SKB_FRAGS);
}
1113
/* Process the RX completion indicated by rxcp when GRO is disabled:
 * allocate an skb, fill it from the completion's frags and hand it to
 * the network stack via netif_receive_skb.
 */
static void be_rx_compl_process(struct be_adapter *adapter,
			struct be_rx_obj *rxo,
			struct be_rx_compl_info *rxcp)
{
	struct net_device *netdev = adapter->netdev;
	struct sk_buff *skb;

	skb = netdev_alloc_skb_ip_align(netdev, BE_HDR_LEN);
	if (unlikely(!skb)) {
		/* No skb available: count the drop and recycle the frags */
		rx_stats(rxo)->rx_drops_no_skbs++;
		be_rx_compl_discard(adapter, rxo, rxcp);
		return;
	}

	skb_fill_rx_data(adapter, rxo, skb, rxcp);

	/* Trust the HW checksum only when csum_passed says it is reliable */
	if (likely((netdev->features & NETIF_F_RXCSUM) && csum_passed(rxcp)))
		skb->ip_summed = CHECKSUM_UNNECESSARY;
	else
		skb_checksum_none_assert(skb);

	skb->truesize = skb->len + sizeof(struct sk_buff);
	skb->protocol = eth_type_trans(skb, netdev);
	if (adapter->netdev->features & NETIF_F_RXHASH)
		skb->rxhash = rxcp->rss_hash;


	if (rxcp->vlanf)
		__vlan_hwaccel_put_tag(skb, rxcp->vlan_tag);

	netif_receive_skb(skb);
}
1147
/* Process the RX completion indicated by rxcp when GRO is enabled:
 * attach the completion's frags to napi's recycled skb (no linear copy)
 * and feed it to the GRO engine via napi_gro_frags.
 */
static void be_rx_compl_process_gro(struct be_adapter *adapter,
		struct be_rx_obj *rxo,
		struct be_rx_compl_info *rxcp)
{
	struct be_rx_page_info *page_info;
	struct sk_buff *skb = NULL;
	struct be_queue_info *rxq = &rxo->q;
	struct be_eq_obj *eq_obj = &rxo->rx_eq;
	u16 remaining, curr_frag_len;
	u16 i, j;

	skb = napi_get_frags(&eq_obj->napi);
	if (!skb) {
		/* No skb: recycle the frags back to the rxq */
		be_rx_compl_discard(adapter, rxo, rxcp);
		return;
	}

	remaining = rxcp->pkt_size;
	for (i = 0, j = -1; i < rxcp->num_rcvd; i++) {
		page_info = get_rx_page_info(adapter, rxo, rxcp->rxq_idx);

		curr_frag_len = min(remaining, rx_frag_size);

		/* Coalesce all frags from the same physical page in one slot */
		if (i == 0 || page_info->page_offset == 0) {
			/* First frag or Fresh page */
			j++;
			skb_frag_set_page(skb, j, page_info->page);
			skb_shinfo(skb)->frags[j].page_offset =
							page_info->page_offset;
			skb_shinfo(skb)->frags[j].size = 0;
		} else {
			put_page(page_info->page);
		}
		skb_shinfo(skb)->frags[j].size += curr_frag_len;

		remaining -= curr_frag_len;
		index_inc(&rxcp->rxq_idx, rxq->len);
		memset(page_info, 0, sizeof(*page_info));
	}
	BUG_ON(j > MAX_SKB_FRAGS);

	skb_shinfo(skb)->nr_frags = j + 1;
	skb->len = rxcp->pkt_size;
	skb->data_len = rxcp->pkt_size;
	skb->truesize += rxcp->pkt_size;
	/* NOTE(review): set unconditionally here, unlike the non-GRO path
	 * which checks csum_passed -- presumably callers only take the GRO
	 * path for csum-verified TCP frames; confirm at the call site. */
	skb->ip_summed = CHECKSUM_UNNECESSARY;
	if (adapter->netdev->features & NETIF_F_RXHASH)
		skb->rxhash = rxcp->rss_hash;

	if (rxcp->vlanf)
		__vlan_hwaccel_put_tag(skb, rxcp->vlan_tag);

	napi_gro_frags(&eq_obj->napi);
}
1204
Sathya Perla2e588f82011-03-11 02:49:26 +00001205static void be_parse_rx_compl_v1(struct be_adapter *adapter,
1206 struct be_eth_rx_compl *compl,
1207 struct be_rx_compl_info *rxcp)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001208{
Sathya Perla2e588f82011-03-11 02:49:26 +00001209 rxcp->pkt_size =
1210 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, pktsize, compl);
1211 rxcp->vlanf = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, vtp, compl);
1212 rxcp->err = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, err, compl);
1213 rxcp->tcpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, tcpf, compl);
Padmanabh Ratnakar9ecb42f2011-03-15 14:57:09 -07001214 rxcp->udpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, udpf, compl);
Sathya Perla2e588f82011-03-11 02:49:26 +00001215 rxcp->ip_csum =
1216 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, ipcksm, compl);
1217 rxcp->l4_csum =
1218 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, l4_cksm, compl);
1219 rxcp->ipv6 =
1220 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, ip_version, compl);
1221 rxcp->rxq_idx =
1222 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, fragndx, compl);
1223 rxcp->num_rcvd =
1224 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, numfrags, compl);
1225 rxcp->pkt_type =
1226 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, cast_enc, compl);
Ajit Khaparde4b972912011-04-06 18:07:43 +00001227 rxcp->rss_hash =
1228 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, rsshash, rxcp);
Sathya Perla15d72182011-03-21 20:49:26 +00001229 if (rxcp->vlanf) {
1230 rxcp->vtm = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, vtm,
David S. Miller3c709f82011-05-11 14:26:15 -04001231 compl);
1232 rxcp->vlan_tag = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, vlan_tag,
1233 compl);
Sathya Perla15d72182011-03-21 20:49:26 +00001234 }
Sathya Perla12004ae2011-08-02 19:57:46 +00001235 rxcp->port = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, port, compl);
Sathya Perla2e588f82011-03-11 02:49:26 +00001236}
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001237
Sathya Perla2e588f82011-03-11 02:49:26 +00001238static void be_parse_rx_compl_v0(struct be_adapter *adapter,
1239 struct be_eth_rx_compl *compl,
1240 struct be_rx_compl_info *rxcp)
1241{
1242 rxcp->pkt_size =
1243 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, pktsize, compl);
1244 rxcp->vlanf = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, vtp, compl);
1245 rxcp->err = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, err, compl);
1246 rxcp->tcpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, tcpf, compl);
Padmanabh Ratnakar9ecb42f2011-03-15 14:57:09 -07001247 rxcp->udpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, udpf, compl);
Sathya Perla2e588f82011-03-11 02:49:26 +00001248 rxcp->ip_csum =
1249 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, ipcksm, compl);
1250 rxcp->l4_csum =
1251 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, l4_cksm, compl);
1252 rxcp->ipv6 =
1253 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, ip_version, compl);
1254 rxcp->rxq_idx =
1255 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, fragndx, compl);
1256 rxcp->num_rcvd =
1257 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, numfrags, compl);
1258 rxcp->pkt_type =
1259 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, cast_enc, compl);
Ajit Khaparde4b972912011-04-06 18:07:43 +00001260 rxcp->rss_hash =
1261 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, rsshash, rxcp);
Sathya Perla15d72182011-03-21 20:49:26 +00001262 if (rxcp->vlanf) {
1263 rxcp->vtm = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, vtm,
David S. Miller3c709f82011-05-11 14:26:15 -04001264 compl);
1265 rxcp->vlan_tag = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, vlan_tag,
1266 compl);
Sathya Perla15d72182011-03-21 20:49:26 +00001267 }
Sathya Perla12004ae2011-08-02 19:57:46 +00001268 rxcp->port = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, port, compl);
Sathya Perla2e588f82011-03-11 02:49:26 +00001269}
1270
/* Fetch and parse the next valid rx completion from @rxo's CQ.
 * Returns a pointer to rxo's single, reused be_rx_compl_info, or NULL
 * when no new completion has been posted. Applies vlan-tag quirks and
 * consumes (invalidates) the HW descriptor.
 */
static struct be_rx_compl_info *be_rx_compl_get(struct be_rx_obj *rxo)
{
	struct be_eth_rx_compl *compl = queue_tail_node(&rxo->cq);
	struct be_rx_compl_info *rxcp = &rxo->rxcp;
	struct be_adapter *adapter = rxo->adapter;

	/* For checking the valid bit it is Ok to use either definition as the
	 * valid bit is at the same position in both v0 and v1 Rx compl */
	if (compl->dw[offsetof(struct amap_eth_rx_compl_v1, valid) / 32] == 0)
		return NULL;

	/* Don't read the compl body before the valid bit check above */
	rmb();
	be_dws_le_to_cpu(compl, sizeof(*compl));

	if (adapter->be3_native)
		be_parse_rx_compl_v1(adapter, compl, rxcp);
	else
		be_parse_rx_compl_v0(adapter, compl, rxcp);

	if (rxcp->vlanf) {
		/* vlanf could be wrongly set in some cards.
		 * ignore if vtm is not set */
		/* NOTE(review): 0x400 is a magic function-mode flag that
		 * deserves a named constant -- confirm its meaning */
		if ((adapter->function_mode & 0x400) && !rxcp->vtm)
			rxcp->vlanf = 0;

		if (!lancer_chip(adapter))
			rxcp->vlan_tag = swab16(rxcp->vlan_tag);

		/* Hide the tag when it is the port's pvid and the vid was
		 * never configured locally (adapter->vlan_tag table) */
		if (adapter->pvid == (rxcp->vlan_tag & VLAN_VID_MASK) &&
		    !adapter->vlan_tag[rxcp->vlan_tag])
			rxcp->vlanf = 0;
	}

	/* As the compl has been parsed, reset it; we wont touch it again */
	compl->dw[offsetof(struct amap_eth_rx_compl_v1, valid) / 32] = 0;

	queue_tail_inc(&rxo->cq);
	return rxcp;
}
1310
Eric Dumazet1829b082011-03-01 05:48:12 +00001311static inline struct page *be_alloc_pages(u32 size, gfp_t gfp)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001312{
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001313 u32 order = get_order(size);
Eric Dumazet1829b082011-03-01 05:48:12 +00001314
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001315 if (order > 0)
Eric Dumazet1829b082011-03-01 05:48:12 +00001316 gfp |= __GFP_COMP;
1317 return alloc_pages(gfp, order);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001318}
1319
1320/*
1321 * Allocate a page, split it to fragments of size rx_frag_size and post as
1322 * receive buffers to BE
1323 */
Eric Dumazet1829b082011-03-01 05:48:12 +00001324static void be_post_rx_frags(struct be_rx_obj *rxo, gfp_t gfp)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001325{
Sathya Perla3abcded2010-10-03 22:12:27 -07001326 struct be_adapter *adapter = rxo->adapter;
1327 struct be_rx_page_info *page_info_tbl = rxo->page_info_tbl;
Sathya Perla26d92f92010-01-21 22:52:08 -08001328 struct be_rx_page_info *page_info = NULL, *prev_page_info = NULL;
Sathya Perla3abcded2010-10-03 22:12:27 -07001329 struct be_queue_info *rxq = &rxo->q;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001330 struct page *pagep = NULL;
1331 struct be_eth_rx_d *rxd;
1332 u64 page_dmaaddr = 0, frag_dmaaddr;
1333 u32 posted, page_offset = 0;
1334
Sathya Perla3abcded2010-10-03 22:12:27 -07001335 page_info = &rxo->page_info_tbl[rxq->head];
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001336 for (posted = 0; posted < MAX_RX_POST && !page_info->page; posted++) {
1337 if (!pagep) {
Eric Dumazet1829b082011-03-01 05:48:12 +00001338 pagep = be_alloc_pages(adapter->big_page_size, gfp);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001339 if (unlikely(!pagep)) {
Sathya Perlaac124ff2011-07-25 19:10:14 +00001340 rx_stats(rxo)->rx_post_fail++;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001341 break;
1342 }
Ivan Vecera2b7bceb2011-02-02 08:05:12 +00001343 page_dmaaddr = dma_map_page(&adapter->pdev->dev, pagep,
1344 0, adapter->big_page_size,
1345 DMA_FROM_DEVICE);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001346 page_info->page_offset = 0;
1347 } else {
1348 get_page(pagep);
1349 page_info->page_offset = page_offset + rx_frag_size;
1350 }
1351 page_offset = page_info->page_offset;
1352 page_info->page = pagep;
FUJITA Tomonorifac6da52010-04-01 16:53:22 +00001353 dma_unmap_addr_set(page_info, bus, page_dmaaddr);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001354 frag_dmaaddr = page_dmaaddr + page_info->page_offset;
1355
1356 rxd = queue_head_node(rxq);
1357 rxd->fragpa_lo = cpu_to_le32(frag_dmaaddr & 0xFFFFFFFF);
1358 rxd->fragpa_hi = cpu_to_le32(upper_32_bits(frag_dmaaddr));
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001359
1360 /* Any space left in the current big page for another frag? */
1361 if ((page_offset + rx_frag_size + rx_frag_size) >
1362 adapter->big_page_size) {
1363 pagep = NULL;
1364 page_info->last_page_user = true;
1365 }
Sathya Perla26d92f92010-01-21 22:52:08 -08001366
1367 prev_page_info = page_info;
1368 queue_head_inc(rxq);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001369 page_info = &page_info_tbl[rxq->head];
1370 }
1371 if (pagep)
Sathya Perla26d92f92010-01-21 22:52:08 -08001372 prev_page_info->last_page_user = true;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001373
1374 if (posted) {
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001375 atomic_add(posted, &rxq->used);
Sathya Perla8788fdc2009-07-27 22:52:03 +00001376 be_rxq_notify(adapter, rxq->id, posted);
Sathya Perlaea1dae12009-03-19 23:56:20 -07001377 } else if (atomic_read(&rxq->used) == 0) {
1378 /* Let be_worker replenish when memory is available */
Sathya Perla3abcded2010-10-03 22:12:27 -07001379 rxo->rx_post_starved = true;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001380 }
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001381}
1382
/* Pop the next valid TX completion from tx_cq, or NULL if none pending.
 * The entry's valid dword is cleared and the CQ tail advanced, so each
 * completion is returned exactly once. */
static struct be_eth_tx_compl *be_tx_compl_get(struct be_queue_info *tx_cq)
{
	struct be_eth_tx_compl *txcp = queue_tail_node(tx_cq);

	if (txcp->dw[offsetof(struct amap_eth_tx_compl, valid) / 32] == 0)
		return NULL;

	/* Read barrier: order the valid-bit check before reading the rest
	 * of the DMA'ed completion entry */
	rmb();
	be_dws_le_to_cpu(txcp, sizeof(*txcp));

	/* Mark consumed so the slot reads as invalid next time around */
	txcp->dw[offsetof(struct amap_eth_tx_compl, valid) / 32] = 0;

	queue_tail_inc(tx_cq);
	return txcp;
}
1398
/* Unmap and free one completed TX skb whose WRBs occupy the ring from the
 * current tail up to and including last_index.
 * Returns the number of WRBs consumed (including the header WRB); the
 * caller is responsible for subtracting that from txq->used. */
static u16 be_tx_compl_process(struct be_adapter *adapter,
		struct be_tx_obj *txo, u16 last_index)
{
	struct be_queue_info *txq = &txo->q;
	struct be_eth_wrb *wrb;
	struct sk_buff **sent_skbs = txo->sent_skb_list;
	struct sk_buff *sent_skb;
	u16 cur_index, num_wrbs = 1; /* account for hdr wrb */
	bool unmap_skb_hdr = true;

	/* The skb is recorded at the slot of its header WRB (current tail) */
	sent_skb = sent_skbs[txq->tail];
	BUG_ON(!sent_skb);
	sent_skbs[txq->tail] = NULL;

	/* skip header wrb */
	queue_tail_inc(txq);

	/* Walk the data WRBs; the first one also carries the unmapped
	 * linear header if the skb has headlen */
	do {
		cur_index = txq->tail;
		wrb = queue_tail_node(txq);
		unmap_tx_frag(&adapter->pdev->dev, wrb,
			(unmap_skb_hdr && skb_headlen(sent_skb)));
		unmap_skb_hdr = false;

		num_wrbs++;
		queue_tail_inc(txq);
	} while (cur_index != last_index);

	kfree_skb(sent_skb);
	return num_wrbs;
}
1430
/* Pop the next pending event from the EQ, or NULL if the tail entry is
 * empty. The entry's evt word is converted to CPU order in place; callers
 * must zero it before the slot can be reused. */
static inline struct be_eq_entry *event_get(struct be_eq_obj *eq_obj)
{
	struct be_eq_entry *eqe = queue_tail_node(&eq_obj->q);

	if (!eqe->evt)
		return NULL;

	/* Read barrier: order the non-zero check above before any further
	 * reads of the DMA'ed entry */
	rmb();
	eqe->evt = le32_to_cpu(eqe->evt);
	queue_tail_inc(&eq_obj->q);
	return eqe;
}
1443
1444static int event_handle(struct be_adapter *adapter,
Sathya Perla3c8def92011-06-12 20:01:58 +00001445 struct be_eq_obj *eq_obj,
1446 bool rearm)
Sathya Perla859b1e42009-08-10 03:43:51 +00001447{
1448 struct be_eq_entry *eqe;
1449 u16 num = 0;
1450
1451 while ((eqe = event_get(eq_obj)) != NULL) {
1452 eqe->evt = 0;
1453 num++;
1454 }
1455
1456 /* Deal with any spurious interrupts that come
1457 * without events
1458 */
Sathya Perla3c8def92011-06-12 20:01:58 +00001459 if (!num)
1460 rearm = true;
1461
1462 be_eq_notify(adapter, eq_obj->q.id, rearm, true, num);
Sathya Perla859b1e42009-08-10 03:43:51 +00001463 if (num)
1464 napi_schedule(&eq_obj->napi);
1465
1466 return num;
1467}
1468
1469/* Just read and notify events without processing them.
1470 * Used at the time of destroying event queues */
1471static void be_eq_clean(struct be_adapter *adapter,
1472 struct be_eq_obj *eq_obj)
1473{
1474 struct be_eq_entry *eqe;
1475 u16 num = 0;
1476
1477 while ((eqe = event_get(eq_obj)) != NULL) {
1478 eqe->evt = 0;
1479 num++;
1480 }
1481
1482 if (num)
1483 be_eq_notify(adapter, eq_obj->q.id, false, true, num);
1484}
1485
/* Drain an RX object before teardown: discard all pending completions,
 * then release every still-posted (unused) receive buffer and reset the
 * ring indices. Must only be called once RX traffic has stopped. */
static void be_rx_q_clean(struct be_adapter *adapter, struct be_rx_obj *rxo)
{
	struct be_rx_page_info *page_info;
	struct be_queue_info *rxq = &rxo->q;
	struct be_queue_info *rx_cq = &rxo->cq;
	struct be_rx_compl_info *rxcp;
	u16 tail;

	/* First cleanup pending rx completions */
	while ((rxcp = be_rx_compl_get(rxo)) != NULL) {
		be_rx_compl_discard(adapter, rxo, rxcp);
		be_cq_notify(adapter, rx_cq->id, false, 1);
	}

	/* Then free posted rx buffer that were not used */
	/* The oldest posted buffer sits 'used' slots behind head */
	tail = (rxq->head + rxq->len - atomic_read(&rxq->used)) % rxq->len;
	/* NOTE(review): get_rx_page_info presumably decrements rxq->used,
	 * which is what terminates this loop — confirm in its definition */
	for (; atomic_read(&rxq->used) > 0; index_inc(&tail, rxq->len)) {
		page_info = get_rx_page_info(adapter, rxo, tail);
		put_page(page_info->page);
		memset(page_info, 0, sizeof(*page_info));
	}
	BUG_ON(atomic_read(&rxq->used));
	rxq->tail = rxq->head = 0;
}
1510
/* Drain a TX object before teardown: reap completions for up to ~200ms,
 * then forcibly unmap/free any posted skbs whose completions never came.
 * Must only be called once the TX queue has been stopped. */
static void be_tx_compl_clean(struct be_adapter *adapter,
				struct be_tx_obj *txo)
{
	struct be_queue_info *tx_cq = &txo->cq;
	struct be_queue_info *txq = &txo->q;
	struct be_eth_tx_compl *txcp;
	u16 end_idx, cmpl = 0, timeo = 0, num_wrbs = 0;
	struct sk_buff **sent_skbs = txo->sent_skb_list;
	struct sk_buff *sent_skb;
	bool dummy_wrb;

	/* Wait for a max of 200ms for all the tx-completions to arrive. */
	do {
		while ((txcp = be_tx_compl_get(tx_cq))) {
			end_idx = AMAP_GET_BITS(struct amap_eth_tx_compl,
					wrb_index, txcp);
			num_wrbs += be_tx_compl_process(adapter, txo, end_idx);
			cmpl++;
		}
		if (cmpl) {
			/* Ack the batch without rearming and release the
			 * ring space the reaped WRBs occupied */
			be_cq_notify(adapter, tx_cq->id, false, cmpl);
			atomic_sub(num_wrbs, &txq->used);
			cmpl = 0;
			num_wrbs = 0;
		}

		if (atomic_read(&txq->used) == 0 || ++timeo > 200)
			break;

		mdelay(1);
	} while (true);

	if (atomic_read(&txq->used))
		dev_err(&adapter->pdev->dev, "%d pending tx-completions\n",
			atomic_read(&txq->used));

	/* free posted tx for which compls will never arrive */
	while (atomic_read(&txq->used)) {
		sent_skb = sent_skbs[txq->tail];
		end_idx = txq->tail;
		/* Compute the last WRB index of this skb from its WRB count
		 * (minus one because end_idx starts at the header WRB) */
		index_adv(&end_idx,
			wrb_cnt_for_skb(adapter, sent_skb, &dummy_wrb) - 1,
			txq->len);
		num_wrbs = be_tx_compl_process(adapter, txo, end_idx);
		atomic_sub(num_wrbs, &txq->used);
	}
}
1558
Sathya Perla5fb379e2009-06-18 00:02:59 +00001559static void be_mcc_queues_destroy(struct be_adapter *adapter)
1560{
1561 struct be_queue_info *q;
Sathya Perla5fb379e2009-06-18 00:02:59 +00001562
Sathya Perla8788fdc2009-07-27 22:52:03 +00001563 q = &adapter->mcc_obj.q;
Sathya Perla5fb379e2009-06-18 00:02:59 +00001564 if (q->created)
Sathya Perla8788fdc2009-07-27 22:52:03 +00001565 be_cmd_q_destroy(adapter, q, QTYPE_MCCQ);
Sathya Perla5fb379e2009-06-18 00:02:59 +00001566 be_queue_free(adapter, q);
1567
Sathya Perla8788fdc2009-07-27 22:52:03 +00001568 q = &adapter->mcc_obj.cq;
Sathya Perla5fb379e2009-06-18 00:02:59 +00001569 if (q->created)
Sathya Perla8788fdc2009-07-27 22:52:03 +00001570 be_cmd_q_destroy(adapter, q, QTYPE_CQ);
Sathya Perla5fb379e2009-06-18 00:02:59 +00001571 be_queue_free(adapter, q);
1572}
1573
1574/* Must be called only after TX qs are created as MCC shares TX EQ */
1575static int be_mcc_queues_create(struct be_adapter *adapter)
1576{
1577 struct be_queue_info *q, *cq;
Sathya Perla5fb379e2009-06-18 00:02:59 +00001578
1579 /* Alloc MCC compl queue */
Sathya Perla8788fdc2009-07-27 22:52:03 +00001580 cq = &adapter->mcc_obj.cq;
Sathya Perla5fb379e2009-06-18 00:02:59 +00001581 if (be_queue_alloc(adapter, cq, MCC_CQ_LEN,
Sathya Perlaefd2e402009-07-27 22:53:10 +00001582 sizeof(struct be_mcc_compl)))
Sathya Perla5fb379e2009-06-18 00:02:59 +00001583 goto err;
1584
1585 /* Ask BE to create MCC compl queue; share TX's eq */
Sathya Perla8788fdc2009-07-27 22:52:03 +00001586 if (be_cmd_cq_create(adapter, cq, &adapter->tx_eq.q, false, true, 0))
Sathya Perla5fb379e2009-06-18 00:02:59 +00001587 goto mcc_cq_free;
1588
1589 /* Alloc MCC queue */
Sathya Perla8788fdc2009-07-27 22:52:03 +00001590 q = &adapter->mcc_obj.q;
Sathya Perla5fb379e2009-06-18 00:02:59 +00001591 if (be_queue_alloc(adapter, q, MCC_Q_LEN, sizeof(struct be_mcc_wrb)))
1592 goto mcc_cq_destroy;
1593
1594 /* Ask BE to create MCC queue */
Sathya Perla8788fdc2009-07-27 22:52:03 +00001595 if (be_cmd_mccq_create(adapter, q, cq))
Sathya Perla5fb379e2009-06-18 00:02:59 +00001596 goto mcc_q_free;
1597
1598 return 0;
1599
1600mcc_q_free:
1601 be_queue_free(adapter, q);
1602mcc_cq_destroy:
Sathya Perla8788fdc2009-07-27 22:52:03 +00001603 be_cmd_q_destroy(adapter, cq, QTYPE_CQ);
Sathya Perla5fb379e2009-06-18 00:02:59 +00001604mcc_cq_free:
1605 be_queue_free(adapter, cq);
1606err:
1607 return -1;
1608}
1609
/* Destroy all TX rings: for each TX object the WRB queue, then its CQ;
 * finally drain and destroy the single EQ shared by all TX queues. */
static void be_tx_queues_destroy(struct be_adapter *adapter)
{
	struct be_queue_info *q;
	struct be_tx_obj *txo;
	u8 i;

	for_all_tx_queues(adapter, txo, i) {
		q = &txo->q;
		if (q->created)
			be_cmd_q_destroy(adapter, q, QTYPE_TXQ);
		be_queue_free(adapter, q);

		q = &txo->cq;
		if (q->created)
			be_cmd_q_destroy(adapter, q, QTYPE_CQ);
		be_queue_free(adapter, q);
	}

	/* Clear any residual events */
	be_eq_clean(adapter, &adapter->tx_eq);

	q = &adapter->tx_eq.q;
	if (q->created)
		be_cmd_q_destroy(adapter, q, QTYPE_EQ);
	be_queue_free(adapter, q);
}
1636
/* One TX event queue is shared by all TX compl qs */
/* Create the shared TX EQ, then per-TX-object CQ and WRB queue.
 * Returns 0 on success, -1 on failure; on any failure everything created
 * so far is torn down via be_tx_queues_destroy(), which tolerates
 * partially-created state (it checks q->created). */
static int be_tx_queues_create(struct be_adapter *adapter)
{
	struct be_queue_info *eq, *q, *cq;
	struct be_tx_obj *txo;
	u8 i;

	/* Fixed EQ delay of 96, no adaptive interrupt coalescing for TX */
	adapter->tx_eq.max_eqd = 0;
	adapter->tx_eq.min_eqd = 0;
	adapter->tx_eq.cur_eqd = 96;
	adapter->tx_eq.enable_aic = false;

	eq = &adapter->tx_eq.q;
	if (be_queue_alloc(adapter, eq, EVNT_Q_LEN,
		sizeof(struct be_eq_entry)))
		return -1;

	if (be_cmd_eq_create(adapter, eq, adapter->tx_eq.cur_eqd))
		goto err;
	adapter->tx_eq.eq_idx = adapter->eq_next_idx++;

	for_all_tx_queues(adapter, txo, i) {
		cq = &txo->cq;
		if (be_queue_alloc(adapter, cq, TX_CQ_LEN,
			sizeof(struct be_eth_tx_compl)))
			goto err;

		/* All TX CQs hang off the one shared TX EQ */
		if (be_cmd_cq_create(adapter, cq, eq, false, false, 3))
			goto err;

		q = &txo->q;
		if (be_queue_alloc(adapter, q, TX_Q_LEN,
			sizeof(struct be_eth_wrb)))
			goto err;

		if (be_cmd_txq_create(adapter, q, cq))
			goto err;
	}
	return 0;

err:
	be_tx_queues_destroy(adapter);
	return -1;
}
1681
/* Free all RX rings: per-RX-object the RXQ memory, then CQ and EQ.
 * NOTE(review): rxo->q is only freed, never be_cmd_q_destroy()'ed here —
 * presumably the RXQ itself is destroyed on the be_close() path (it is
 * created in be_open(), per the comment in be_rx_queues_create); confirm. */
static void be_rx_queues_destroy(struct be_adapter *adapter)
{
	struct be_queue_info *q;
	struct be_rx_obj *rxo;
	int i;

	for_all_rx_queues(adapter, rxo, i) {
		be_queue_free(adapter, &rxo->q);

		q = &rxo->cq;
		if (q->created)
			be_cmd_q_destroy(adapter, q, QTYPE_CQ);
		be_queue_free(adapter, q);

		q = &rxo->rx_eq.q;
		if (q->created)
			be_cmd_q_destroy(adapter, q, QTYPE_EQ);
		be_queue_free(adapter, q);
	}
}
1702
Sathya Perlaac6a0c42011-03-21 20:49:25 +00001703static u32 be_num_rxqs_want(struct be_adapter *adapter)
1704{
Sathya Perlac814fd32011-06-26 20:41:25 +00001705 if ((adapter->function_caps & BE_FUNCTION_CAPS_RSS) &&
Sathya Perlaac6a0c42011-03-21 20:49:25 +00001706 !adapter->sriov_enabled && !(adapter->function_mode & 0x400)) {
1707 return 1 + MAX_RSS_QS; /* one default non-RSS queue */
1708 } else {
1709 dev_warn(&adapter->pdev->dev,
1710 "No support for multiple RX queues\n");
1711 return 1;
1712 }
1713}
1714
/* Create per-RX-object EQ and CQ, and allocate (but do not create) the
 * RX ring itself. The number of RX queues is capped by the available
 * MSI-X vectors (one is reserved for TX/MCC).
 * Returns 0 on success, -1 on failure (with partial state torn down). */
static int be_rx_queues_create(struct be_adapter *adapter)
{
	struct be_queue_info *eq, *q, *cq;
	struct be_rx_obj *rxo;
	int rc, i;

	adapter->num_rx_qs = min(be_num_rxqs_want(adapter),
				msix_enabled(adapter) ?
					adapter->num_msix_vec - 1 : 1);
	if (adapter->num_rx_qs != MAX_RX_QS)
		dev_warn(&adapter->pdev->dev,
			"Can create only %d RX queues", adapter->num_rx_qs);

	/* Smallest page allocation that holds at least one rx_frag */
	adapter->big_page_size = (1 << get_order(rx_frag_size)) * PAGE_SIZE;
	for_all_rx_queues(adapter, rxo, i) {
		rxo->adapter = adapter;
		/* RX uses adaptive interrupt coalescing up to BE_MAX_EQD */
		rxo->rx_eq.max_eqd = BE_MAX_EQD;
		rxo->rx_eq.enable_aic = true;

		/* EQ */
		eq = &rxo->rx_eq.q;
		rc = be_queue_alloc(adapter, eq, EVNT_Q_LEN,
					sizeof(struct be_eq_entry));
		if (rc)
			goto err;

		rc = be_cmd_eq_create(adapter, eq, rxo->rx_eq.cur_eqd);
		if (rc)
			goto err;

		rxo->rx_eq.eq_idx = adapter->eq_next_idx++;

		/* CQ */
		cq = &rxo->cq;
		rc = be_queue_alloc(adapter, cq, RX_CQ_LEN,
				sizeof(struct be_eth_rx_compl));
		if (rc)
			goto err;

		rc = be_cmd_cq_create(adapter, cq, eq, false, false, 3);
		if (rc)
			goto err;

		/* Rx Q - will be created in be_open() */
		q = &rxo->q;
		rc = be_queue_alloc(adapter, q, RX_Q_LEN,
				sizeof(struct be_eth_rx_d));
		if (rc)
			goto err;

	}

	return 0;
err:
	be_rx_queues_destroy(adapter);
	return -1;
}
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001772
Sathya Perlafe6d2a32010-11-21 23:25:50 +00001773static bool event_peek(struct be_eq_obj *eq_obj)
Sathya Perlab628bde2009-08-17 00:58:26 +00001774{
Sathya Perlafe6d2a32010-11-21 23:25:50 +00001775 struct be_eq_entry *eqe = queue_tail_node(&eq_obj->q);
1776 if (!eqe->evt)
1777 return false;
1778 else
1779 return true;
Sathya Perlab628bde2009-08-17 00:58:26 +00001780}
1781
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001782static irqreturn_t be_intx(int irq, void *dev)
1783{
1784 struct be_adapter *adapter = dev;
Sathya Perla3abcded2010-10-03 22:12:27 -07001785 struct be_rx_obj *rxo;
Sathya Perlafe6d2a32010-11-21 23:25:50 +00001786 int isr, i, tx = 0 , rx = 0;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001787
Sathya Perlafe6d2a32010-11-21 23:25:50 +00001788 if (lancer_chip(adapter)) {
1789 if (event_peek(&adapter->tx_eq))
Sathya Perla3c8def92011-06-12 20:01:58 +00001790 tx = event_handle(adapter, &adapter->tx_eq, false);
Sathya Perlafe6d2a32010-11-21 23:25:50 +00001791 for_all_rx_queues(adapter, rxo, i) {
1792 if (event_peek(&rxo->rx_eq))
Sathya Perla3c8def92011-06-12 20:01:58 +00001793 rx |= event_handle(adapter, &rxo->rx_eq, true);
Sathya Perlafe6d2a32010-11-21 23:25:50 +00001794 }
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001795
Sathya Perlafe6d2a32010-11-21 23:25:50 +00001796 if (!(tx || rx))
1797 return IRQ_NONE;
Sathya Perla3abcded2010-10-03 22:12:27 -07001798
Sathya Perlafe6d2a32010-11-21 23:25:50 +00001799 } else {
1800 isr = ioread32(adapter->csr + CEV_ISR0_OFFSET +
1801 (adapter->tx_eq.q.id / 8) * CEV_ISR_SIZE);
1802 if (!isr)
1803 return IRQ_NONE;
1804
Padmanabh Ratnakarecd62102011-04-03 01:54:11 +00001805 if ((1 << adapter->tx_eq.eq_idx & isr))
Sathya Perla3c8def92011-06-12 20:01:58 +00001806 event_handle(adapter, &adapter->tx_eq, false);
Sathya Perlafe6d2a32010-11-21 23:25:50 +00001807
1808 for_all_rx_queues(adapter, rxo, i) {
Padmanabh Ratnakarecd62102011-04-03 01:54:11 +00001809 if ((1 << rxo->rx_eq.eq_idx & isr))
Sathya Perla3c8def92011-06-12 20:01:58 +00001810 event_handle(adapter, &rxo->rx_eq, true);
Sathya Perlafe6d2a32010-11-21 23:25:50 +00001811 }
Sathya Perla3abcded2010-10-03 22:12:27 -07001812 }
Sathya Perlac001c212009-07-01 01:06:07 +00001813
Sathya Perla8788fdc2009-07-27 22:52:03 +00001814 return IRQ_HANDLED;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001815}
1816
1817static irqreturn_t be_msix_rx(int irq, void *dev)
1818{
Sathya Perla3abcded2010-10-03 22:12:27 -07001819 struct be_rx_obj *rxo = dev;
1820 struct be_adapter *adapter = rxo->adapter;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001821
Sathya Perla3c8def92011-06-12 20:01:58 +00001822 event_handle(adapter, &rxo->rx_eq, true);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001823
1824 return IRQ_HANDLED;
1825}
1826
Sathya Perla5fb379e2009-06-18 00:02:59 +00001827static irqreturn_t be_msix_tx_mcc(int irq, void *dev)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001828{
1829 struct be_adapter *adapter = dev;
1830
Sathya Perla3c8def92011-06-12 20:01:58 +00001831 event_handle(adapter, &adapter->tx_eq, false);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001832
1833 return IRQ_HANDLED;
1834}
1835
Sathya Perla2e588f82011-03-11 02:49:26 +00001836static inline bool do_gro(struct be_rx_compl_info *rxcp)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001837{
Sathya Perla2e588f82011-03-11 02:49:26 +00001838 return (rxcp->tcpf && !rxcp->err) ? true : false;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001839}
1840
/* NAPI poll for one RX object: consume up to 'budget' completions,
 * replenish the RX ring when it runs low, and complete NAPI (rearming
 * interrupts) only when the budget was not exhausted. */
static int be_poll_rx(struct napi_struct *napi, int budget)
{
	struct be_eq_obj *rx_eq = container_of(napi, struct be_eq_obj, napi);
	struct be_rx_obj *rxo = container_of(rx_eq, struct be_rx_obj, rx_eq);
	struct be_adapter *adapter = rxo->adapter;
	struct be_queue_info *rx_cq = &rxo->cq;
	struct be_rx_compl_info *rxcp;
	u32 work_done;

	rx_stats(rxo)->rx_polls++;
	for (work_done = 0; work_done < budget; work_done++) {
		rxcp = be_rx_compl_get(rxo);
		if (!rxcp)
			break;

		/* Is it a flush compl that has no data */
		if (unlikely(rxcp->num_rcvd == 0))
			goto loop_continue;

		/* Discard compl with partial DMA Lancer B0 */
		if (unlikely(!rxcp->pkt_size)) {
			be_rx_compl_discard(adapter, rxo, rxcp);
			goto loop_continue;
		}

		/* On BE drop pkts that arrive due to imperfect filtering in
		 * promiscuous mode on some skews
		 */
		if (unlikely(rxcp->port != adapter->port_num &&
				!lancer_chip(adapter))) {
			be_rx_compl_discard(adapter, rxo, rxcp);
			goto loop_continue;
		}

		if (do_gro(rxcp))
			be_rx_compl_process_gro(adapter, rxo, rxcp);
		else
			be_rx_compl_process(adapter, rxo, rxcp);
loop_continue:
		be_rx_stats_update(rxo, rxcp);
	}

	/* Refill the queue */
	if (work_done && atomic_read(&rxo->q.used) < RX_FRAGS_REFILL_WM)
		be_post_rx_frags(rxo, GFP_ATOMIC);

	/* All consumed */
	if (work_done < budget) {
		napi_complete(napi);
		/* Ack the completions and rearm the CQ interrupt */
		be_cq_notify(adapter, rx_cq->id, true, work_done);
	} else {
		/* More to be consumed; continue with interrupts disabled */
		be_cq_notify(adapter, rx_cq->id, false, work_done);
	}
	return work_done;
}
1897
/* As TX and MCC share the same EQ check for both TX and MCC completions.
 * For TX/MCC we don't honour budget; consume everything
 */
/* NAPI poll for the shared TX/MCC event queue. Reaps all TX completions
 * on every TX queue (waking stopped subqueues as ring space frees up),
 * then processes MCC completions, and always reports 1 unit of work. */
static int be_poll_tx_mcc(struct napi_struct *napi, int budget)
{
	struct be_eq_obj *tx_eq = container_of(napi, struct be_eq_obj, napi);
	struct be_adapter *adapter =
		container_of(tx_eq, struct be_adapter, tx_eq);
	struct be_tx_obj *txo;
	struct be_eth_tx_compl *txcp;
	int tx_compl, mcc_compl, status = 0;
	u8 i;
	u16 num_wrbs;

	for_all_tx_queues(adapter, txo, i) {
		tx_compl = 0;
		num_wrbs = 0;
		while ((txcp = be_tx_compl_get(&txo->cq))) {
			num_wrbs += be_tx_compl_process(adapter, txo,
				AMAP_GET_BITS(struct amap_eth_tx_compl,
					wrb_index, txcp));
			tx_compl++;
		}
		if (tx_compl) {
			/* Ack and rearm this TX CQ */
			be_cq_notify(adapter, txo->cq.id, true, tx_compl);

			atomic_sub(num_wrbs, &txo->q.used);

			/* As Tx wrbs have been freed up, wake up netdev queue
			 * if it was stopped due to lack of tx wrbs. */
			if (__netif_subqueue_stopped(adapter->netdev, i) &&
				atomic_read(&txo->q.used) < txo->q.len / 2) {
				netif_wake_subqueue(adapter->netdev, i);
			}

			u64_stats_update_begin(&tx_stats(txo)->sync_compl);
			tx_stats(txo)->tx_compl += tx_compl;
			u64_stats_update_end(&tx_stats(txo)->sync_compl);
		}
	}

	mcc_compl = be_process_mcc(adapter, &status);

	if (mcc_compl) {
		struct be_mcc_obj *mcc_obj = &adapter->mcc_obj;
		be_cq_notify(adapter, mcc_obj->cq.id, true, mcc_compl);
	}

	napi_complete(napi);

	/* Rearm the shared EQ without further event consumption */
	be_eq_notify(adapter, tx_eq->q.id, true, false, 0);
	adapter->drv_stats.tx_events++;
	return 1;
}
1952
Ajit Khaparded053de92010-09-03 06:23:30 +00001953void be_detect_dump_ue(struct be_adapter *adapter)
Ajit Khaparde7c185272010-07-29 06:16:33 +00001954{
1955 u32 ue_status_lo, ue_status_hi, ue_status_lo_mask, ue_status_hi_mask;
1956 u32 i;
1957
1958 pci_read_config_dword(adapter->pdev,
1959 PCICFG_UE_STATUS_LOW, &ue_status_lo);
1960 pci_read_config_dword(adapter->pdev,
1961 PCICFG_UE_STATUS_HIGH, &ue_status_hi);
1962 pci_read_config_dword(adapter->pdev,
1963 PCICFG_UE_STATUS_LOW_MASK, &ue_status_lo_mask);
1964 pci_read_config_dword(adapter->pdev,
1965 PCICFG_UE_STATUS_HI_MASK, &ue_status_hi_mask);
1966
1967 ue_status_lo = (ue_status_lo & (~ue_status_lo_mask));
1968 ue_status_hi = (ue_status_hi & (~ue_status_hi_mask));
1969
Ajit Khaparded053de92010-09-03 06:23:30 +00001970 if (ue_status_lo || ue_status_hi) {
1971 adapter->ue_detected = true;
Ajit Khaparde7acc2082011-02-11 13:38:17 +00001972 adapter->eeh_err = true;
Ajit Khaparded053de92010-09-03 06:23:30 +00001973 dev_err(&adapter->pdev->dev, "UE Detected!!\n");
1974 }
1975
Ajit Khaparde7c185272010-07-29 06:16:33 +00001976 if (ue_status_lo) {
1977 for (i = 0; ue_status_lo; ue_status_lo >>= 1, i++) {
1978 if (ue_status_lo & 1)
1979 dev_err(&adapter->pdev->dev,
1980 "UE: %s bit set\n", ue_status_low_desc[i]);
1981 }
1982 }
1983 if (ue_status_hi) {
1984 for (i = 0; ue_status_hi; ue_status_hi >>= 1, i++) {
1985 if (ue_status_hi & 1)
1986 dev_err(&adapter->pdev->dev,
1987 "UE: %s bit set\n", ue_status_hi_desc[i]);
1988 }
1989 }
1990
1991}
1992
/* Periodic (1s) housekeeping worker.
 * Checks for unrecoverable HW errors, reaps MCC completions while the
 * interface is down, kicks off a stats query, adapts rx EQ delay, and
 * replenishes rx queues that ran out of buffers. Always re-arms itself.
 */
static void be_worker(struct work_struct *work)
{
	struct be_adapter *adapter =
		container_of(work, struct be_adapter, work.work);
	struct be_rx_obj *rxo;
	int i;

	/* UE register polling applies only to non-Lancer chips and only
	 * until the first error has been latched */
	if (!adapter->ue_detected && !lancer_chip(adapter))
		be_detect_dump_ue(adapter);

	/* when interrupts are not yet enabled, just reap any pending
	* mcc completions */
	if (!netif_running(adapter->netdev)) {
		int mcc_compl, status = 0;

		mcc_compl = be_process_mcc(adapter, &status);

		if (mcc_compl) {
			struct be_mcc_obj *mcc_obj = &adapter->mcc_obj;
			/* rearm=false: interrupts are off in this path */
			be_cq_notify(adapter, mcc_obj->cq.id, false, mcc_compl);
		}

		goto reschedule;
	}

	/* fire a (non-blocking) stats request unless one is still in flight */
	if (!adapter->stats_cmd_sent) {
		if (lancer_chip(adapter))
			lancer_cmd_get_pport_stats(adapter,
						&adapter->stats_cmd);
		else
			be_cmd_get_stats(adapter, &adapter->stats_cmd);
	}

	for_all_rx_queues(adapter, rxo, i) {
		/* adapt the EQ interrupt delay to the observed rx rate */
		be_rx_eqd_update(adapter, rxo);

		/* refill rx rings that ran dry during napi polling */
		if (rxo->rx_post_starved) {
			rxo->rx_post_starved = false;
			be_post_rx_frags(rxo, GFP_KERNEL);
		}
	}

reschedule:
	adapter->work_counter++;
	schedule_delayed_work(&adapter->work, msecs_to_jiffies(1000));
}
2039
Sathya Perla8d56ff12009-11-22 22:02:26 +00002040static void be_msix_disable(struct be_adapter *adapter)
2041{
Sathya Perlaac6a0c42011-03-21 20:49:25 +00002042 if (msix_enabled(adapter)) {
Sathya Perla8d56ff12009-11-22 22:02:26 +00002043 pci_disable_msix(adapter->pdev);
Sathya Perlaac6a0c42011-03-21 20:49:25 +00002044 adapter->num_msix_vec = 0;
Sathya Perla3abcded2010-10-03 22:12:27 -07002045 }
2046}
2047
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002048static void be_msix_enable(struct be_adapter *adapter)
2049{
Sathya Perla3abcded2010-10-03 22:12:27 -07002050#define BE_MIN_MSIX_VECTORS (1 + 1) /* Rx + Tx */
Sathya Perlaac6a0c42011-03-21 20:49:25 +00002051 int i, status, num_vec;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002052
Sathya Perlaac6a0c42011-03-21 20:49:25 +00002053 num_vec = be_num_rxqs_want(adapter) + 1;
Sathya Perla3abcded2010-10-03 22:12:27 -07002054
Sathya Perlaac6a0c42011-03-21 20:49:25 +00002055 for (i = 0; i < num_vec; i++)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002056 adapter->msix_entries[i].entry = i;
2057
Sathya Perlaac6a0c42011-03-21 20:49:25 +00002058 status = pci_enable_msix(adapter->pdev, adapter->msix_entries, num_vec);
Sathya Perla3abcded2010-10-03 22:12:27 -07002059 if (status == 0) {
2060 goto done;
2061 } else if (status >= BE_MIN_MSIX_VECTORS) {
Sathya Perlaac6a0c42011-03-21 20:49:25 +00002062 num_vec = status;
Sathya Perla3abcded2010-10-03 22:12:27 -07002063 if (pci_enable_msix(adapter->pdev, adapter->msix_entries,
Sathya Perlaac6a0c42011-03-21 20:49:25 +00002064 num_vec) == 0)
Sathya Perla3abcded2010-10-03 22:12:27 -07002065 goto done;
Sathya Perla3abcded2010-10-03 22:12:27 -07002066 }
2067 return;
2068done:
Sathya Perlaac6a0c42011-03-21 20:49:25 +00002069 adapter->num_msix_vec = num_vec;
2070 return;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002071}
2072
/* Enable SR-IOV for the physical function when the num_vfs module
 * parameter requests it. The request is clamped to the VF count the
 * device advertises in its SR-IOV capability. sriov_enabled records
 * whether pci_enable_sriov() succeeded.
 */
static void be_sriov_enable(struct be_adapter *adapter)
{
	be_check_sriov_fn_type(adapter);
#ifdef CONFIG_PCI_IOV
	if (be_physfn(adapter) && num_vfs) {
		int rc, cap_pos;
		u16 dev_vfs;

		/* how many VFs does the silicon actually support? */
		cap_pos = pci_find_ext_capability(adapter->pdev,
						PCI_EXT_CAP_ID_SRIOV);
		pci_read_config_word(adapter->pdev,
				cap_pos + PCI_SRIOV_TOTAL_VF, &dev_vfs);

		if (num_vfs > dev_vfs) {
			dev_info(&adapter->pdev->dev,
				"Device supports %d VFs and not %d\n",
				dev_vfs, num_vfs);
			num_vfs = dev_vfs;
		}

		rc = pci_enable_sriov(adapter->pdev, num_vfs);
		adapter->sriov_enabled = !rc;
	}
#endif
}
2098
/* Tear down SR-IOV if it was enabled; compiled out without PCI_IOV. */
static void be_sriov_disable(struct be_adapter *adapter)
{
#ifdef CONFIG_PCI_IOV
	if (!adapter->sriov_enabled)
		return;

	pci_disable_sriov(adapter->pdev);
	adapter->sriov_enabled = false;
#endif
}
2108
Sathya Perlafe6d2a32010-11-21 23:25:50 +00002109static inline int be_msix_vec_get(struct be_adapter *adapter,
2110 struct be_eq_obj *eq_obj)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002111{
Padmanabh Ratnakarecd62102011-04-03 01:54:11 +00002112 return adapter->msix_entries[eq_obj->eq_idx].vector;
Sathya Perlab628bde2009-08-17 00:58:26 +00002113}
2114
/* Build a "<netdev>-<desc>" name for the event queue and request its
 * MSI-X interrupt. Returns the request_irq() result (0 on success).
 * NOTE(review): sprintf into eq_obj->desc assumes the buffer is large
 * enough for netdev name + '-' + desc — confirm against be.h sizing.
 */
static int be_request_irq(struct be_adapter *adapter,
		struct be_eq_obj *eq_obj,
		void *handler, char *desc, void *context)
{
	struct net_device *netdev = adapter->netdev;
	int vec;

	sprintf(eq_obj->desc, "%s-%s", netdev->name, desc);
	vec = be_msix_vec_get(adapter, eq_obj);
	return request_irq(vec, handler, 0, eq_obj->desc, context);
}
2126
/* Free the MSI-X interrupt previously requested for this event queue. */
static void be_free_irq(struct be_adapter *adapter, struct be_eq_obj *eq_obj,
		void *context)
{
	free_irq(be_msix_vec_get(adapter, eq_obj), context);
}
2133
/* Request all MSI-X interrupts: one shared tx/mcc vector plus one per
 * rx queue. On any failure every vector requested so far is freed and
 * MSI-X is disabled, so the caller can fall back to INTx.
 * Returns 0 on success or the failing request_irq() error code.
 */
static int be_msix_register(struct be_adapter *adapter)
{
	struct be_rx_obj *rxo;
	int status, i;
	char qname[10];

	status = be_request_irq(adapter, &adapter->tx_eq, be_msix_tx_mcc, "tx",
				adapter);
	if (status)
		goto err;

	for_all_rx_queues(adapter, rxo, i) {
		sprintf(qname, "rxq%d", i);
		status = be_request_irq(adapter, &rxo->rx_eq, be_msix_rx,
				qname, rxo);
		if (status)
			goto err_msix;
	}

	return 0;

err_msix:
	/* unwind: free the tx vector, then walk backwards over the rx
	 * queues whose IRQs were already granted (i is the failed index) */
	be_free_irq(adapter, &adapter->tx_eq, adapter);

	for (i--, rxo = &adapter->rx_obj[i]; i >= 0; i--, rxo--)
		be_free_irq(adapter, &rxo->rx_eq, rxo);

err:
	dev_warn(&adapter->pdev->dev,
		"MSIX Request IRQ failed - err %d\n", status);
	be_msix_disable(adapter);
	return status;
}
2167
2168static int be_irq_register(struct be_adapter *adapter)
2169{
2170 struct net_device *netdev = adapter->netdev;
2171 int status;
2172
Sathya Perlaac6a0c42011-03-21 20:49:25 +00002173 if (msix_enabled(adapter)) {
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002174 status = be_msix_register(adapter);
2175 if (status == 0)
2176 goto done;
Sarveshwar Bandiba343c72010-03-31 02:56:12 +00002177 /* INTx is not supported for VF */
2178 if (!be_physfn(adapter))
2179 return status;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002180 }
2181
2182 /* INTx */
2183 netdev->irq = adapter->pdev->irq;
2184 status = request_irq(netdev->irq, be_intx, IRQF_SHARED, netdev->name,
2185 adapter);
2186 if (status) {
2187 dev_err(&adapter->pdev->dev,
2188 "INTx request IRQ failed - err %d\n", status);
2189 return status;
2190 }
2191done:
2192 adapter->isr_registered = true;
2193 return 0;
2194}
2195
2196static void be_irq_unregister(struct be_adapter *adapter)
2197{
2198 struct net_device *netdev = adapter->netdev;
Sathya Perla3abcded2010-10-03 22:12:27 -07002199 struct be_rx_obj *rxo;
2200 int i;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002201
2202 if (!adapter->isr_registered)
2203 return;
2204
2205 /* INTx */
Sathya Perlaac6a0c42011-03-21 20:49:25 +00002206 if (!msix_enabled(adapter)) {
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002207 free_irq(netdev->irq, adapter);
2208 goto done;
2209 }
2210
2211 /* MSIx */
Sathya Perla3abcded2010-10-03 22:12:27 -07002212 be_free_irq(adapter, &adapter->tx_eq, adapter);
2213
2214 for_all_rx_queues(adapter, rxo, i)
2215 be_free_irq(adapter, &rxo->rx_eq, rxo);
2216
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002217done:
2218 adapter->isr_registered = false;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002219}
2220
/* Destroy each created rx queue in the firmware, drain any buffers
 * still posted to it, and flush residual events from its event queue.
 * Used on interface close; the queue memory itself is freed elsewhere.
 */
static void be_rx_queues_clear(struct be_adapter *adapter)
{
	struct be_queue_info *q;
	struct be_rx_obj *rxo;
	int i;

	for_all_rx_queues(adapter, rxo, i) {
		q = &rxo->q;
		if (q->created) {
			be_cmd_rxq_destroy(adapter, q);
			/* After the rxq is invalidated, wait for a grace time
			 * of 1ms for all dma to end and the flush compl to
			 * arrive
			 */
			mdelay(1);
			/* reclaim buffers the HW never consumed */
			be_rx_q_clean(adapter, rxo);
		}

		/* Clear any residual events */
		q = &rxo->rx_eq.q;
		if (q->created)
			be_eq_clean(adapter, &rxo->rx_eq);
	}
}
2245
/* ndo_stop handler: quiesce the adapter in a safe order —
 * stop async MCC events, mask interrupts, stop napi polling, make sure
 * no IRQ handler is still running, then drain tx completions and tear
 * down the rx queues. Always returns 0.
 */
static int be_close(struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_rx_obj *rxo;
	struct be_tx_obj *txo;
	struct be_eq_obj *tx_eq = &adapter->tx_eq;
	int vec, i;

	be_async_mcc_disable(adapter);

	/* Lancer has no global interrupt-enable bit to clear */
	if (!lancer_chip(adapter))
		be_intr_set(adapter, false);

	for_all_rx_queues(adapter, rxo, i)
		napi_disable(&rxo->rx_eq.napi);

	napi_disable(&tx_eq->napi);

	if (lancer_chip(adapter)) {
		/* un-arm every CQ (rearm=false, num-popped=0) */
		be_cq_notify(adapter, adapter->mcc_obj.cq.id, false, 0);
		for_all_rx_queues(adapter, rxo, i)
			be_cq_notify(adapter, rxo->cq.id, false, 0);
		for_all_tx_queues(adapter, txo, i)
			be_cq_notify(adapter, txo->cq.id, false, 0);
	}

	/* wait for any in-flight IRQ handler to finish before unregistering */
	if (msix_enabled(adapter)) {
		vec = be_msix_vec_get(adapter, tx_eq);
		synchronize_irq(vec);

		for_all_rx_queues(adapter, rxo, i) {
			vec = be_msix_vec_get(adapter, &rxo->rx_eq);
			synchronize_irq(vec);
		}
	} else {
		synchronize_irq(netdev->irq);
	}
	be_irq_unregister(adapter);

	/* Wait for all pending tx completions to arrive so that
	 * all tx skbs are freed.
	 */
	for_all_tx_queues(adapter, txo, i)
		be_tx_compl_clean(adapter, txo);

	be_rx_queues_clear(adapter);
	return 0;
}
2294
/* Create the rx queues in firmware, program the RSS indirection table
 * when more than one rx queue exists (queue 0 stays the default,
 * non-RSS queue), then post the initial rx buffers and enable napi.
 * Returns 0 or the first firmware-command error.
 */
static int be_rx_queues_setup(struct be_adapter *adapter)
{
	struct be_rx_obj *rxo;
	int rc, i;
	u8 rsstable[MAX_RSS_QS];

	for_all_rx_queues(adapter, rxo, i) {
		/* queue 0 is the default queue; queues 1..n are RSS queues */
		rc = be_cmd_rxq_create(adapter, &rxo->q, rxo->cq.id,
			rx_frag_size, BE_MAX_JUMBO_FRAME_SIZE,
			adapter->if_handle,
			(i > 0) ? 1 : 0/* rss enable */, &rxo->rss_id);
		if (rc)
			return rc;
	}

	if (be_multi_rxq(adapter)) {
		/* hand the firmware the rss-id of every RSS queue */
		for_all_rss_queues(adapter, rxo, i)
			rsstable[i] = rxo->rss_id;

		rc = be_cmd_rss_config(adapter, rsstable,
			adapter->num_rx_qs - 1);
		if (rc)
			return rc;
	}

	/* First time posting */
	for_all_rx_queues(adapter, rxo, i) {
		be_post_rx_frags(rxo, GFP_KERNEL);
		napi_enable(&rxo->rx_eq.napi);
	}
	return 0;
}
2327
/* ndo_open handler: set up rx queues, enable napi and interrupts, arm
 * the event/completion queues, turn on async MCC processing, and (PF
 * only) program vlan config and flow control. On any failure the whole
 * bring-up is unwound through be_close() and -EIO is returned.
 */
static int be_open(struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_eq_obj *tx_eq = &adapter->tx_eq;
	struct be_rx_obj *rxo;
	int status, i;

	status = be_rx_queues_setup(adapter);
	if (status)
		goto err;

	napi_enable(&tx_eq->napi);

	be_irq_register(adapter);

	/* Lancer has no global interrupt-enable bit to set */
	if (!lancer_chip(adapter))
		be_intr_set(adapter, true);

	/* The evt queues are created in unarmed state; arm them */
	for_all_rx_queues(adapter, rxo, i) {
		be_eq_notify(adapter, rxo->rx_eq.q.id, true, false, 0);
		be_cq_notify(adapter, rxo->cq.id, true, 0);
	}
	be_eq_notify(adapter, tx_eq->q.id, true, false, 0);

	/* Now that interrupts are on we can process async mcc */
	be_async_mcc_enable(adapter);

	if (be_physfn(adapter)) {
		status = be_vid_config(adapter, false, 0);
		if (status)
			goto err;

		status = be_cmd_set_flow_control(adapter,
				adapter->tx_fc, adapter->rx_fc);
		if (status)
			goto err;
	}

	return 0;
err:
	be_close(adapter->netdev);
	return -EIO;
}
2372
/* Enable or disable Wake-on-LAN (magic packet).
 * enable=true: set the PM control bit in PCI config space, program the
 * netdev MAC as the magic-packet filter and enable PCI wake for
 * D3hot/D3cold. enable=false: program an all-zero MAC (clears the
 * filter) and disable PCI wake. Returns 0, a firmware status, or -1
 * when the DMA buffer for the firmware command cannot be allocated.
 */
static int be_setup_wol(struct be_adapter *adapter, bool enable)
{
	struct be_dma_mem cmd;
	int status = 0;
	u8 mac[ETH_ALEN];

	/* zero MAC is used to disable the magic-packet filter */
	memset(mac, 0, ETH_ALEN);

	cmd.size = sizeof(struct be_cmd_req_acpi_wol_magic_config);
	cmd.va = dma_alloc_coherent(&adapter->pdev->dev, cmd.size, &cmd.dma,
				GFP_KERNEL);
	if (cmd.va == NULL)
		return -1;
	memset(cmd.va, 0, cmd.size);

	if (enable) {
		/* the PM control bit must be set before the wol command */
		status = pci_write_config_dword(adapter->pdev,
			PCICFG_PM_CONTROL_OFFSET, PCICFG_PM_CONTROL_MASK);
		if (status) {
			dev_err(&adapter->pdev->dev,
				"Could not enable Wake-on-lan\n");
			dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va,
					cmd.dma);
			return status;
		}
		status = be_cmd_enable_magic_wol(adapter,
				adapter->netdev->dev_addr, &cmd);
		pci_enable_wake(adapter->pdev, PCI_D3hot, 1);
		pci_enable_wake(adapter->pdev, PCI_D3cold, 1);
	} else {
		status = be_cmd_enable_magic_wol(adapter, mac, &cmd);
		pci_enable_wake(adapter->pdev, PCI_D3hot, 0);
		pci_enable_wake(adapter->pdev, PCI_D3cold, 0);
	}

	dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va, cmd.dma);
	return status;
}
2411
Ajit Khaparde6d87f5c2010-08-25 00:32:33 +00002412/*
2413 * Generate a seed MAC address from the PF MAC Address using jhash.
2414 * MAC Address for VFs are assigned incrementally starting from the seed.
2415 * These addresses are programmed in the ASIC by the PF and the VF driver
2416 * queries for the MAC address during its probe.
2417 */
2418static inline int be_vf_eth_addr_config(struct be_adapter *adapter)
2419{
2420 u32 vf = 0;
Sathya Perla3abcded2010-10-03 22:12:27 -07002421 int status = 0;
Ajit Khaparde6d87f5c2010-08-25 00:32:33 +00002422 u8 mac[ETH_ALEN];
2423
2424 be_vf_eth_addr_generate(adapter, mac);
2425
2426 for (vf = 0; vf < num_vfs; vf++) {
2427 status = be_cmd_pmac_add(adapter, mac,
2428 adapter->vf_cfg[vf].vf_if_handle,
Ajit Khapardef8617e02011-02-11 13:36:37 +00002429 &adapter->vf_cfg[vf].vf_pmac_id,
2430 vf + 1);
Ajit Khaparde6d87f5c2010-08-25 00:32:33 +00002431 if (status)
2432 dev_err(&adapter->pdev->dev,
2433 "Mac address add failed for VF %d\n", vf);
2434 else
2435 memcpy(adapter->vf_cfg[vf].vf_mac_addr, mac, ETH_ALEN);
2436
2437 mac[5] += 1;
2438 }
2439 return status;
2440}
2441
2442static inline void be_vf_eth_addr_rem(struct be_adapter *adapter)
2443{
2444 u32 vf;
2445
2446 for (vf = 0; vf < num_vfs; vf++) {
2447 if (adapter->vf_cfg[vf].vf_pmac_id != BE_INVALID_PMAC_ID)
2448 be_cmd_pmac_del(adapter,
2449 adapter->vf_cfg[vf].vf_if_handle,
Ajit Khapardef8617e02011-02-11 13:36:37 +00002450 adapter->vf_cfg[vf].vf_pmac_id, vf + 1);
Ajit Khaparde6d87f5c2010-08-25 00:32:33 +00002451 }
2452}
2453
/* One-time adapter setup after probe/reset:
 * create the PF interface (with promiscuous/RSS capability bits for the
 * PF), create one interface per VF when SR-IOV is on, create tx/rx/mcc
 * queues, and cache the firmware version. Errors unwind in reverse
 * creation order via the goto ladder at the bottom.
 */
static int be_setup(struct be_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	u32 cap_flags, en_flags, vf = 0;
	int status;
	u8 mac[ETH_ALEN];

	be_cmd_req_native_mode(adapter);

	cap_flags = en_flags = BE_IF_FLAGS_UNTAGGED |
				BE_IF_FLAGS_BROADCAST |
				BE_IF_FLAGS_MULTICAST;

	if (be_physfn(adapter)) {
		/* only the PF may be capable of promiscuous modes */
		cap_flags |= BE_IF_FLAGS_MCAST_PROMISCUOUS |
				BE_IF_FLAGS_PROMISCUOUS |
				BE_IF_FLAGS_PASS_L3L4_ERRORS;
		en_flags |= BE_IF_FLAGS_PASS_L3L4_ERRORS;

		if (adapter->function_caps & BE_FUNCTION_CAPS_RSS) {
			cap_flags |= BE_IF_FLAGS_RSS;
			en_flags |= BE_IF_FLAGS_RSS;
		}
	}

	status = be_cmd_if_create(adapter, cap_flags, en_flags,
			netdev->dev_addr, false/* pmac_invalid */,
			&adapter->if_handle, &adapter->pmac_id, 0);
	if (status != 0)
		goto do_none;

	if (be_physfn(adapter)) {
		if (adapter->sriov_enabled) {
			while (vf < num_vfs) {
				cap_flags = en_flags = BE_IF_FLAGS_UNTAGGED |
							BE_IF_FLAGS_BROADCAST;
				/* pmac_invalid=true: mac content is unused
				 * here; the real VF MAC is programmed later
				 * by be_vf_eth_addr_config() */
				status = be_cmd_if_create(adapter, cap_flags,
					en_flags, mac, true,
					&adapter->vf_cfg[vf].vf_if_handle,
					NULL, vf+1);
				if (status) {
					dev_err(&adapter->pdev->dev,
					"Interface Create failed for VF %d\n",
					vf);
					goto if_destroy;
				}
				adapter->vf_cfg[vf].vf_pmac_id =
					BE_INVALID_PMAC_ID;
				vf++;
			}
		}
	} else {
		/* VF: learn our MAC (assigned by the PF) from firmware */
		status = be_cmd_mac_addr_query(adapter, mac,
			MAC_ADDRESS_TYPE_NETWORK, false, adapter->if_handle);
		if (!status) {
			memcpy(adapter->netdev->dev_addr, mac, ETH_ALEN);
			memcpy(adapter->netdev->perm_addr, mac, ETH_ALEN);
		}
	}

	status = be_tx_queues_create(adapter);
	if (status != 0)
		goto if_destroy;

	status = be_rx_queues_create(adapter);
	if (status != 0)
		goto tx_qs_destroy;

	/* Allow all priorities by default. A GRP5 evt may modify this */
	adapter->vlan_prio_bmap = 0xff;

	status = be_mcc_queues_create(adapter);
	if (status != 0)
		goto rx_qs_destroy;

	adapter->link_speed = -1;

	be_cmd_get_fw_ver(adapter, adapter->fw_ver, NULL);

	pcie_set_readrq(adapter->pdev, 4096);
	return 0;

rx_qs_destroy:
	be_rx_queues_destroy(adapter);
tx_qs_destroy:
	be_tx_queues_destroy(adapter);
if_destroy:
	/* destroy any VF interfaces created above, then the PF interface */
	if (be_physfn(adapter) && adapter->sriov_enabled)
		for (vf = 0; vf < num_vfs; vf++)
			if (adapter->vf_cfg[vf].vf_if_handle)
				be_cmd_if_destroy(adapter,
					adapter->vf_cfg[vf].vf_if_handle,
					vf + 1);
	be_cmd_if_destroy(adapter, adapter->if_handle, 0);
do_none:
	return status;
}
2551
/* Undo be_setup(): remove VF MACs, destroy mcc/rx/tx queues, destroy
 * the VF and PF interfaces, and tell firmware we are done. The two
 * physfn/sriov blocks are intentionally separate: VF pmac entries must
 * go before the queues are destroyed, VF interfaces after.
 * Always returns 0.
 */
static int be_clear(struct be_adapter *adapter)
{
	int vf;

	if (be_physfn(adapter) && adapter->sriov_enabled)
		be_vf_eth_addr_rem(adapter);

	be_mcc_queues_destroy(adapter);
	be_rx_queues_destroy(adapter);
	be_tx_queues_destroy(adapter);
	adapter->eq_next_idx = 0;

	if (be_physfn(adapter) && adapter->sriov_enabled)
		for (vf = 0; vf < num_vfs; vf++)
			if (adapter->vf_cfg[vf].vf_if_handle)
				be_cmd_if_destroy(adapter,
					adapter->vf_cfg[vf].vf_if_handle,
					vf + 1);

	be_cmd_if_destroy(adapter, adapter->if_handle, 0);

	adapter->be3_native = 0;

	/* tell fw we're done with firing cmds */
	be_cmd_fw_clean(adapter);
	return 0;
}
2579
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002580
Ajit Khaparde84517482009-09-04 03:12:16 +00002581#define FW_FILE_HDR_SIGN "ServerEngines Corp. "
Sarveshwar Bandifa9a6fe2009-11-20 14:23:47 -08002582static bool be_flash_redboot(struct be_adapter *adapter,
Ajit Khaparde3f0d4562010-02-09 01:30:35 +00002583 const u8 *p, u32 img_start, int image_size,
2584 int hdr_size)
Sarveshwar Bandifa9a6fe2009-11-20 14:23:47 -08002585{
2586 u32 crc_offset;
2587 u8 flashed_crc[4];
2588 int status;
Ajit Khaparde3f0d4562010-02-09 01:30:35 +00002589
2590 crc_offset = hdr_size + img_start + image_size - 4;
2591
Sarveshwar Bandifa9a6fe2009-11-20 14:23:47 -08002592 p += crc_offset;
Ajit Khaparde3f0d4562010-02-09 01:30:35 +00002593
2594 status = be_cmd_get_flash_crc(adapter, flashed_crc,
Ajit Khapardef510fc62010-03-31 01:47:45 +00002595 (image_size - 4));
Sarveshwar Bandifa9a6fe2009-11-20 14:23:47 -08002596 if (status) {
2597 dev_err(&adapter->pdev->dev,
2598 "could not get crc from flash, not flashing redboot\n");
2599 return false;
2600 }
2601
2602 /*update redboot only if crc does not match*/
2603 if (!memcmp(flashed_crc, p, 4))
2604 return false;
2605 else
2606 return true;
Sarveshwar Bandifa9a6fe2009-11-20 14:23:47 -08002607}
2608
Sathya Perla306f1342011-08-02 19:57:45 +00002609static bool phy_flashing_required(struct be_adapter *adapter)
2610{
2611 int status = 0;
2612 struct be_phy_info phy_info;
2613
2614 status = be_cmd_get_phy_info(adapter, &phy_info);
2615 if (status)
2616 return false;
2617 if ((phy_info.phy_type == TN_8022) &&
2618 (phy_info.interface_type == PHY_TYPE_BASET_10GB)) {
2619 return true;
2620 }
2621 return false;
2622}
2623
/* Flash every applicable component of a UFI firmware file.
 * A generation-specific table (gen2/gen3) maps each component type to
 * its offset and maximum size within the file. Components are written
 * to the flashrom in 32KB chunks through the DMA-able flash_cmd
 * buffer: intermediate chunks use a SAVE opcode, the final chunk a
 * FLASH opcode (PHY firmware has its own opcode pair). Skips NCSI
 * firmware on too-old base firmware, PHY firmware when no flashable
 * PHY is present, and redboot when its CRC already matches.
 * Returns 0 on success, -1 on a malformed file or write failure.
 */
static int be_flash_data(struct be_adapter *adapter,
			const struct firmware *fw,
			struct be_dma_mem *flash_cmd, int num_of_images)

{
	int status = 0, i, filehdr_size = 0;
	u32 total_bytes = 0, flash_op;
	int num_bytes;
	const u8 *p = fw->data;
	struct be_cmd_write_flashrom *req = flash_cmd->va;
	const struct flash_comp *pflashcomp;
	int num_comp;

	/* component layout table for BE3 (gen3) UFI files */
	static const struct flash_comp gen3_flash_types[10] = {
		{ FLASH_iSCSI_PRIMARY_IMAGE_START_g3, IMG_TYPE_ISCSI_ACTIVE,
			FLASH_IMAGE_MAX_SIZE_g3},
		{ FLASH_REDBOOT_START_g3, IMG_TYPE_REDBOOT,
			FLASH_REDBOOT_IMAGE_MAX_SIZE_g3},
		{ FLASH_iSCSI_BIOS_START_g3, IMG_TYPE_BIOS,
			FLASH_BIOS_IMAGE_MAX_SIZE_g3},
		{ FLASH_PXE_BIOS_START_g3, IMG_TYPE_PXE_BIOS,
			FLASH_BIOS_IMAGE_MAX_SIZE_g3},
		{ FLASH_FCoE_BIOS_START_g3, IMG_TYPE_FCOE_BIOS,
			FLASH_BIOS_IMAGE_MAX_SIZE_g3},
		{ FLASH_iSCSI_BACKUP_IMAGE_START_g3, IMG_TYPE_ISCSI_BACKUP,
			FLASH_IMAGE_MAX_SIZE_g3},
		{ FLASH_FCoE_PRIMARY_IMAGE_START_g3, IMG_TYPE_FCOE_FW_ACTIVE,
			FLASH_IMAGE_MAX_SIZE_g3},
		{ FLASH_FCoE_BACKUP_IMAGE_START_g3, IMG_TYPE_FCOE_FW_BACKUP,
			FLASH_IMAGE_MAX_SIZE_g3},
		{ FLASH_NCSI_START_g3, IMG_TYPE_NCSI_FW,
			FLASH_NCSI_IMAGE_MAX_SIZE_g3},
		{ FLASH_PHY_FW_START_g3, IMG_TYPE_PHY_FW,
			FLASH_PHY_FW_IMAGE_MAX_SIZE_g3}
	};
	/* component layout table for BE2 (gen2) UFI files */
	static const struct flash_comp gen2_flash_types[8] = {
		{ FLASH_iSCSI_PRIMARY_IMAGE_START_g2, IMG_TYPE_ISCSI_ACTIVE,
			FLASH_IMAGE_MAX_SIZE_g2},
		{ FLASH_REDBOOT_START_g2, IMG_TYPE_REDBOOT,
			FLASH_REDBOOT_IMAGE_MAX_SIZE_g2},
		{ FLASH_iSCSI_BIOS_START_g2, IMG_TYPE_BIOS,
			FLASH_BIOS_IMAGE_MAX_SIZE_g2},
		{ FLASH_PXE_BIOS_START_g2, IMG_TYPE_PXE_BIOS,
			FLASH_BIOS_IMAGE_MAX_SIZE_g2},
		{ FLASH_FCoE_BIOS_START_g2, IMG_TYPE_FCOE_BIOS,
			FLASH_BIOS_IMAGE_MAX_SIZE_g2},
		{ FLASH_iSCSI_BACKUP_IMAGE_START_g2, IMG_TYPE_ISCSI_BACKUP,
			FLASH_IMAGE_MAX_SIZE_g2},
		{ FLASH_FCoE_PRIMARY_IMAGE_START_g2, IMG_TYPE_FCOE_FW_ACTIVE,
			FLASH_IMAGE_MAX_SIZE_g2},
		{ FLASH_FCoE_BACKUP_IMAGE_START_g2, IMG_TYPE_FCOE_FW_BACKUP,
			FLASH_IMAGE_MAX_SIZE_g2}
	};

	if (adapter->generation == BE_GEN3) {
		pflashcomp = gen3_flash_types;
		filehdr_size = sizeof(struct flash_file_hdr_g3);
		num_comp = ARRAY_SIZE(gen3_flash_types);
	} else {
		pflashcomp = gen2_flash_types;
		filehdr_size = sizeof(struct flash_file_hdr_g2);
		num_comp = ARRAY_SIZE(gen2_flash_types);
	}
	for (i = 0; i < num_comp; i++) {
		/* NCSI fw needs base firmware >= 3.102.148.0 */
		if ((pflashcomp[i].optype == IMG_TYPE_NCSI_FW) &&
			memcmp(adapter->fw_ver, "3.102.148.0", 11) < 0)
			continue;
		if (pflashcomp[i].optype == IMG_TYPE_PHY_FW) {
			if (!phy_flashing_required(adapter))
				continue;
		}
		/* redboot only when its CRC differs from what's on flash */
		if ((pflashcomp[i].optype == IMG_TYPE_REDBOOT) &&
			(!be_flash_redboot(adapter, fw->data,
			pflashcomp[i].offset, pflashcomp[i].size, filehdr_size +
			(num_of_images * sizeof(struct image_hdr)))))
			continue;
		/* locate this component's payload within the file */
		p = fw->data;
		p += filehdr_size + pflashcomp[i].offset
			+ (num_of_images * sizeof(struct image_hdr));
		if (p + pflashcomp[i].size > fw->data + fw->size)
			return -1;
		total_bytes = pflashcomp[i].size;
		/* push the image down in 32KB chunks; the last chunk
		 * carries the opcode that commits the image to flash */
		while (total_bytes) {
			if (total_bytes > 32*1024)
				num_bytes = 32*1024;
			else
				num_bytes = total_bytes;
			total_bytes -= num_bytes;
			if (!total_bytes) {
				if (pflashcomp[i].optype == IMG_TYPE_PHY_FW)
					flash_op = FLASHROM_OPER_PHY_FLASH;
				else
					flash_op = FLASHROM_OPER_FLASH;
			} else {
				if (pflashcomp[i].optype == IMG_TYPE_PHY_FW)
					flash_op = FLASHROM_OPER_PHY_SAVE;
				else
					flash_op = FLASHROM_OPER_SAVE;
			}
			memcpy(req->params.data_buf, p, num_bytes);
			p += num_bytes;
			status = be_cmd_write_flashrom(adapter, flash_cmd,
				pflashcomp[i].optype, flash_op, num_bytes);
			if (status) {
				/* fw that rejects PHY flashing is tolerated */
				if ((status == ILLEGAL_IOCTL_REQ) &&
					(pflashcomp[i].optype ==
						IMG_TYPE_PHY_FW))
					break;
				dev_err(&adapter->pdev->dev,
					"cmd to write to flash rom failed.\n");
				return -1;
			}
		}
	}
	return 0;
}
2740
Ajit Khaparde3f0d4562010-02-09 01:30:35 +00002741static int get_ufigen_type(struct flash_file_hdr_g2 *fhdr)
2742{
2743 if (fhdr == NULL)
2744 return 0;
2745 if (fhdr->build[0] == '3')
2746 return BE_GEN3;
2747 else if (fhdr->build[0] == '2')
2748 return BE_GEN2;
2749 else
2750 return 0;
2751}
2752
/* Flash a firmware image on a Lancer (SLI-4) adapter.
 *
 * The image is streamed to the adapter's "/prg" object in 32KB chunks
 * via the WRITE_OBJECT mailbox command, then committed with a final
 * zero-length write.  Returns 0 on success, -EINVAL for a misaligned
 * image, -ENOMEM on allocation failure, or the command status.
 */
static int lancer_fw_download(struct be_adapter *adapter,
		const struct firmware *fw)
{
#define LANCER_FW_DOWNLOAD_CHUNK (32 * 1024)
#define LANCER_FW_DOWNLOAD_LOCATION "/prg"
	struct be_dma_mem flash_cmd;
	const u8 *data_ptr = NULL;
	u8 *dest_image_ptr = NULL;
	size_t image_size = 0;
	u32 chunk_size = 0;
	u32 data_written = 0;
	u32 offset = 0;
	int status = 0;
	u8 add_status = 0;

	/* Firmware requires the image length to be 4-byte aligned */
	if (!IS_ALIGNED(fw->size, sizeof(u32))) {
		dev_err(&adapter->pdev->dev,
			"FW Image not properly aligned. "
			"Length must be 4 byte aligned.\n");
		status = -EINVAL;
		goto lancer_fw_exit;
	}

	/* One coherent buffer holds the request header plus one chunk */
	flash_cmd.size = sizeof(struct lancer_cmd_req_write_object)
				+ LANCER_FW_DOWNLOAD_CHUNK;
	flash_cmd.va = dma_alloc_coherent(&adapter->pdev->dev, flash_cmd.size,
						&flash_cmd.dma, GFP_KERNEL);
	if (!flash_cmd.va) {
		status = -ENOMEM;
		dev_err(&adapter->pdev->dev,
			"Memory allocation failure while flashing\n");
		goto lancer_fw_exit;
	}

	/* Payload starts right after the request header in the buffer */
	dest_image_ptr = flash_cmd.va +
				sizeof(struct lancer_cmd_req_write_object);
	image_size = fw->size;
	data_ptr = fw->data;

	while (image_size) {
		chunk_size = min_t(u32, image_size, LANCER_FW_DOWNLOAD_CHUNK);

		/* Copy the image chunk content. */
		memcpy(dest_image_ptr, data_ptr, chunk_size);

		status = lancer_cmd_write_object(adapter, &flash_cmd,
				chunk_size, offset, LANCER_FW_DOWNLOAD_LOCATION,
				&data_written, &add_status);

		if (status)
			break;

		/* Advance by what the fw actually accepted, which may be
		 * less than chunk_size
		 */
		offset += data_written;
		data_ptr += data_written;
		image_size -= data_written;
	}

	if (!status) {
		/* Commit the FW written */
		status = lancer_cmd_write_object(adapter, &flash_cmd,
				0, offset, LANCER_FW_DOWNLOAD_LOCATION,
				&data_written, &add_status);
	}

	dma_free_coherent(&adapter->pdev->dev, flash_cmd.size, flash_cmd.va,
				flash_cmd.dma);
	if (status) {
		dev_err(&adapter->pdev->dev,
			"Firmware load error. "
			"Status code: 0x%x Additional Status: 0x%x\n",
			status, add_status);
		goto lancer_fw_exit;
	}

	dev_info(&adapter->pdev->dev, "Firmware flashed successfully\n");
lancer_fw_exit:
	return status;
}
2831
/* Flash a UFI firmware file on a BE2/BE3 adapter.
 *
 * Validates that the UFI generation matches the adapter generation,
 * then hands each matching image off to be_flash_data().  For GEN3
 * files only images with imageid == 1 are flashed.  Returns 0 on
 * success, -ENOMEM on allocation failure, or -1 on a generation
 * mismatch / flash error.
 */
static int be_fw_download(struct be_adapter *adapter, const struct firmware* fw)
{
	struct flash_file_hdr_g2 *fhdr;
	struct flash_file_hdr_g3 *fhdr3;
	struct image_hdr *img_hdr_ptr = NULL;
	struct be_dma_mem flash_cmd;
	const u8 *p;
	int status = 0, i = 0, num_imgs = 0;

	p = fw->data;
	/* The g2 header prefix is common to both file formats, so it is
	 * safe to probe the generation through it
	 */
	fhdr = (struct flash_file_hdr_g2 *) p;

	/* DMA buffer: write_flashrom request header + 32KB data window */
	flash_cmd.size = sizeof(struct be_cmd_write_flashrom) + 32*1024;
	flash_cmd.va = dma_alloc_coherent(&adapter->pdev->dev, flash_cmd.size,
					  &flash_cmd.dma, GFP_KERNEL);
	if (!flash_cmd.va) {
		status = -ENOMEM;
		dev_err(&adapter->pdev->dev,
			"Memory allocation failure while flashing\n");
		goto be_fw_exit;
	}

	if ((adapter->generation == BE_GEN3) &&
			(get_ufigen_type(fhdr) == BE_GEN3)) {
		fhdr3 = (struct flash_file_hdr_g3 *) fw->data;
		num_imgs = le32_to_cpu(fhdr3->num_imgs);
		for (i = 0; i < num_imgs; i++) {
			/* Image headers are packed right after the file hdr */
			img_hdr_ptr = (struct image_hdr *) (fw->data +
					(sizeof(struct flash_file_hdr_g3) +
					 i * sizeof(struct image_hdr)));
			if (le32_to_cpu(img_hdr_ptr->imageid) == 1)
				status = be_flash_data(adapter, fw, &flash_cmd,
							num_imgs);
		}
	} else if ((adapter->generation == BE_GEN2) &&
			(get_ufigen_type(fhdr) == BE_GEN2)) {
		status = be_flash_data(adapter, fw, &flash_cmd, 0);
	} else {
		dev_err(&adapter->pdev->dev,
			"UFI and Interface are not compatible for flashing\n");
		status = -1;
	}

	dma_free_coherent(&adapter->pdev->dev, flash_cmd.size, flash_cmd.va,
			  flash_cmd.dma);
	if (status) {
		dev_err(&adapter->pdev->dev, "Firmware load error\n");
		goto be_fw_exit;
	}

	dev_info(&adapter->pdev->dev, "Firmware flashed successfully\n");

be_fw_exit:
	return status;
}
2887
/* Entry point for firmware flashing (e.g. via ethtool -f).
 *
 * Fetches @fw_file through the firmware loader and dispatches to the
 * Lancer or BE2/BE3 download path.  Flashing is refused while the
 * interface is down.  Returns 0 on success or a negative value.
 */
int be_load_fw(struct be_adapter *adapter, u8 *fw_file)
{
	const struct firmware *fw;
	int status;

	if (!netif_running(adapter->netdev)) {
		dev_err(&adapter->pdev->dev,
			"Firmware load not allowed (interface is down)\n");
		return -1;
	}

	status = request_firmware(&fw, fw_file, &adapter->pdev->dev);
	if (status)
		goto fw_exit;

	dev_info(&adapter->pdev->dev, "Flashing firmware file %s\n", fw_file);

	if (lancer_chip(adapter))
		status = lancer_fw_download(adapter, fw);
	else
		status = be_fw_download(adapter, fw);

fw_exit:
	/* NOTE(review): the failure goto relies on request_firmware()
	 * NULLing *fw on error and release_firmware(NULL) being a no-op
	 * — confirm against the firmware_class implementation in use.
	 */
	release_firmware(fw);
	return status;
}
2914
/* net_device_ops vector installed on every benet netdev (via
 * BE_SET_NETDEV_OPS in be_netdev_init).  Includes the SR-IOV
 * ndo_set_vf_* callbacks for PF-side VF management.
 * NOTE(review): not const — nothing in this file visibly writes to it;
 * confirm BE_SET_NETDEV_OPS only assigns the pointer, in which case
 * this could be constified.
 */
static struct net_device_ops be_netdev_ops = {
	.ndo_open = be_open,
	.ndo_stop = be_close,
	.ndo_start_xmit = be_xmit,
	.ndo_set_rx_mode = be_set_multicast_list,
	.ndo_set_mac_address = be_mac_addr_set,
	.ndo_change_mtu = be_change_mtu,
	.ndo_get_stats64 = be_get_stats64,
	.ndo_validate_addr = eth_validate_addr,
	.ndo_vlan_rx_add_vid = be_vlan_add_vid,
	.ndo_vlan_rx_kill_vid = be_vlan_rem_vid,
	.ndo_set_vf_mac = be_set_vf_mac,
	.ndo_set_vf_vlan = be_set_vf_vlan,
	.ndo_set_vf_tx_rate = be_set_vf_tx_rate,
	.ndo_get_vf_config = be_get_vf_config
};
2931
/* One-time net_device setup: advertise offload features, install the
 * netdev/ethtool ops, and register a NAPI context per RX queue plus
 * one shared by TX completions and the MCC queue.
 */
static void be_netdev_init(struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_rx_obj *rxo;
	int i;

	/* Features the user may toggle at runtime */
	netdev->hw_features |= NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6 |
		NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_RXCSUM |
		NETIF_F_HW_VLAN_TX;
	/* RSS hashing only makes sense with multiple RX queues */
	if (be_multi_rxq(adapter))
		netdev->hw_features |= NETIF_F_RXHASH;

	/* Everything above is enabled by default, plus fixed VLAN offloads */
	netdev->features |= netdev->hw_features |
		NETIF_F_HW_VLAN_RX | NETIF_F_HW_VLAN_FILTER;

	netdev->vlan_features |= NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6 |
		NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;

	netdev->flags |= IFF_MULTICAST;

	/* Default settings for Rx and Tx flow control */
	adapter->rx_fc = true;
	adapter->tx_fc = true;

	netif_set_gso_max_size(netdev, 65535);

	BE_SET_NETDEV_OPS(netdev, &be_netdev_ops);

	SET_ETHTOOL_OPS(netdev, &be_ethtool_ops);

	/* One NAPI instance per RX event queue... */
	for_all_rx_queues(adapter, rxo, i)
		netif_napi_add(netdev, &rxo->rx_eq.napi, be_poll_rx,
			BE_NAPI_WEIGHT);

	/* ...and one for the TX/MCC event queue */
	netif_napi_add(netdev, &adapter->tx_eq.napi, be_poll_tx_mcc,
		BE_NAPI_WEIGHT);
}
2969
/* Undo be_map_pci_bars(): release the ioremap()ed CSR and doorbell
 * BARs if they were mapped (either may be NULL, e.g. on a VF or after
 * a partial mapping failure).
 */
static void be_unmap_pci_bars(struct be_adapter *adapter)
{
	if (adapter->csr)
		iounmap(adapter->csr);
	if (adapter->db)
		iounmap(adapter->db);
}
2977
2978static int be_map_pci_bars(struct be_adapter *adapter)
2979{
2980 u8 __iomem *addr;
Sathya Perladb3ea782011-08-22 19:41:52 +00002981 int db_reg;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002982
Sathya Perlafe6d2a32010-11-21 23:25:50 +00002983 if (lancer_chip(adapter)) {
2984 addr = ioremap_nocache(pci_resource_start(adapter->pdev, 0),
2985 pci_resource_len(adapter->pdev, 0));
2986 if (addr == NULL)
2987 return -ENOMEM;
2988 adapter->db = addr;
2989 return 0;
2990 }
2991
Sarveshwar Bandiba343c72010-03-31 02:56:12 +00002992 if (be_physfn(adapter)) {
2993 addr = ioremap_nocache(pci_resource_start(adapter->pdev, 2),
2994 pci_resource_len(adapter->pdev, 2));
2995 if (addr == NULL)
2996 return -ENOMEM;
2997 adapter->csr = addr;
2998 }
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002999
Sarveshwar Bandiba343c72010-03-31 02:56:12 +00003000 if (adapter->generation == BE_GEN2) {
Sarveshwar Bandiba343c72010-03-31 02:56:12 +00003001 db_reg = 4;
3002 } else {
Sarveshwar Bandiba343c72010-03-31 02:56:12 +00003003 if (be_physfn(adapter))
3004 db_reg = 4;
3005 else
3006 db_reg = 0;
3007 }
3008 addr = ioremap_nocache(pci_resource_start(adapter->pdev, db_reg),
3009 pci_resource_len(adapter->pdev, db_reg));
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003010 if (addr == NULL)
3011 goto pci_map_err;
Sathya Perla8788fdc2009-07-27 22:52:03 +00003012 adapter->db = addr;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003013
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003014 return 0;
3015pci_map_err:
3016 be_unmap_pci_bars(adapter);
3017 return -ENOMEM;
3018}
3019
3020
/* Undo be_ctrl_init(): unmap the PCI BARs and free the mailbox and
 * rx_filter DMA buffers (each freed only if it was allocated, so this
 * is safe on the error-unwind paths too).
 */
static void be_ctrl_cleanup(struct be_adapter *adapter)
{
	struct be_dma_mem *mem = &adapter->mbox_mem_alloced;

	be_unmap_pci_bars(adapter);

	if (mem->va)
		dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
				  mem->dma);

	mem = &adapter->rx_filter;
	if (mem->va)
		dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
				  mem->dma);
}
3036
/* Set up everything needed to talk to the adapter's firmware:
 * map the PCI BARs, allocate the (16-byte aligned) MCC mailbox and
 * the rx_filter command buffer, and initialise the associated locks.
 * Returns 0 on success or a negative errno; on failure all resources
 * acquired so far are released via the goto ladder.
 */
static int be_ctrl_init(struct be_adapter *adapter)
{
	struct be_dma_mem *mbox_mem_alloc = &adapter->mbox_mem_alloced;
	struct be_dma_mem *mbox_mem_align = &adapter->mbox_mem;
	struct be_dma_mem *rx_filter = &adapter->rx_filter;
	int status;

	status = be_map_pci_bars(adapter);
	if (status)
		goto done;

	/* Over-allocate by 16 so the mailbox can be aligned to 16 bytes */
	mbox_mem_alloc->size = sizeof(struct be_mcc_mailbox) + 16;
	mbox_mem_alloc->va = dma_alloc_coherent(&adapter->pdev->dev,
						mbox_mem_alloc->size,
						&mbox_mem_alloc->dma,
						GFP_KERNEL);
	if (!mbox_mem_alloc->va) {
		status = -ENOMEM;
		goto unmap_pci_bars;
	}
	/* mbox_mem is the aligned view into the raw allocation above */
	mbox_mem_align->size = sizeof(struct be_mcc_mailbox);
	mbox_mem_align->va = PTR_ALIGN(mbox_mem_alloc->va, 16);
	mbox_mem_align->dma = PTR_ALIGN(mbox_mem_alloc->dma, 16);
	memset(mbox_mem_align->va, 0, sizeof(struct be_mcc_mailbox));

	rx_filter->size = sizeof(struct be_cmd_req_rx_filter);
	rx_filter->va = dma_alloc_coherent(&adapter->pdev->dev, rx_filter->size,
					&rx_filter->dma, GFP_KERNEL);
	if (rx_filter->va == NULL) {
		status = -ENOMEM;
		goto free_mbox;
	}
	memset(rx_filter->va, 0, rx_filter->size);

	mutex_init(&adapter->mbox_lock);
	spin_lock_init(&adapter->mcc_lock);
	spin_lock_init(&adapter->mcc_cq_lock);

	init_completion(&adapter->flash_compl);
	/* Saved state is restored on EEH / error recovery */
	pci_save_state(adapter->pdev);
	return 0;

free_mbox:
	dma_free_coherent(&adapter->pdev->dev, mbox_mem_alloc->size,
			  mbox_mem_alloc->va, mbox_mem_alloc->dma);

unmap_pci_bars:
	be_unmap_pci_bars(adapter);

done:
	return status;
}
3089
3090static void be_stats_cleanup(struct be_adapter *adapter)
3091{
Sathya Perla3abcded2010-10-03 22:12:27 -07003092 struct be_dma_mem *cmd = &adapter->stats_cmd;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003093
3094 if (cmd->va)
Ivan Vecera2b7bceb2011-02-02 08:05:12 +00003095 dma_free_coherent(&adapter->pdev->dev, cmd->size,
3096 cmd->va, cmd->dma);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003097}
3098
3099static int be_stats_init(struct be_adapter *adapter)
3100{
Sathya Perla3abcded2010-10-03 22:12:27 -07003101 struct be_dma_mem *cmd = &adapter->stats_cmd;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003102
Selvin Xavier005d5692011-05-16 07:36:35 +00003103 if (adapter->generation == BE_GEN2) {
Ajit Khaparde89a88ab2011-05-16 07:36:18 +00003104 cmd->size = sizeof(struct be_cmd_req_get_stats_v0);
Selvin Xavier005d5692011-05-16 07:36:35 +00003105 } else {
3106 if (lancer_chip(adapter))
3107 cmd->size = sizeof(struct lancer_cmd_req_pport_stats);
3108 else
3109 cmd->size = sizeof(struct be_cmd_req_get_stats_v1);
3110 }
Ivan Vecera2b7bceb2011-02-02 08:05:12 +00003111 cmd->va = dma_alloc_coherent(&adapter->pdev->dev, cmd->size, &cmd->dma,
3112 GFP_KERNEL);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003113 if (cmd->va == NULL)
3114 return -1;
David S. Millerd291b9a2010-01-28 21:36:21 -08003115 memset(cmd->va, 0, cmd->size);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003116 return 0;
3117}
3118
/* PCI remove callback: tear down in the reverse order of be_probe().
 * Stops the worker, unregisters the netdev, destroys queues/interface
 * (be_clear), frees stats and control structures, disables SR-IOV and
 * MSI-X, and finally releases the PCI resources and the netdev itself.
 */
static void __devexit be_remove(struct pci_dev *pdev)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);

	/* drvdata is cleared on probe failure paths */
	if (!adapter)
		return;

	cancel_delayed_work_sync(&adapter->work);

	unregister_netdev(adapter->netdev);

	be_clear(adapter);

	be_stats_cleanup(adapter);

	be_ctrl_cleanup(adapter);

	kfree(adapter->vf_cfg);
	be_sriov_disable(adapter);

	be_msix_disable(adapter);

	pci_set_drvdata(pdev, NULL);
	pci_release_regions(pdev);
	pci_disable_device(pdev);

	/* Frees the adapter too (it lives in the netdev's private area) */
	free_netdev(adapter->netdev);
}
3147
/* Query the firmware configuration and derive driver settings from it:
 * permanent MAC address (PF, and VFs on Lancer), the per-function VLAN
 * budget, controller attributes, and the number of TX queues to use.
 * Returns 0 on success or the failing command's status.
 */
static int be_get_config(struct be_adapter *adapter)
{
	int status;
	u8 mac[ETH_ALEN];

	status = be_cmd_query_fw_cfg(adapter, &adapter->port_num,
			&adapter->function_mode, &adapter->function_caps);
	if (status)
		return status;

	memset(mac, 0, ETH_ALEN);

	/* A default permanent address is given to each VF for Lancer*/
	if (be_physfn(adapter) || lancer_chip(adapter)) {
		status = be_cmd_mac_addr_query(adapter, mac,
			MAC_ADDRESS_TYPE_NETWORK, true /*permanent */, 0);

		if (status)
			return status;

		if (!is_valid_ether_addr(mac))
			return -EADDRNOTAVAIL;

		memcpy(adapter->netdev->dev_addr, mac, ETH_ALEN);
		memcpy(adapter->netdev->perm_addr, mac, ETH_ALEN);
	}

	/* NOTE(review): 0x400 is presumably a multi-channel mode bit in
	 * function_mode (VLANs are shared 4 ways in that mode) — confirm
	 * against the fw cfg definitions and consider a named constant.
	 */
	if (adapter->function_mode & 0x400)
		adapter->max_vlans = BE_NUM_VLANS_SUPPORTED/4;
	else
		adapter->max_vlans = BE_NUM_VLANS_SUPPORTED;

	status = be_cmd_get_cntl_attributes(adapter);
	if (status)
		return status;

	/* Multiple TX queues only on a BE2/BE3 PF without SR-IOV and not
	 * in the 0x400 function mode; everything else gets a single queue
	 */
	if ((num_vfs && adapter->sriov_enabled) ||
		(adapter->function_mode & 0x400) ||
		lancer_chip(adapter) || !be_physfn(adapter)) {
		adapter->num_tx_qs = 1;
		netif_set_real_num_tx_queues(adapter->netdev,
			adapter->num_tx_qs);
	} else {
		adapter->num_tx_qs = MAX_TX_QS;
	}

	return 0;
}
3196
Sathya Perlafe6d2a32010-11-21 23:25:50 +00003197static int be_dev_family_check(struct be_adapter *adapter)
3198{
3199 struct pci_dev *pdev = adapter->pdev;
3200 u32 sli_intf = 0, if_type;
3201
3202 switch (pdev->device) {
3203 case BE_DEVICE_ID1:
3204 case OC_DEVICE_ID1:
3205 adapter->generation = BE_GEN2;
3206 break;
3207 case BE_DEVICE_ID2:
3208 case OC_DEVICE_ID2:
3209 adapter->generation = BE_GEN3;
3210 break;
3211 case OC_DEVICE_ID3:
Mammatha Edhala12f4d0a2011-05-18 03:26:22 +00003212 case OC_DEVICE_ID4:
Sathya Perlafe6d2a32010-11-21 23:25:50 +00003213 pci_read_config_dword(pdev, SLI_INTF_REG_OFFSET, &sli_intf);
3214 if_type = (sli_intf & SLI_INTF_IF_TYPE_MASK) >>
3215 SLI_INTF_IF_TYPE_SHIFT;
3216
3217 if (((sli_intf & SLI_INTF_VALID_MASK) != SLI_INTF_VALID) ||
3218 if_type != 0x02) {
3219 dev_err(&pdev->dev, "SLI_INTF reg val is not valid\n");
3220 return -EINVAL;
3221 }
Sathya Perlafe6d2a32010-11-21 23:25:50 +00003222 adapter->sli_family = ((sli_intf & SLI_INTF_FAMILY_MASK) >>
3223 SLI_INTF_FAMILY_SHIFT);
3224 adapter->generation = BE_GEN3;
3225 break;
3226 default:
3227 adapter->generation = 0;
3228 }
3229 return 0;
3230}
3231
Padmanabh Ratnakar37eed1c2011-03-07 03:08:36 +00003232static int lancer_wait_ready(struct be_adapter *adapter)
3233{
3234#define SLIPORT_READY_TIMEOUT 500
3235 u32 sliport_status;
3236 int status = 0, i;
3237
3238 for (i = 0; i < SLIPORT_READY_TIMEOUT; i++) {
3239 sliport_status = ioread32(adapter->db + SLIPORT_STATUS_OFFSET);
3240 if (sliport_status & SLIPORT_STATUS_RDY_MASK)
3241 break;
3242
3243 msleep(20);
3244 }
3245
3246 if (i == SLIPORT_READY_TIMEOUT)
3247 status = -1;
3248
3249 return status;
3250}
3251
/* Wait for a Lancer port to become ready and, if it reports an error
 * with a reset-needed indication, initiate a port reset (via the IP
 * bit in SLIPORT_CONTROL) and wait for recovery.
 * Returns 0 when the port is ready and error-free, -1 otherwise.
 */
static int lancer_test_and_set_rdy_state(struct be_adapter *adapter)
{
	int status;
	u32 sliport_status, err, reset_needed;
	status = lancer_wait_ready(adapter);
	if (!status) {
		sliport_status = ioread32(adapter->db + SLIPORT_STATUS_OFFSET);
		err = sliport_status & SLIPORT_STATUS_ERR_MASK;
		reset_needed = sliport_status & SLIPORT_STATUS_RN_MASK;
		if (err && reset_needed) {
			/* Trigger the port reset */
			iowrite32(SLI_PORT_CONTROL_IP_MASK,
				  adapter->db + SLIPORT_CONTROL_OFFSET);

			/* check adapter has corrected the error */
			status = lancer_wait_ready(adapter);
			sliport_status = ioread32(adapter->db +
							SLIPORT_STATUS_OFFSET);
			sliport_status &= (SLIPORT_STATUS_ERR_MASK |
						SLIPORT_STATUS_RN_MASK);
			if (status || sliport_status)
				status = -1;
		} else if (err || reset_needed) {
			/* Error without (or reset-needed without error) a
			 * recoverable combination — give up
			 */
			status = -1;
		}
	}
	return status;
}
3279
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003280static int __devinit be_probe(struct pci_dev *pdev,
3281 const struct pci_device_id *pdev_id)
3282{
3283 int status = 0;
3284 struct be_adapter *adapter;
3285 struct net_device *netdev;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003286
3287 status = pci_enable_device(pdev);
3288 if (status)
3289 goto do_none;
3290
3291 status = pci_request_regions(pdev, DRV_NAME);
3292 if (status)
3293 goto disable_dev;
3294 pci_set_master(pdev);
3295
Sathya Perla3c8def92011-06-12 20:01:58 +00003296 netdev = alloc_etherdev_mq(sizeof(struct be_adapter), MAX_TX_QS);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003297 if (netdev == NULL) {
3298 status = -ENOMEM;
3299 goto rel_reg;
3300 }
3301 adapter = netdev_priv(netdev);
3302 adapter->pdev = pdev;
3303 pci_set_drvdata(pdev, adapter);
Sathya Perlafe6d2a32010-11-21 23:25:50 +00003304
3305 status = be_dev_family_check(adapter);
Sathya Perla63657b92010-12-01 01:02:28 +00003306 if (status)
Sathya Perlafe6d2a32010-11-21 23:25:50 +00003307 goto free_netdev;
3308
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003309 adapter->netdev = netdev;
Sathya Perla2243e2e2009-11-22 22:02:03 +00003310 SET_NETDEV_DEV(netdev, &pdev->dev);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003311
Ivan Vecera2b7bceb2011-02-02 08:05:12 +00003312 status = dma_set_mask(&pdev->dev, DMA_BIT_MASK(64));
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003313 if (!status) {
3314 netdev->features |= NETIF_F_HIGHDMA;
3315 } else {
Ivan Vecera2b7bceb2011-02-02 08:05:12 +00003316 status = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003317 if (status) {
3318 dev_err(&pdev->dev, "Could not set PCI DMA Mask\n");
3319 goto free_netdev;
3320 }
3321 }
3322
Sarveshwar Bandiba343c72010-03-31 02:56:12 +00003323 be_sriov_enable(adapter);
Ajit Khaparde48f5a192011-04-06 18:08:30 +00003324 if (adapter->sriov_enabled) {
3325 adapter->vf_cfg = kcalloc(num_vfs,
3326 sizeof(struct be_vf_cfg), GFP_KERNEL);
3327
3328 if (!adapter->vf_cfg)
3329 goto free_netdev;
3330 }
Sarveshwar Bandiba343c72010-03-31 02:56:12 +00003331
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003332 status = be_ctrl_init(adapter);
3333 if (status)
Ajit Khaparde48f5a192011-04-06 18:08:30 +00003334 goto free_vf_cfg;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003335
Padmanabh Ratnakar37eed1c2011-03-07 03:08:36 +00003336 if (lancer_chip(adapter)) {
3337 status = lancer_test_and_set_rdy_state(adapter);
3338 if (status) {
3339 dev_err(&pdev->dev, "Adapter in non recoverable error\n");
Ajit Khaparde48f5a192011-04-06 18:08:30 +00003340 goto ctrl_clean;
Padmanabh Ratnakar37eed1c2011-03-07 03:08:36 +00003341 }
3342 }
3343
Sathya Perla2243e2e2009-11-22 22:02:03 +00003344 /* sync up with fw's ready state */
Sarveshwar Bandiba343c72010-03-31 02:56:12 +00003345 if (be_physfn(adapter)) {
3346 status = be_cmd_POST(adapter);
3347 if (status)
3348 goto ctrl_clean;
Sarveshwar Bandiba343c72010-03-31 02:56:12 +00003349 }
Sathya Perla2243e2e2009-11-22 22:02:03 +00003350
3351 /* tell fw we're ready to fire cmds */
3352 status = be_cmd_fw_init(adapter);
3353 if (status)
3354 goto ctrl_clean;
3355
Ajit Khapardea4b4dfa2011-02-11 13:36:57 +00003356 status = be_cmd_reset_function(adapter);
3357 if (status)
3358 goto ctrl_clean;
Sarveshwar Bandi556ae192010-05-24 18:38:25 -07003359
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003360 status = be_stats_init(adapter);
3361 if (status)
3362 goto ctrl_clean;
3363
Sathya Perla2243e2e2009-11-22 22:02:03 +00003364 status = be_get_config(adapter);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003365 if (status)
3366 goto stats_clean;
3367
Sathya Perlab9ab82c2011-06-29 23:33:37 +00003368 /* The INTR bit may be set in the card when probed by a kdump kernel
3369 * after a crash.
3370 */
3371 if (!lancer_chip(adapter))
3372 be_intr_set(adapter, false);
3373
Sathya Perla3abcded2010-10-03 22:12:27 -07003374 be_msix_enable(adapter);
3375
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003376 INIT_DELAYED_WORK(&adapter->work, be_worker);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003377
Sathya Perla5fb379e2009-06-18 00:02:59 +00003378 status = be_setup(adapter);
3379 if (status)
Sathya Perla3abcded2010-10-03 22:12:27 -07003380 goto msix_disable;
Sathya Perla2243e2e2009-11-22 22:02:03 +00003381
Sathya Perla3abcded2010-10-03 22:12:27 -07003382 be_netdev_init(netdev);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003383 status = register_netdev(netdev);
3384 if (status != 0)
Sathya Perla5fb379e2009-06-18 00:02:59 +00003385 goto unsetup;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003386
Ajit Khapardee6319362011-02-11 13:35:41 +00003387 if (be_physfn(adapter) && adapter->sriov_enabled) {
Ajit Khaparded0381c42011-04-19 12:11:55 +00003388 u8 mac_speed;
Ajit Khaparded0381c42011-04-19 12:11:55 +00003389 u16 vf, lnk_speed;
3390
Mammatha Edhala12f4d0a2011-05-18 03:26:22 +00003391 if (!lancer_chip(adapter)) {
3392 status = be_vf_eth_addr_config(adapter);
3393 if (status)
3394 goto unreg_netdev;
3395 }
Ajit Khaparded0381c42011-04-19 12:11:55 +00003396
3397 for (vf = 0; vf < num_vfs; vf++) {
Sathya Perlaea172a02011-08-02 19:57:42 +00003398 status = be_cmd_link_status_query(adapter, &mac_speed,
3399 &lnk_speed, vf + 1);
Ajit Khaparded0381c42011-04-19 12:11:55 +00003400 if (!status)
3401 adapter->vf_cfg[vf].vf_tx_rate = lnk_speed * 10;
3402 else
3403 goto unreg_netdev;
3404 }
Ajit Khapardee6319362011-02-11 13:35:41 +00003405 }
3406
Ajit Khapardec4ca2372009-05-18 15:38:55 -07003407 dev_info(&pdev->dev, "%s port %d\n", nic_name(pdev), adapter->port_num);
Somnath Kotur34b1ef02011-06-01 00:33:22 +00003408
Somnath Koturf203af72010-10-25 23:01:03 +00003409 schedule_delayed_work(&adapter->work, msecs_to_jiffies(100));
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003410 return 0;
3411
Ajit Khapardee6319362011-02-11 13:35:41 +00003412unreg_netdev:
3413 unregister_netdev(netdev);
Sathya Perla5fb379e2009-06-18 00:02:59 +00003414unsetup:
3415 be_clear(adapter);
Sathya Perla3abcded2010-10-03 22:12:27 -07003416msix_disable:
3417 be_msix_disable(adapter);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003418stats_clean:
3419 be_stats_cleanup(adapter);
3420ctrl_clean:
3421 be_ctrl_cleanup(adapter);
Ajit Khaparde48f5a192011-04-06 18:08:30 +00003422free_vf_cfg:
3423 kfree(adapter->vf_cfg);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003424free_netdev:
Sarveshwar Bandiba343c72010-03-31 02:56:12 +00003425 be_sriov_disable(adapter);
Sathya Perlafe6d2a32010-11-21 23:25:50 +00003426 free_netdev(netdev);
Sathya Perla8d56ff12009-11-22 22:02:26 +00003427 pci_set_drvdata(pdev, NULL);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003428rel_reg:
3429 pci_release_regions(pdev);
3430disable_dev:
3431 pci_disable_device(pdev);
3432do_none:
Ajit Khapardec4ca2372009-05-18 15:38:55 -07003433 dev_err(&pdev->dev, "%s initialization failed\n", nic_name(pdev));
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003434 return status;
3435}
3436
/* PCI suspend callback: quiesce the adapter (optionally arming
 * wake-on-LAN), close the interface, tear down queues and interrupts,
 * then power the device down to the requested state.
 */
static int be_suspend(struct pci_dev *pdev, pm_message_t state)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);
	struct net_device *netdev =  adapter->netdev;

	cancel_delayed_work_sync(&adapter->work);
	if (adapter->wol)
		be_setup_wol(adapter, true);

	netif_device_detach(netdev);
	if (netif_running(netdev)) {
		rtnl_lock();
		be_close(netdev);
		rtnl_unlock();
	}
	/* Remember current flow-control config so be_resume can restore it */
	be_cmd_get_flow_control(adapter, &adapter->tx_fc, &adapter->rx_fc);
	be_clear(adapter);

	be_msix_disable(adapter);
	pci_save_state(pdev);
	pci_disable_device(pdev);
	pci_set_power_state(pdev, pci_choose_state(pdev, state));
	return 0;
}
3461
/* PCI resume callback: power the device back up, re-init the firmware
 * interface, rebuild queues/interrupts and reopen the interface if it
 * was running, then restart the worker.
 */
static int be_resume(struct pci_dev *pdev)
{
	int status = 0;
	struct be_adapter *adapter = pci_get_drvdata(pdev);
	struct net_device *netdev =  adapter->netdev;

	netif_device_detach(netdev);

	status = pci_enable_device(pdev);
	if (status)
		return status;

	pci_set_power_state(pdev, 0);
	pci_restore_state(pdev);

	/* NOTE(review): the results of be_msix_enable() and be_setup()
	 * below are ignored — resume proceeds even if setup failed.
	 * Confirm whether that is intentional best-effort behavior.
	 */
	be_msix_enable(adapter);
	/* tell fw we're ready to fire cmds */
	status = be_cmd_fw_init(adapter);
	if (status)
		return status;

	be_setup(adapter);
	if (netif_running(netdev)) {
		rtnl_lock();
		be_open(netdev);
		rtnl_unlock();
	}
	netif_device_attach(netdev);

	if (adapter->wol)
		be_setup_wol(adapter, false);

	schedule_delayed_work(&adapter->work, msecs_to_jiffies(100));
	return 0;
}
3497
Sathya Perla82456b02010-02-17 01:35:37 +00003498/*
3499 * An FLR will stop BE from DMAing any data.
3500 */
3501static void be_shutdown(struct pci_dev *pdev)
3502{
3503 struct be_adapter *adapter = pci_get_drvdata(pdev);
Sathya Perla82456b02010-02-17 01:35:37 +00003504
Ajit Khaparde2d5d4152011-04-06 05:53:13 +00003505 if (!adapter)
3506 return;
Sathya Perla82456b02010-02-17 01:35:37 +00003507
Sathya Perla0f4a6822011-03-21 20:49:28 +00003508 cancel_delayed_work_sync(&adapter->work);
Ajit Khapardea4ca0552011-02-11 13:38:03 +00003509
Ajit Khaparde2d5d4152011-04-06 05:53:13 +00003510 netif_device_detach(adapter->netdev);
Sathya Perla82456b02010-02-17 01:35:37 +00003511
Sathya Perla82456b02010-02-17 01:35:37 +00003512 if (adapter->wol)
3513 be_setup_wol(adapter, true);
3514
Ajit Khaparde57841862011-04-06 18:08:43 +00003515 be_cmd_reset_function(adapter);
3516
Sathya Perla82456b02010-02-17 01:35:37 +00003517 pci_disable_device(pdev);
Sathya Perla82456b02010-02-17 01:35:37 +00003518}
3519
Sathya Perlacf588472010-02-14 21:22:01 +00003520static pci_ers_result_t be_eeh_err_detected(struct pci_dev *pdev,
3521 pci_channel_state_t state)
3522{
3523 struct be_adapter *adapter = pci_get_drvdata(pdev);
3524 struct net_device *netdev = adapter->netdev;
3525
3526 dev_err(&adapter->pdev->dev, "EEH error detected\n");
3527
3528 adapter->eeh_err = true;
3529
3530 netif_device_detach(netdev);
3531
3532 if (netif_running(netdev)) {
3533 rtnl_lock();
3534 be_close(netdev);
3535 rtnl_unlock();
3536 }
3537 be_clear(adapter);
3538
3539 if (state == pci_channel_io_perm_failure)
3540 return PCI_ERS_RESULT_DISCONNECT;
3541
3542 pci_disable_device(pdev);
3543
3544 return PCI_ERS_RESULT_NEED_RESET;
3545}
3546
3547static pci_ers_result_t be_eeh_reset(struct pci_dev *pdev)
3548{
3549 struct be_adapter *adapter = pci_get_drvdata(pdev);
3550 int status;
3551
3552 dev_info(&adapter->pdev->dev, "EEH reset\n");
3553 adapter->eeh_err = false;
3554
3555 status = pci_enable_device(pdev);
3556 if (status)
3557 return PCI_ERS_RESULT_DISCONNECT;
3558
3559 pci_set_master(pdev);
3560 pci_set_power_state(pdev, 0);
3561 pci_restore_state(pdev);
3562
3563 /* Check if card is ok and fw is ready */
3564 status = be_cmd_POST(adapter);
3565 if (status)
3566 return PCI_ERS_RESULT_DISCONNECT;
3567
3568 return PCI_ERS_RESULT_RECOVERED;
3569}
3570
3571static void be_eeh_resume(struct pci_dev *pdev)
3572{
3573 int status = 0;
3574 struct be_adapter *adapter = pci_get_drvdata(pdev);
3575 struct net_device *netdev = adapter->netdev;
3576
3577 dev_info(&adapter->pdev->dev, "EEH resume\n");
3578
3579 pci_save_state(pdev);
3580
3581 /* tell fw we're ready to fire cmds */
3582 status = be_cmd_fw_init(adapter);
3583 if (status)
3584 goto err;
3585
3586 status = be_setup(adapter);
3587 if (status)
3588 goto err;
3589
3590 if (netif_running(netdev)) {
3591 status = be_open(netdev);
3592 if (status)
3593 goto err;
3594 }
3595 netif_device_attach(netdev);
3596 return;
3597err:
3598 dev_err(&adapter->pdev->dev, "EEH resume failed\n");
Sathya Perlacf588472010-02-14 21:22:01 +00003599}
3600
3601static struct pci_error_handlers be_eeh_handlers = {
3602 .error_detected = be_eeh_err_detected,
3603 .slot_reset = be_eeh_reset,
3604 .resume = be_eeh_resume,
3605};
3606
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003607static struct pci_driver be_driver = {
3608 .name = DRV_NAME,
3609 .id_table = be_dev_ids,
3610 .probe = be_probe,
3611 .remove = be_remove,
3612 .suspend = be_suspend,
Sathya Perlacf588472010-02-14 21:22:01 +00003613 .resume = be_resume,
Sathya Perla82456b02010-02-17 01:35:37 +00003614 .shutdown = be_shutdown,
Sathya Perlacf588472010-02-14 21:22:01 +00003615 .err_handler = &be_eeh_handlers
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003616};
3617
3618static int __init be_init_module(void)
3619{
Joe Perches8e95a202009-12-03 07:58:21 +00003620 if (rx_frag_size != 8192 && rx_frag_size != 4096 &&
3621 rx_frag_size != 2048) {
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003622 printk(KERN_WARNING DRV_NAME
3623 " : Module param rx_frag_size must be 2048/4096/8192."
3624 " Using 2048\n");
3625 rx_frag_size = 2048;
3626 }
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003627
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003628 return pci_register_driver(&be_driver);
3629}
3630module_init(be_init_module);
3631
3632static void __exit be_exit_module(void)
3633{
3634 pci_unregister_driver(&be_driver);
3635}
3636module_exit(be_exit_module);